]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.0.4-201108292329.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201108292329.patch
1 diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2 --- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40 --- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86 --- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245 --- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344 --- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358 --- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382 --- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456 --- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536 --- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587 --- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639 --- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671 --- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757 --- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837 --- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975 --- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039 --- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085 --- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109 --- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121 --- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166 --- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185 --- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212 --- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314 --- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447 --- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461 --- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667 diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033 --- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074 diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075 --- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278 --- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384 diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410 diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411 --- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480 --- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508 --- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555 --- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629 --- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672 --- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684 --- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733 --- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745 --- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757 --- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833 diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028 diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059 +++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127 +++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365 diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366 --- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367 +++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798 +++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068 --- linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069 +++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080 --- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398 diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971 --- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008 --- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048 --- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061 --- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 + unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 + pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 + if (len > pax_task_size || addr > pax_task_size - len)
5103 + return -EINVAL;
5104 +
5105 + return 0;
5106 +}
5107 +
5108 /*
5109 * The prototype on i386 is:
5110 *
5111 diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112 --- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132 diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133 --- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139 - asm("movw %%ds,%0" : "=rm" (seg));
5140 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148 - asm("repe; cmpsb; setnz %0"
5149 + asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160 - movl $LOAD_PHYSICAL_ADDR, %ebx
5161 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169 - subl $LOAD_PHYSICAL_ADDR, %ebx
5170 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178 - testl %ecx, %ecx
5179 - jz 2f
5180 + jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191 - movl $LOAD_PHYSICAL_ADDR, %ebx
5192 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200 - movq $LOAD_PHYSICAL_ADDR, %rbp
5201 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218 diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239 diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 + unsigned int i;
5261 +
5262 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 + if (!phdr) {
5264 + die("Unable to allocate %d program headers\n",
5265 + ehdr.e_phnum);
5266 + }
5267 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 + die("Seek to %d failed: %s\n",
5269 + ehdr.e_phoff, strerror(errno));
5270 + }
5271 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 + die("Cannot read ELF program headers: %s\n",
5273 + strerror(errno));
5274 + }
5275 + for(i = 0; i < ehdr.e_phnum; i++) {
5276 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284 + }
5285 +
5286 +}
5287 +
5288 static void read_shdrs(FILE *fp)
5289 {
5290 - int i;
5291 + unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299 - int i;
5300 + unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308 - int i,j;
5309 + unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317 - int i,j;
5318 + unsigned int i,j;
5319 + uint32_t base;
5320 +
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328 + base = 0;
5329 + for (j = 0; j < ehdr.e_phnum; j++) {
5330 + if (phdr[j].p_type != PT_LOAD )
5331 + continue;
5332 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 + continue;
5334 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 + break;
5336 + }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348 - int i;
5349 + unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356 - int j;
5357 + unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365 - int i, printed = 0;
5366 + unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373 - int j;
5374 + unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382 - int i;
5383 + unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389 - int j;
5390 + unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 + continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 + continue;
5406 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 + continue;
5408 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 + continue;
5410 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 + continue;
5412 +#endif
5413 +
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421 - int i;
5422 + unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430 + read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434 diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441 - asm("movl %%cr0,%0" : "=r" (cr0));
5442 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450 - asm("pushfl ; "
5451 + asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459 - asm("cpuid"
5460 + asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468 - asm("cpuid"
5469 + asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477 - asm("cpuid"
5478 + asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486 - asm("cpuid"
5487 + asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 - asm("cpuid"
5522 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 + asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532 diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533 --- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544 diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545 --- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557 diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558 --- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564 - int count = 0;
5565 + unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570 --- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576 - int i, len = 0;
5577 + unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581 diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588 + boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5593 --- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599 + memset(&dump, 0, sizeof(dump));
5600 +
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604 diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5605 --- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606 +++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5607 @@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611 +#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615 @@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619 + .macro pax_enter_kernel_user
5620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5621 + call pax_enter_kernel_user
5622 +#endif
5623 + .endm
5624 +
5625 + .macro pax_exit_kernel_user
5626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5627 + call pax_exit_kernel_user
5628 +#endif
5629 +#ifdef CONFIG_PAX_RANDKSTACK
5630 + pushq %rax
5631 + call pax_randomize_kstack
5632 + popq %rax
5633 +#endif
5634 + .endm
5635 +
5636 + .macro pax_erase_kstack
5637 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638 + call pax_erase_kstack
5639 +#endif
5640 + .endm
5641 +
5642 /*
5643 * 32bit SYSENTER instruction entry.
5644 *
5645 @@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646 CFI_REGISTER rsp,rbp
5647 SWAPGS_UNSAFE_STACK
5648 movq PER_CPU_VAR(kernel_stack), %rsp
5649 - addq $(KERNEL_STACK_OFFSET),%rsp
5650 + pax_enter_kernel_user
5651 /*
5652 * No need to follow this irqs on/off section: the syscall
5653 * disabled irqs, here we enable it straight after entry:
5654 @@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655 CFI_REL_OFFSET rsp,0
5656 pushfq_cfi
5657 /*CFI_REL_OFFSET rflags,0*/
5658 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659 + GET_THREAD_INFO(%r10)
5660 + movl TI_sysenter_return(%r10), %r10d
5661 CFI_REGISTER rip,r10
5662 pushq_cfi $__USER32_CS
5663 /*CFI_REL_OFFSET cs,0*/
5664 @@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665 SAVE_ARGS 0,0,1
5666 /* no need to do an access_ok check here because rbp has been
5667 32bit zero extended */
5668 +
5669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5670 + mov $PAX_USER_SHADOW_BASE,%r10
5671 + add %r10,%rbp
5672 +#endif
5673 +
5674 1: movl (%rbp),%ebp
5675 .section __ex_table,"a"
5676 .quad 1b,ia32_badarg
5677 @@ -168,6 +199,8 @@ sysenter_dispatch:
5678 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679 jnz sysexit_audit
5680 sysexit_from_sys_call:
5681 + pax_exit_kernel_user
5682 + pax_erase_kstack
5683 andl $~TS_COMPAT,TI_status(%r10)
5684 /* clear IF, that popfq doesn't enable interrupts early */
5685 andl $~0x200,EFLAGS-R11(%rsp)
5686 @@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687 movl %eax,%esi /* 2nd arg: syscall number */
5688 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5689 call audit_syscall_entry
5690 +
5691 + pax_erase_kstack
5692 +
5693 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5694 cmpq $(IA32_NR_syscalls-1),%rax
5695 ja ia32_badsys
5696 @@ -246,6 +282,9 @@ sysenter_tracesys:
5697 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698 movq %rsp,%rdi /* &pt_regs -> arg1 */
5699 call syscall_trace_enter
5700 +
5701 + pax_erase_kstack
5702 +
5703 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5704 RESTORE_REST
5705 cmpq $(IA32_NR_syscalls-1),%rax
5706 @@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707 ENTRY(ia32_cstar_target)
5708 CFI_STARTPROC32 simple
5709 CFI_SIGNAL_FRAME
5710 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5711 + CFI_DEF_CFA rsp,0
5712 CFI_REGISTER rip,rcx
5713 /*CFI_REGISTER rflags,r11*/
5714 SWAPGS_UNSAFE_STACK
5715 movl %esp,%r8d
5716 CFI_REGISTER rsp,r8
5717 movq PER_CPU_VAR(kernel_stack),%rsp
5718 +
5719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5720 + pax_enter_kernel_user
5721 +#endif
5722 +
5723 /*
5724 * No need to follow this irqs on/off section: the syscall
5725 * disabled irqs and here we enable it straight after entry:
5726 */
5727 ENABLE_INTERRUPTS(CLBR_NONE)
5728 - SAVE_ARGS 8,1,1
5729 + SAVE_ARGS 8*6,1,1
5730 movl %eax,%eax /* zero extension */
5731 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732 movq %rcx,RIP-ARGOFFSET(%rsp)
5733 @@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734 /* no need to do an access_ok check here because r8 has been
5735 32bit zero extended */
5736 /* hardware stack frame is complete now */
5737 +
5738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5739 + mov $PAX_USER_SHADOW_BASE,%r10
5740 + add %r10,%r8
5741 +#endif
5742 +
5743 1: movl (%r8),%r9d
5744 .section __ex_table,"a"
5745 .quad 1b,ia32_badarg
5746 @@ -327,6 +377,8 @@ cstar_dispatch:
5747 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748 jnz sysretl_audit
5749 sysretl_from_sys_call:
5750 + pax_exit_kernel_user
5751 + pax_erase_kstack
5752 andl $~TS_COMPAT,TI_status(%r10)
5753 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754 movl RIP-ARGOFFSET(%rsp),%ecx
5755 @@ -364,6 +416,9 @@ cstar_tracesys:
5756 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757 movq %rsp,%rdi /* &pt_regs -> arg1 */
5758 call syscall_trace_enter
5759 +
5760 + pax_erase_kstack
5761 +
5762 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5763 RESTORE_REST
5764 xchgl %ebp,%r9d
5765 @@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766 CFI_REL_OFFSET rip,RIP-RIP
5767 PARAVIRT_ADJUST_EXCEPTION_FRAME
5768 SWAPGS
5769 + pax_enter_kernel_user
5770 /*
5771 * No need to follow this irqs on/off section: the syscall
5772 * disabled irqs and here we enable it straight after entry:
5773 @@ -441,6 +497,9 @@ ia32_tracesys:
5774 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777 +
5778 + pax_erase_kstack
5779 +
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5784 --- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5785 +++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5786 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5787 sp -= frame_size;
5788 /* Align the stack pointer according to the i386 ABI,
5789 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790 - sp = ((sp + 4) & -16ul) - 4;
5791 + sp = ((sp - 12) & -16ul) - 4;
5792 return (void __user *) sp;
5793 }
5794
5795 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796 * These are actually not used anymore, but left because some
5797 * gdb versions depend on them as a marker.
5798 */
5799 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801 } put_user_catch(err);
5802
5803 if (err)
5804 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5805 0xb8,
5806 __NR_ia32_rt_sigreturn,
5807 0x80cd,
5808 - 0,
5809 + 0
5810 };
5811
5812 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5814
5815 if (ka->sa.sa_flags & SA_RESTORER)
5816 restorer = ka->sa.sa_restorer;
5817 + else if (current->mm->context.vdso)
5818 + /* Return stub is in 32bit vsyscall page */
5819 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820 else
5821 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822 - rt_sigreturn);
5823 + restorer = &frame->retcode;
5824 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825
5826 /*
5827 * Not actually used anymore, but left because some gdb
5828 * versions need it.
5829 */
5830 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835 diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
5836 --- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5837 +++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5838 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839 ".section .discard,\"aw\",@progbits\n" \
5840 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5841 ".previous\n" \
5842 - ".section .altinstr_replacement, \"ax\"\n" \
5843 + ".section .altinstr_replacement, \"a\"\n" \
5844 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5845 ".previous"
5846
5847 diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
5848 --- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5849 +++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5850 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851
5852 #ifdef CONFIG_X86_LOCAL_APIC
5853
5854 -extern unsigned int apic_verbosity;
5855 +extern int apic_verbosity;
5856 extern int local_apic_timer_c2_ok;
5857
5858 extern int disable_apic;
5859 diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
5860 --- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5861 +++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5862 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863 __asm__ __volatile__(APM_DO_ZERO_SEGS
5864 "pushl %%edi\n\t"
5865 "pushl %%ebp\n\t"
5866 - "lcall *%%cs:apm_bios_entry\n\t"
5867 + "lcall *%%ss:apm_bios_entry\n\t"
5868 "setc %%al\n\t"
5869 "popl %%ebp\n\t"
5870 "popl %%edi\n\t"
5871 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872 __asm__ __volatile__(APM_DO_ZERO_SEGS
5873 "pushl %%edi\n\t"
5874 "pushl %%ebp\n\t"
5875 - "lcall *%%cs:apm_bios_entry\n\t"
5876 + "lcall *%%ss:apm_bios_entry\n\t"
5877 "setc %%bl\n\t"
5878 "popl %%ebp\n\t"
5879 "popl %%edi\n\t"
5880 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
5881 --- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5882 +++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5883 @@ -12,6 +12,14 @@ typedef struct {
5884 u64 __aligned(8) counter;
5885 } atomic64_t;
5886
5887 +#ifdef CONFIG_PAX_REFCOUNT
5888 +typedef struct {
5889 + u64 __aligned(8) counter;
5890 +} atomic64_unchecked_t;
5891 +#else
5892 +typedef atomic64_t atomic64_unchecked_t;
5893 +#endif
5894 +
5895 #define ATOMIC64_INIT(val) { (val) }
5896
5897 #ifdef CONFIG_X86_CMPXCHG64
5898 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899 }
5900
5901 /**
5902 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903 + * @p: pointer to type atomic64_unchecked_t
5904 + * @o: expected value
5905 + * @n: new value
5906 + *
5907 + * Atomically sets @v to @n if it was equal to @o and returns
5908 + * the old value.
5909 + */
5910 +
5911 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912 +{
5913 + return cmpxchg64(&v->counter, o, n);
5914 +}
5915 +
5916 +/**
5917 * atomic64_xchg - xchg atomic64 variable
5918 * @v: pointer to type atomic64_t
5919 * @n: value to assign
5920 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921 }
5922
5923 /**
5924 + * atomic64_set_unchecked - set atomic64 variable
5925 + * @v: pointer to type atomic64_unchecked_t
5926 + * @n: value to assign
5927 + *
5928 + * Atomically sets the value of @v to @n.
5929 + */
5930 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931 +{
5932 + unsigned high = (unsigned)(i >> 32);
5933 + unsigned low = (unsigned)i;
5934 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5935 + : "+b" (low), "+c" (high)
5936 + : "S" (v)
5937 + : "eax", "edx", "memory"
5938 + );
5939 +}
5940 +
5941 +/**
5942 * atomic64_read - read atomic64 variable
5943 * @v: pointer to type atomic64_t
5944 *
5945 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946 }
5947
5948 /**
5949 + * atomic64_read_unchecked - read atomic64 variable
5950 + * @v: pointer to type atomic64_unchecked_t
5951 + *
5952 + * Atomically reads the value of @v and returns it.
5953 + */
5954 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955 +{
5956 + long long r;
5957 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958 + : "=A" (r), "+c" (v)
5959 + : : "memory"
5960 + );
5961 + return r;
5962 + }
5963 +
5964 +/**
5965 * atomic64_add_return - add and return
5966 * @i: integer value to add
5967 * @v: pointer to type atomic64_t
5968 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969 return i;
5970 }
5971
5972 +/**
5973 + * atomic64_add_return_unchecked - add and return
5974 + * @i: integer value to add
5975 + * @v: pointer to type atomic64_unchecked_t
5976 + *
5977 + * Atomically adds @i to @v and returns @i + *@v
5978 + */
5979 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980 +{
5981 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982 + : "+A" (i), "+c" (v)
5983 + : : "memory"
5984 + );
5985 + return i;
5986 +}
5987 +
5988 /*
5989 * Other variants with different arithmetic operators:
5990 */
5991 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992 return a;
5993 }
5994
5995 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996 +{
5997 + long long a;
5998 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999 + : "=A" (a)
6000 + : "S" (v)
6001 + : "memory", "ecx"
6002 + );
6003 + return a;
6004 +}
6005 +
6006 static inline long long atomic64_dec_return(atomic64_t *v)
6007 {
6008 long long a;
6009 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010 }
6011
6012 /**
6013 + * atomic64_add_unchecked - add integer to atomic64 variable
6014 + * @i: integer value to add
6015 + * @v: pointer to type atomic64_unchecked_t
6016 + *
6017 + * Atomically adds @i to @v.
6018 + */
6019 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020 +{
6021 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022 + : "+A" (i), "+c" (v)
6023 + : : "memory"
6024 + );
6025 + return i;
6026 +}
6027 +
6028 +/**
6029 * atomic64_sub - subtract the atomic64 variable
6030 * @i: integer value to subtract
6031 * @v: pointer to type atomic64_t
6032 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6033 --- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6034 +++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6035 @@ -18,7 +18,19 @@
6036 */
6037 static inline long atomic64_read(const atomic64_t *v)
6038 {
6039 - return (*(volatile long *)&(v)->counter);
6040 + return (*(volatile const long *)&(v)->counter);
6041 +}
6042 +
6043 +/**
6044 + * atomic64_read_unchecked - read atomic64 variable
6045 + * @v: pointer of type atomic64_unchecked_t
6046 + *
6047 + * Atomically reads the value of @v.
6048 + * Doesn't imply a read memory barrier.
6049 + */
6050 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051 +{
6052 + return (*(volatile const long *)&(v)->counter);
6053 }
6054
6055 /**
6056 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057 }
6058
6059 /**
6060 + * atomic64_set_unchecked - set atomic64 variable
6061 + * @v: pointer to type atomic64_unchecked_t
6062 + * @i: required value
6063 + *
6064 + * Atomically sets the value of @v to @i.
6065 + */
6066 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067 +{
6068 + v->counter = i;
6069 +}
6070 +
6071 +/**
6072 * atomic64_add - add integer to atomic64 variable
6073 * @i: integer value to add
6074 * @v: pointer to type atomic64_t
6075 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076 */
6077 static inline void atomic64_add(long i, atomic64_t *v)
6078 {
6079 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080 +
6081 +#ifdef CONFIG_PAX_REFCOUNT
6082 + "jno 0f\n"
6083 + LOCK_PREFIX "subq %1,%0\n"
6084 + "int $4\n0:\n"
6085 + _ASM_EXTABLE(0b, 0b)
6086 +#endif
6087 +
6088 + : "=m" (v->counter)
6089 + : "er" (i), "m" (v->counter));
6090 +}
6091 +
6092 +/**
6093 + * atomic64_add_unchecked - add integer to atomic64 variable
6094 + * @i: integer value to add
6095 + * @v: pointer to type atomic64_unchecked_t
6096 + *
6097 + * Atomically adds @i to @v.
6098 + */
6099 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100 +{
6101 asm volatile(LOCK_PREFIX "addq %1,%0"
6102 : "=m" (v->counter)
6103 : "er" (i), "m" (v->counter));
6104 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6105 */
6106 static inline void atomic64_sub(long i, atomic64_t *v)
6107 {
6108 - asm volatile(LOCK_PREFIX "subq %1,%0"
6109 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110 +
6111 +#ifdef CONFIG_PAX_REFCOUNT
6112 + "jno 0f\n"
6113 + LOCK_PREFIX "addq %1,%0\n"
6114 + "int $4\n0:\n"
6115 + _ASM_EXTABLE(0b, 0b)
6116 +#endif
6117 +
6118 + : "=m" (v->counter)
6119 + : "er" (i), "m" (v->counter));
6120 +}
6121 +
6122 +/**
6123 + * atomic64_sub_unchecked - subtract the atomic64 variable
6124 + * @i: integer value to subtract
6125 + * @v: pointer to type atomic64_unchecked_t
6126 + *
6127 + * Atomically subtracts @i from @v.
6128 + */
6129 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130 +{
6131 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132 : "=m" (v->counter)
6133 : "er" (i), "m" (v->counter));
6134 }
6135 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136 {
6137 unsigned char c;
6138
6139 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141 +
6142 +#ifdef CONFIG_PAX_REFCOUNT
6143 + "jno 0f\n"
6144 + LOCK_PREFIX "addq %2,%0\n"
6145 + "int $4\n0:\n"
6146 + _ASM_EXTABLE(0b, 0b)
6147 +#endif
6148 +
6149 + "sete %1\n"
6150 : "=m" (v->counter), "=qm" (c)
6151 : "er" (i), "m" (v->counter) : "memory");
6152 return c;
6153 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154 */
6155 static inline void atomic64_inc(atomic64_t *v)
6156 {
6157 + asm volatile(LOCK_PREFIX "incq %0\n"
6158 +
6159 +#ifdef CONFIG_PAX_REFCOUNT
6160 + "jno 0f\n"
6161 + LOCK_PREFIX "decq %0\n"
6162 + "int $4\n0:\n"
6163 + _ASM_EXTABLE(0b, 0b)
6164 +#endif
6165 +
6166 + : "=m" (v->counter)
6167 + : "m" (v->counter));
6168 +}
6169 +
6170 +/**
6171 + * atomic64_inc_unchecked - increment atomic64 variable
6172 + * @v: pointer to type atomic64_unchecked_t
6173 + *
6174 + * Atomically increments @v by 1.
6175 + */
6176 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177 +{
6178 asm volatile(LOCK_PREFIX "incq %0"
6179 : "=m" (v->counter)
6180 : "m" (v->counter));
6181 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182 */
6183 static inline void atomic64_dec(atomic64_t *v)
6184 {
6185 - asm volatile(LOCK_PREFIX "decq %0"
6186 + asm volatile(LOCK_PREFIX "decq %0\n"
6187 +
6188 +#ifdef CONFIG_PAX_REFCOUNT
6189 + "jno 0f\n"
6190 + LOCK_PREFIX "incq %0\n"
6191 + "int $4\n0:\n"
6192 + _ASM_EXTABLE(0b, 0b)
6193 +#endif
6194 +
6195 + : "=m" (v->counter)
6196 + : "m" (v->counter));
6197 +}
6198 +
6199 +/**
6200 + * atomic64_dec_unchecked - decrement atomic64 variable
6201 + * @v: pointer to type atomic64_t
6202 + *
6203 + * Atomically decrements @v by 1.
6204 + */
6205 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206 +{
6207 + asm volatile(LOCK_PREFIX "decq %0\n"
6208 : "=m" (v->counter)
6209 : "m" (v->counter));
6210 }
6211 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212 {
6213 unsigned char c;
6214
6215 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216 + asm volatile(LOCK_PREFIX "decq %0\n"
6217 +
6218 +#ifdef CONFIG_PAX_REFCOUNT
6219 + "jno 0f\n"
6220 + LOCK_PREFIX "incq %0\n"
6221 + "int $4\n0:\n"
6222 + _ASM_EXTABLE(0b, 0b)
6223 +#endif
6224 +
6225 + "sete %1\n"
6226 : "=m" (v->counter), "=qm" (c)
6227 : "m" (v->counter) : "memory");
6228 return c != 0;
6229 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230 {
6231 unsigned char c;
6232
6233 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234 + asm volatile(LOCK_PREFIX "incq %0\n"
6235 +
6236 +#ifdef CONFIG_PAX_REFCOUNT
6237 + "jno 0f\n"
6238 + LOCK_PREFIX "decq %0\n"
6239 + "int $4\n0:\n"
6240 + _ASM_EXTABLE(0b, 0b)
6241 +#endif
6242 +
6243 + "sete %1\n"
6244 : "=m" (v->counter), "=qm" (c)
6245 : "m" (v->counter) : "memory");
6246 return c != 0;
6247 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248 {
6249 unsigned char c;
6250
6251 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253 +
6254 +#ifdef CONFIG_PAX_REFCOUNT
6255 + "jno 0f\n"
6256 + LOCK_PREFIX "subq %2,%0\n"
6257 + "int $4\n0:\n"
6258 + _ASM_EXTABLE(0b, 0b)
6259 +#endif
6260 +
6261 + "sets %1\n"
6262 : "=m" (v->counter), "=qm" (c)
6263 : "er" (i), "m" (v->counter) : "memory");
6264 return c;
6265 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266 static inline long atomic64_add_return(long i, atomic64_t *v)
6267 {
6268 long __i = i;
6269 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271 +
6272 +#ifdef CONFIG_PAX_REFCOUNT
6273 + "jno 0f\n"
6274 + "movq %0, %1\n"
6275 + "int $4\n0:\n"
6276 + _ASM_EXTABLE(0b, 0b)
6277 +#endif
6278 +
6279 + : "+r" (i), "+m" (v->counter)
6280 + : : "memory");
6281 + return i + __i;
6282 +}
6283 +
6284 +/**
6285 + * atomic64_add_return_unchecked - add and return
6286 + * @i: integer value to add
6287 + * @v: pointer to type atomic64_unchecked_t
6288 + *
6289 + * Atomically adds @i to @v and returns @i + @v
6290 + */
6291 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292 +{
6293 + long __i = i;
6294 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295 : "+r" (i), "+m" (v->counter)
6296 : : "memory");
6297 return i + __i;
6298 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299 }
6300
6301 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6302 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303 +{
6304 + return atomic64_add_return_unchecked(1, v);
6305 +}
6306 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6307
6308 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310 return cmpxchg(&v->counter, old, new);
6311 }
6312
6313 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314 +{
6315 + return cmpxchg(&v->counter, old, new);
6316 +}
6317 +
6318 static inline long atomic64_xchg(atomic64_t *v, long new)
6319 {
6320 return xchg(&v->counter, new);
6321 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322 */
6323 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324 {
6325 - long c, old;
6326 + long c, old, new;
6327 c = atomic64_read(v);
6328 for (;;) {
6329 - if (unlikely(c == (u)))
6330 + if (unlikely(c == u))
6331 break;
6332 - old = atomic64_cmpxchg((v), c, c + (a));
6333 +
6334 + asm volatile("add %2,%0\n"
6335 +
6336 +#ifdef CONFIG_PAX_REFCOUNT
6337 + "jno 0f\n"
6338 + "sub %2,%0\n"
6339 + "int $4\n0:\n"
6340 + _ASM_EXTABLE(0b, 0b)
6341 +#endif
6342 +
6343 + : "=r" (new)
6344 + : "0" (c), "ir" (a));
6345 +
6346 + old = atomic64_cmpxchg(v, c, new);
6347 if (likely(old == c))
6348 break;
6349 c = old;
6350 }
6351 - return c != (u);
6352 + return c != u;
6353 }
6354
6355 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6356 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6357 --- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6358 +++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6359 @@ -22,7 +22,18 @@
6360 */
6361 static inline int atomic_read(const atomic_t *v)
6362 {
6363 - return (*(volatile int *)&(v)->counter);
6364 + return (*(volatile const int *)&(v)->counter);
6365 +}
6366 +
6367 +/**
6368 + * atomic_read_unchecked - read atomic variable
6369 + * @v: pointer of type atomic_unchecked_t
6370 + *
6371 + * Atomically reads the value of @v.
6372 + */
6373 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374 +{
6375 + return (*(volatile const int *)&(v)->counter);
6376 }
6377
6378 /**
6379 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380 }
6381
6382 /**
6383 + * atomic_set_unchecked - set atomic variable
6384 + * @v: pointer of type atomic_unchecked_t
6385 + * @i: required value
6386 + *
6387 + * Atomically sets the value of @v to @i.
6388 + */
6389 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390 +{
6391 + v->counter = i;
6392 +}
6393 +
6394 +/**
6395 * atomic_add - add integer to atomic variable
6396 * @i: integer value to add
6397 * @v: pointer of type atomic_t
6398 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399 */
6400 static inline void atomic_add(int i, atomic_t *v)
6401 {
6402 - asm volatile(LOCK_PREFIX "addl %1,%0"
6403 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404 +
6405 +#ifdef CONFIG_PAX_REFCOUNT
6406 + "jno 0f\n"
6407 + LOCK_PREFIX "subl %1,%0\n"
6408 + "int $4\n0:\n"
6409 + _ASM_EXTABLE(0b, 0b)
6410 +#endif
6411 +
6412 + : "+m" (v->counter)
6413 + : "ir" (i));
6414 +}
6415 +
6416 +/**
6417 + * atomic_add_unchecked - add integer to atomic variable
6418 + * @i: integer value to add
6419 + * @v: pointer of type atomic_unchecked_t
6420 + *
6421 + * Atomically adds @i to @v.
6422 + */
6423 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424 +{
6425 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426 : "+m" (v->counter)
6427 : "ir" (i));
6428 }
6429 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430 */
6431 static inline void atomic_sub(int i, atomic_t *v)
6432 {
6433 - asm volatile(LOCK_PREFIX "subl %1,%0"
6434 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435 +
6436 +#ifdef CONFIG_PAX_REFCOUNT
6437 + "jno 0f\n"
6438 + LOCK_PREFIX "addl %1,%0\n"
6439 + "int $4\n0:\n"
6440 + _ASM_EXTABLE(0b, 0b)
6441 +#endif
6442 +
6443 + : "+m" (v->counter)
6444 + : "ir" (i));
6445 +}
6446 +
6447 +/**
6448 + * atomic_sub_unchecked - subtract integer from atomic variable
6449 + * @i: integer value to subtract
6450 + * @v: pointer of type atomic_unchecked_t
6451 + *
6452 + * Atomically subtracts @i from @v.
6453 + */
6454 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455 +{
6456 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461 {
6462 unsigned char c;
6463
6464 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466 +
6467 +#ifdef CONFIG_PAX_REFCOUNT
6468 + "jno 0f\n"
6469 + LOCK_PREFIX "addl %2,%0\n"
6470 + "int $4\n0:\n"
6471 + _ASM_EXTABLE(0b, 0b)
6472 +#endif
6473 +
6474 + "sete %1\n"
6475 : "+m" (v->counter), "=qm" (c)
6476 : "ir" (i) : "memory");
6477 return c;
6478 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479 */
6480 static inline void atomic_inc(atomic_t *v)
6481 {
6482 - asm volatile(LOCK_PREFIX "incl %0"
6483 + asm volatile(LOCK_PREFIX "incl %0\n"
6484 +
6485 +#ifdef CONFIG_PAX_REFCOUNT
6486 + "jno 0f\n"
6487 + LOCK_PREFIX "decl %0\n"
6488 + "int $4\n0:\n"
6489 + _ASM_EXTABLE(0b, 0b)
6490 +#endif
6491 +
6492 + : "+m" (v->counter));
6493 +}
6494 +
6495 +/**
6496 + * atomic_inc_unchecked - increment atomic variable
6497 + * @v: pointer of type atomic_unchecked_t
6498 + *
6499 + * Atomically increments @v by 1.
6500 + */
6501 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502 +{
6503 + asm volatile(LOCK_PREFIX "incl %0\n"
6504 : "+m" (v->counter));
6505 }
6506
6507 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508 */
6509 static inline void atomic_dec(atomic_t *v)
6510 {
6511 - asm volatile(LOCK_PREFIX "decl %0"
6512 + asm volatile(LOCK_PREFIX "decl %0\n"
6513 +
6514 +#ifdef CONFIG_PAX_REFCOUNT
6515 + "jno 0f\n"
6516 + LOCK_PREFIX "incl %0\n"
6517 + "int $4\n0:\n"
6518 + _ASM_EXTABLE(0b, 0b)
6519 +#endif
6520 +
6521 + : "+m" (v->counter));
6522 +}
6523 +
6524 +/**
6525 + * atomic_dec_unchecked - decrement atomic variable
6526 + * @v: pointer of type atomic_unchecked_t
6527 + *
6528 + * Atomically decrements @v by 1.
6529 + */
6530 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531 +{
6532 + asm volatile(LOCK_PREFIX "decl %0\n"
6533 : "+m" (v->counter));
6534 }
6535
6536 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537 {
6538 unsigned char c;
6539
6540 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541 + asm volatile(LOCK_PREFIX "decl %0\n"
6542 +
6543 +#ifdef CONFIG_PAX_REFCOUNT
6544 + "jno 0f\n"
6545 + LOCK_PREFIX "incl %0\n"
6546 + "int $4\n0:\n"
6547 + _ASM_EXTABLE(0b, 0b)
6548 +#endif
6549 +
6550 + "sete %1\n"
6551 : "+m" (v->counter), "=qm" (c)
6552 : : "memory");
6553 return c != 0;
6554 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555 {
6556 unsigned char c;
6557
6558 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559 + asm volatile(LOCK_PREFIX "incl %0\n"
6560 +
6561 +#ifdef CONFIG_PAX_REFCOUNT
6562 + "jno 0f\n"
6563 + LOCK_PREFIX "decl %0\n"
6564 + "int $4\n0:\n"
6565 + _ASM_EXTABLE(0b, 0b)
6566 +#endif
6567 +
6568 + "sete %1\n"
6569 + : "+m" (v->counter), "=qm" (c)
6570 + : : "memory");
6571 + return c != 0;
6572 +}
6573 +
6574 +/**
6575 + * atomic_inc_and_test_unchecked - increment and test
6576 + * @v: pointer of type atomic_unchecked_t
6577 + *
6578 + * Atomically increments @v by 1
6579 + * and returns true if the result is zero, or false for all
6580 + * other cases.
6581 + */
6582 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583 +{
6584 + unsigned char c;
6585 +
6586 + asm volatile(LOCK_PREFIX "incl %0\n"
6587 + "sete %1\n"
6588 : "+m" (v->counter), "=qm" (c)
6589 : : "memory");
6590 return c != 0;
6591 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592 {
6593 unsigned char c;
6594
6595 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597 +
6598 +#ifdef CONFIG_PAX_REFCOUNT
6599 + "jno 0f\n"
6600 + LOCK_PREFIX "subl %2,%0\n"
6601 + "int $4\n0:\n"
6602 + _ASM_EXTABLE(0b, 0b)
6603 +#endif
6604 +
6605 + "sets %1\n"
6606 : "+m" (v->counter), "=qm" (c)
6607 : "ir" (i) : "memory");
6608 return c;
6609 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6610 #endif
6611 /* Modern 486+ processor */
6612 __i = i;
6613 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614 +
6615 +#ifdef CONFIG_PAX_REFCOUNT
6616 + "jno 0f\n"
6617 + "movl %0, %1\n"
6618 + "int $4\n0:\n"
6619 + _ASM_EXTABLE(0b, 0b)
6620 +#endif
6621 +
6622 + : "+r" (i), "+m" (v->counter)
6623 + : : "memory");
6624 + return i + __i;
6625 +
6626 +#ifdef CONFIG_M386
6627 +no_xadd: /* Legacy 386 processor */
6628 + local_irq_save(flags);
6629 + __i = atomic_read(v);
6630 + atomic_set(v, i + __i);
6631 + local_irq_restore(flags);
6632 + return i + __i;
6633 +#endif
6634 +}
6635 +
6636 +/**
6637 + * atomic_add_return_unchecked - add integer and return
6638 + * @v: pointer of type atomic_unchecked_t
6639 + * @i: integer value to add
6640 + *
6641 + * Atomically adds @i to @v and returns @i + @v
6642 + */
6643 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644 +{
6645 + int __i;
6646 +#ifdef CONFIG_M386
6647 + unsigned long flags;
6648 + if (unlikely(boot_cpu_data.x86 <= 3))
6649 + goto no_xadd;
6650 +#endif
6651 + /* Modern 486+ processor */
6652 + __i = i;
6653 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654 : "+r" (i), "+m" (v->counter)
6655 : : "memory");
6656 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6657 }
6658
6659 #define atomic_inc_return(v) (atomic_add_return(1, v))
6660 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661 +{
6662 + return atomic_add_return_unchecked(1, v);
6663 +}
6664 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6665
6666 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668 return cmpxchg(&v->counter, old, new);
6669 }
6670
6671 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672 +{
6673 + return cmpxchg(&v->counter, old, new);
6674 +}
6675 +
6676 static inline int atomic_xchg(atomic_t *v, int new)
6677 {
6678 return xchg(&v->counter, new);
6679 }
6680
6681 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682 +{
6683 + return xchg(&v->counter, new);
6684 +}
6685 +
6686 /**
6687 * atomic_add_unless - add unless the number is already a given value
6688 * @v: pointer of type atomic_t
6689 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690 */
6691 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692 {
6693 - int c, old;
6694 + int c, old, new;
6695 c = atomic_read(v);
6696 for (;;) {
6697 - if (unlikely(c == (u)))
6698 + if (unlikely(c == u))
6699 break;
6700 - old = atomic_cmpxchg((v), c, c + (a));
6701 +
6702 + asm volatile("addl %2,%0\n"
6703 +
6704 +#ifdef CONFIG_PAX_REFCOUNT
6705 + "jno 0f\n"
6706 + "subl %2,%0\n"
6707 + "int $4\n0:\n"
6708 + _ASM_EXTABLE(0b, 0b)
6709 +#endif
6710 +
6711 + : "=r" (new)
6712 + : "0" (c), "ir" (a));
6713 +
6714 + old = atomic_cmpxchg(v, c, new);
6715 if (likely(old == c))
6716 break;
6717 c = old;
6718 }
6719 - return c != (u);
6720 + return c != u;
6721 }
6722
6723 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724
6725 +/**
6726 + * atomic_inc_not_zero_hint - increment if not null
6727 + * @v: pointer of type atomic_t
6728 + * @hint: probable value of the atomic before the increment
6729 + *
6730 + * This version of atomic_inc_not_zero() gives a hint of probable
6731 + * value of the atomic. This helps processor to not read the memory
6732 + * before doing the atomic read/modify/write cycle, lowering
6733 + * number of bus transactions on some arches.
6734 + *
6735 + * Returns: 0 if increment was not done, 1 otherwise.
6736 + */
6737 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739 +{
6740 + int val, c = hint, new;
6741 +
6742 + /* sanity test, should be removed by compiler if hint is a constant */
6743 + if (!hint)
6744 + return atomic_inc_not_zero(v);
6745 +
6746 + do {
6747 + asm volatile("incl %0\n"
6748 +
6749 +#ifdef CONFIG_PAX_REFCOUNT
6750 + "jno 0f\n"
6751 + "decl %0\n"
6752 + "int $4\n0:\n"
6753 + _ASM_EXTABLE(0b, 0b)
6754 +#endif
6755 +
6756 + : "=r" (new)
6757 + : "0" (c));
6758 +
6759 + val = atomic_cmpxchg(v, c, new);
6760 + if (val == c)
6761 + return 1;
6762 + c = val;
6763 + } while (c);
6764 +
6765 + return 0;
6766 +}
6767 +
6768 /*
6769 * atomic_dec_if_positive - decrement by 1 if old value positive
6770 * @v: pointer of type atomic_t
6771 diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6772 --- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6773 +++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6774 @@ -38,7 +38,7 @@
6775 * a mask operation on a byte.
6776 */
6777 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6778 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780 #define CONST_MASK(nr) (1 << ((nr) & 7))
6781
6782 /**
6783 diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6784 --- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6785 +++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6786 @@ -11,10 +11,15 @@
6787 #include <asm/pgtable_types.h>
6788
6789 /* Physical address where kernel should be loaded. */
6790 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794
6795 +#ifndef __ASSEMBLY__
6796 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798 +#endif
6799 +
6800 /* Minimum kernel alignment, as a power of two */
6801 #ifdef CONFIG_X86_64
6802 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6803 diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6804 --- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6805 +++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6806 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808
6809 if (pg_flags == _PGMT_DEFAULT)
6810 - return -1;
6811 + return ~0UL;
6812 else if (pg_flags == _PGMT_WC)
6813 return _PAGE_CACHE_WC;
6814 else if (pg_flags == _PGMT_UC_MINUS)
6815 diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
6816 --- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6817 +++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6818 @@ -5,12 +5,13 @@
6819
6820 /* L1 cache line size */
6821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824
6825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826 +#define __read_only __attribute__((__section__(".data..read_only")))
6827
6828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831
6832 #ifdef CONFIG_X86_VSMP
6833 #ifdef CONFIG_SMP
6834 diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
6835 --- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6836 +++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6837 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838 int len, __wsum sum,
6839 int *src_err_ptr, int *dst_err_ptr);
6840
6841 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842 + int len, __wsum sum,
6843 + int *src_err_ptr, int *dst_err_ptr);
6844 +
6845 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846 + int len, __wsum sum,
6847 + int *src_err_ptr, int *dst_err_ptr);
6848 +
6849 /*
6850 * Note: when you get a NULL pointer exception here this means someone
6851 * passed in an incorrect kernel address to one of these functions.
6852 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853 int *err_ptr)
6854 {
6855 might_sleep();
6856 - return csum_partial_copy_generic((__force void *)src, dst,
6857 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858 len, sum, err_ptr, NULL);
6859 }
6860
6861 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862 {
6863 might_sleep();
6864 if (access_ok(VERIFY_WRITE, dst, len))
6865 - return csum_partial_copy_generic(src, (__force void *)dst,
6866 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867 len, sum, NULL, err_ptr);
6868
6869 if (len)
6870 diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
6871 --- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6872 +++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6873 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874 ".section .discard,\"aw\",@progbits\n"
6875 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876 ".previous\n"
6877 - ".section .altinstr_replacement,\"ax\"\n"
6878 + ".section .altinstr_replacement,\"a\"\n"
6879 "3: movb $1,%0\n"
6880 "4:\n"
6881 ".previous\n"
6882 diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
6883 --- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6884 +++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6885 @@ -31,6 +31,12 @@ struct desc_struct {
6886 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888 };
6889 + struct {
6890 + u16 offset_low;
6891 + u16 seg;
6892 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893 + unsigned offset_high: 16;
6894 + } gate;
6895 };
6896 } __attribute__((packed));
6897
6898 diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
6899 --- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6900 +++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6901 @@ -4,6 +4,7 @@
6902 #include <asm/desc_defs.h>
6903 #include <asm/ldt.h>
6904 #include <asm/mmu.h>
6905 +#include <asm/pgtable.h>
6906
6907 #include <linux/smp.h>
6908
6909 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910
6911 desc->type = (info->read_exec_only ^ 1) << 1;
6912 desc->type |= info->contents << 2;
6913 + desc->type |= info->seg_not_present ^ 1;
6914
6915 desc->s = 1;
6916 desc->dpl = 0x3;
6917 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918 }
6919
6920 extern struct desc_ptr idt_descr;
6921 -extern gate_desc idt_table[];
6922 -
6923 -struct gdt_page {
6924 - struct desc_struct gdt[GDT_ENTRIES];
6925 -} __attribute__((aligned(PAGE_SIZE)));
6926 -
6927 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928 +extern gate_desc idt_table[256];
6929
6930 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932 {
6933 - return per_cpu(gdt_page, cpu).gdt;
6934 + return cpu_gdt_table[cpu];
6935 }
6936
6937 #ifdef CONFIG_X86_64
6938 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939 unsigned long base, unsigned dpl, unsigned flags,
6940 unsigned short seg)
6941 {
6942 - gate->a = (seg << 16) | (base & 0xffff);
6943 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944 + gate->gate.offset_low = base;
6945 + gate->gate.seg = seg;
6946 + gate->gate.reserved = 0;
6947 + gate->gate.type = type;
6948 + gate->gate.s = 0;
6949 + gate->gate.dpl = dpl;
6950 + gate->gate.p = 1;
6951 + gate->gate.offset_high = base >> 16;
6952 }
6953
6954 #endif
6955 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956
6957 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958 {
6959 + pax_open_kernel();
6960 memcpy(&idt[entry], gate, sizeof(*gate));
6961 + pax_close_kernel();
6962 }
6963
6964 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965 {
6966 + pax_open_kernel();
6967 memcpy(&ldt[entry], desc, 8);
6968 + pax_close_kernel();
6969 }
6970
6971 static inline void
6972 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973 default: size = sizeof(*gdt); break;
6974 }
6975
6976 + pax_open_kernel();
6977 memcpy(&gdt[entry], desc, size);
6978 + pax_close_kernel();
6979 }
6980
6981 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6983
6984 static inline void native_load_tr_desc(void)
6985 {
6986 + pax_open_kernel();
6987 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988 + pax_close_kernel();
6989 }
6990
6991 static inline void native_load_gdt(const struct desc_ptr *dtr)
6992 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994 unsigned int i;
6995
6996 + pax_open_kernel();
6997 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999 + pax_close_kernel();
7000 }
7001
7002 #define _LDT_empty(info) \
7003 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004 desc->limit = (limit >> 16) & 0xf;
7005 }
7006
7007 -static inline void _set_gate(int gate, unsigned type, void *addr,
7008 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7009 unsigned dpl, unsigned ist, unsigned seg)
7010 {
7011 gate_desc s;
7012 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013 * Pentium F0 0F bugfix can have resulted in the mapped
7014 * IDT being write-protected.
7015 */
7016 -static inline void set_intr_gate(unsigned int n, void *addr)
7017 +static inline void set_intr_gate(unsigned int n, const void *addr)
7018 {
7019 BUG_ON((unsigned)n > 0xFF);
7020 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022 /*
7023 * This routine sets up an interrupt gate at directory privilege level 3.
7024 */
7025 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7026 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027 {
7028 BUG_ON((unsigned)n > 0xFF);
7029 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030 }
7031
7032 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7033 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034 {
7035 BUG_ON((unsigned)n > 0xFF);
7036 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037 }
7038
7039 -static inline void set_trap_gate(unsigned int n, void *addr)
7040 +static inline void set_trap_gate(unsigned int n, const void *addr)
7041 {
7042 BUG_ON((unsigned)n > 0xFF);
7043 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046 {
7047 BUG_ON((unsigned)n > 0xFF);
7048 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050 }
7051
7052 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054 {
7055 BUG_ON((unsigned)n > 0xFF);
7056 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057 }
7058
7059 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061 {
7062 BUG_ON((unsigned)n > 0xFF);
7063 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064 }
7065
7066 +#ifdef CONFIG_X86_32
7067 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068 +{
7069 + struct desc_struct d;
7070 +
7071 + if (likely(limit))
7072 + limit = (limit - 1UL) >> PAGE_SHIFT;
7073 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075 +}
7076 +#endif
7077 +
7078 #endif /* _ASM_X86_DESC_H */
7079 diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7080 --- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7081 +++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7082 @@ -69,7 +69,7 @@ struct e820map {
7083 #define ISA_START_ADDRESS 0xa0000
7084 #define ISA_END_ADDRESS 0x100000
7085
7086 -#define BIOS_BEGIN 0x000a0000
7087 +#define BIOS_BEGIN 0x000c0000
7088 #define BIOS_END 0x00100000
7089
7090 #define BIOS_ROM_BASE 0xffe00000
7091 diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7092 --- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7093 +++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7094 @@ -237,7 +237,25 @@ extern int force_personality32;
7095 the loader. We need to make sure that it is out of the way of the program
7096 that it will "exec", and that there is sufficient room for the brk. */
7097
7098 +#ifdef CONFIG_PAX_SEGMEXEC
7099 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100 +#else
7101 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7102 +#endif
7103 +
7104 +#ifdef CONFIG_PAX_ASLR
7105 +#ifdef CONFIG_X86_32
7106 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7107 +
7108 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110 +#else
7111 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7112 +
7113 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115 +#endif
7116 +#endif
7117
7118 /* This yields a mask that user programs can use to figure out what
7119 instruction set this CPU supports. This could be done in user space,
7120 @@ -290,9 +308,7 @@ do { \
7121
7122 #define ARCH_DLINFO \
7123 do { \
7124 - if (vdso_enabled) \
7125 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7126 - (unsigned long)current->mm->context.vdso); \
7127 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7128 } while (0)
7129
7130 #define AT_SYSINFO 32
7131 @@ -303,7 +319,7 @@ do { \
7132
7133 #endif /* !CONFIG_X86_32 */
7134
7135 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7136 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7137
7138 #define VDSO_ENTRY \
7139 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142 #define compat_arch_setup_additional_pages syscall32_setup_pages
7143
7144 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145 -#define arch_randomize_brk arch_randomize_brk
7146 -
7147 #endif /* _ASM_X86_ELF_H */
7148 diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7149 --- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7150 +++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7151 @@ -15,6 +15,6 @@ enum reboot_type {
7152
7153 extern enum reboot_type reboot_type;
7154
7155 -extern void machine_emergency_restart(void);
7156 +extern void machine_emergency_restart(void) __noreturn;
7157
7158 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159 diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7160 --- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7161 +++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7162 @@ -12,16 +12,18 @@
7163 #include <asm/system.h>
7164
7165 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7166 + typecheck(u32 *, uaddr); \
7167 asm volatile("1:\t" insn "\n" \
7168 "2:\t.section .fixup,\"ax\"\n" \
7169 "3:\tmov\t%3, %1\n" \
7170 "\tjmp\t2b\n" \
7171 "\t.previous\n" \
7172 _ASM_EXTABLE(1b, 3b) \
7173 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175 : "i" (-EFAULT), "0" (oparg), "1" (0))
7176
7177 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7178 + typecheck(u32 *, uaddr); \
7179 asm volatile("1:\tmovl %2, %0\n" \
7180 "\tmovl\t%0, %3\n" \
7181 "\t" insn "\n" \
7182 @@ -34,7 +36,7 @@
7183 _ASM_EXTABLE(1b, 4b) \
7184 _ASM_EXTABLE(2b, 4b) \
7185 : "=&a" (oldval), "=&r" (ret), \
7186 - "+m" (*uaddr), "=&r" (tem) \
7187 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188 : "r" (oparg), "i" (-EFAULT), "1" (0))
7189
7190 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192
7193 switch (op) {
7194 case FUTEX_OP_SET:
7195 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 break;
7198 case FUTEX_OP_ADD:
7199 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201 uaddr, oparg);
7202 break;
7203 case FUTEX_OP_OR:
7204 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206 return -EFAULT;
7207
7208 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210 "2:\t.section .fixup, \"ax\"\n"
7211 "3:\tmov %3, %0\n"
7212 "\tjmp 2b\n"
7213 "\t.previous\n"
7214 _ASM_EXTABLE(1b, 3b)
7215 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218 : "memory"
7219 );
7220 diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7221 --- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7222 +++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7223 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224 extern void enable_IO_APIC(void);
7225
7226 /* Statistics */
7227 -extern atomic_t irq_err_count;
7228 -extern atomic_t irq_mis_count;
7229 +extern atomic_unchecked_t irq_err_count;
7230 +extern atomic_unchecked_t irq_mis_count;
7231
7232 /* EISA */
7233 extern void eisa_set_level_irq(unsigned int irq);
7234 diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7235 --- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7236 +++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7237 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238 {
7239 int err;
7240
7241 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244 +#endif
7245 +
7246 /* See comment in fxsave() below. */
7247 #ifdef CONFIG_AS_FXSAVEQ
7248 asm volatile("1: fxrstorq %[fx]\n\t"
7249 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250 {
7251 int err;
7252
7253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256 +#endif
7257 +
7258 /*
7259 * Clear the bytes not touched by the fxsave and reserved
7260 * for the SW usage.
7261 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262 #endif /* CONFIG_X86_64 */
7263
7264 /* We need a safe address that is cheap to find and that is already
7265 - in L1 during context switch. The best choices are unfortunately
7266 - different for UP and SMP */
7267 -#ifdef CONFIG_SMP
7268 -#define safe_address (__per_cpu_offset[0])
7269 -#else
7270 -#define safe_address (kstat_cpu(0).cpustat.user)
7271 -#endif
7272 + in L1 during context switch. */
7273 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274
7275 /*
7276 * These must be called with preempt disabled
7277 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278 struct thread_info *me = current_thread_info();
7279 preempt_disable();
7280 if (me->status & TS_USEDFPU)
7281 - __save_init_fpu(me->task);
7282 + __save_init_fpu(current);
7283 else
7284 clts();
7285 }
7286 diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7287 --- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7288 +++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7289 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290
7291 #include <linux/vmalloc.h>
7292
7293 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295 +{
7296 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297 +}
7298 +
7299 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300 +{
7301 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302 +}
7303 +
7304 /*
7305 * Convert a virtual cached pointer to an uncached pointer
7306 */
7307 diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7308 --- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309 +++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311 sti; \
7312 sysexit
7313
7314 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7315 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7316 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7317 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7318 +
7319 #else
7320 #define INTERRUPT_RETURN iret
7321 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7322 diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7323 --- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7324 +++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7325 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326 #define RELATIVEJUMP_SIZE 5
7327 #define RELATIVECALL_OPCODE 0xe8
7328 #define RELATIVE_ADDR_SIZE 4
7329 -#define MAX_STACK_SIZE 64
7330 -#define MIN_STACK_SIZE(ADDR) \
7331 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332 - THREAD_SIZE - (unsigned long)(ADDR))) \
7333 - ? (MAX_STACK_SIZE) \
7334 - : (((unsigned long)current_thread_info()) + \
7335 - THREAD_SIZE - (unsigned long)(ADDR)))
7336 +#define MAX_STACK_SIZE 64UL
7337 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338
7339 #define flush_insn_slot(p) do { } while (0)
7340
7341 diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7342 --- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343 +++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7344 @@ -441,7 +441,7 @@ struct kvm_arch {
7345 unsigned int n_used_mmu_pages;
7346 unsigned int n_requested_mmu_pages;
7347 unsigned int n_max_mmu_pages;
7348 - atomic_t invlpg_counter;
7349 + atomic_unchecked_t invlpg_counter;
7350 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351 /*
7352 * Hash table of struct kvm_mmu_page.
7353 @@ -619,7 +619,7 @@ struct kvm_x86_ops {
7354 enum x86_intercept_stage stage);
7355
7356 const struct trace_print_flags *exit_reasons_str;
7357 -};
7358 +} __do_const;
7359
7360 struct kvm_arch_async_pf {
7361 u32 token;
7362 diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7363 --- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7364 +++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7365 @@ -18,26 +18,58 @@ typedef struct {
7366
7367 static inline void local_inc(local_t *l)
7368 {
7369 - asm volatile(_ASM_INC "%0"
7370 + asm volatile(_ASM_INC "%0\n"
7371 +
7372 +#ifdef CONFIG_PAX_REFCOUNT
7373 + "jno 0f\n"
7374 + _ASM_DEC "%0\n"
7375 + "int $4\n0:\n"
7376 + _ASM_EXTABLE(0b, 0b)
7377 +#endif
7378 +
7379 : "+m" (l->a.counter));
7380 }
7381
7382 static inline void local_dec(local_t *l)
7383 {
7384 - asm volatile(_ASM_DEC "%0"
7385 + asm volatile(_ASM_DEC "%0\n"
7386 +
7387 +#ifdef CONFIG_PAX_REFCOUNT
7388 + "jno 0f\n"
7389 + _ASM_INC "%0\n"
7390 + "int $4\n0:\n"
7391 + _ASM_EXTABLE(0b, 0b)
7392 +#endif
7393 +
7394 : "+m" (l->a.counter));
7395 }
7396
7397 static inline void local_add(long i, local_t *l)
7398 {
7399 - asm volatile(_ASM_ADD "%1,%0"
7400 + asm volatile(_ASM_ADD "%1,%0\n"
7401 +
7402 +#ifdef CONFIG_PAX_REFCOUNT
7403 + "jno 0f\n"
7404 + _ASM_SUB "%1,%0\n"
7405 + "int $4\n0:\n"
7406 + _ASM_EXTABLE(0b, 0b)
7407 +#endif
7408 +
7409 : "+m" (l->a.counter)
7410 : "ir" (i));
7411 }
7412
7413 static inline void local_sub(long i, local_t *l)
7414 {
7415 - asm volatile(_ASM_SUB "%1,%0"
7416 + asm volatile(_ASM_SUB "%1,%0\n"
7417 +
7418 +#ifdef CONFIG_PAX_REFCOUNT
7419 + "jno 0f\n"
7420 + _ASM_ADD "%1,%0\n"
7421 + "int $4\n0:\n"
7422 + _ASM_EXTABLE(0b, 0b)
7423 +#endif
7424 +
7425 : "+m" (l->a.counter)
7426 : "ir" (i));
7427 }
7428 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429 {
7430 unsigned char c;
7431
7432 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7433 + asm volatile(_ASM_SUB "%2,%0\n"
7434 +
7435 +#ifdef CONFIG_PAX_REFCOUNT
7436 + "jno 0f\n"
7437 + _ASM_ADD "%2,%0\n"
7438 + "int $4\n0:\n"
7439 + _ASM_EXTABLE(0b, 0b)
7440 +#endif
7441 +
7442 + "sete %1\n"
7443 : "+m" (l->a.counter), "=qm" (c)
7444 : "ir" (i) : "memory");
7445 return c;
7446 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447 {
7448 unsigned char c;
7449
7450 - asm volatile(_ASM_DEC "%0; sete %1"
7451 + asm volatile(_ASM_DEC "%0\n"
7452 +
7453 +#ifdef CONFIG_PAX_REFCOUNT
7454 + "jno 0f\n"
7455 + _ASM_INC "%0\n"
7456 + "int $4\n0:\n"
7457 + _ASM_EXTABLE(0b, 0b)
7458 +#endif
7459 +
7460 + "sete %1\n"
7461 : "+m" (l->a.counter), "=qm" (c)
7462 : : "memory");
7463 return c != 0;
7464 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465 {
7466 unsigned char c;
7467
7468 - asm volatile(_ASM_INC "%0; sete %1"
7469 + asm volatile(_ASM_INC "%0\n"
7470 +
7471 +#ifdef CONFIG_PAX_REFCOUNT
7472 + "jno 0f\n"
7473 + _ASM_DEC "%0\n"
7474 + "int $4\n0:\n"
7475 + _ASM_EXTABLE(0b, 0b)
7476 +#endif
7477 +
7478 + "sete %1\n"
7479 : "+m" (l->a.counter), "=qm" (c)
7480 : : "memory");
7481 return c != 0;
7482 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483 {
7484 unsigned char c;
7485
7486 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7487 + asm volatile(_ASM_ADD "%2,%0\n"
7488 +
7489 +#ifdef CONFIG_PAX_REFCOUNT
7490 + "jno 0f\n"
7491 + _ASM_SUB "%2,%0\n"
7492 + "int $4\n0:\n"
7493 + _ASM_EXTABLE(0b, 0b)
7494 +#endif
7495 +
7496 + "sets %1\n"
7497 : "+m" (l->a.counter), "=qm" (c)
7498 : "ir" (i) : "memory");
7499 return c;
7500 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7501 #endif
7502 /* Modern 486+ processor */
7503 __i = i;
7504 - asm volatile(_ASM_XADD "%0, %1;"
7505 + asm volatile(_ASM_XADD "%0, %1\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 + "jno 0f\n"
7509 + _ASM_MOV "%0,%1\n"
7510 + "int $4\n0:\n"
7511 + _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514 : "+r" (i), "+m" (l->a.counter)
7515 : : "memory");
7516 return i + __i;
7517 diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7518 --- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7519 +++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7520 @@ -5,4 +5,14 @@
7521
7522 #include <asm-generic/mman.h>
7523
7524 +#ifdef __KERNEL__
7525 +#ifndef __ASSEMBLY__
7526 +#ifdef CONFIG_X86_32
7527 +#define arch_mmap_check i386_mmap_check
7528 +int i386_mmap_check(unsigned long addr, unsigned long len,
7529 + unsigned long flags);
7530 +#endif
7531 +#endif
7532 +#endif
7533 +
7534 #endif /* _ASM_X86_MMAN_H */
7535 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7536 --- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7537 +++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7538 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539
7540 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541 {
7542 +
7543 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544 + unsigned int i;
7545 + pgd_t *pgd;
7546 +
7547 + pax_open_kernel();
7548 + pgd = get_cpu_pgd(smp_processor_id());
7549 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550 + set_pgd_batched(pgd+i, native_make_pgd(0));
7551 + pax_close_kernel();
7552 +#endif
7553 +
7554 #ifdef CONFIG_SMP
7555 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558 struct task_struct *tsk)
7559 {
7560 unsigned cpu = smp_processor_id();
7561 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562 + int tlbstate = TLBSTATE_OK;
7563 +#endif
7564
7565 if (likely(prev != next)) {
7566 #ifdef CONFIG_SMP
7567 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568 + tlbstate = percpu_read(cpu_tlbstate.state);
7569 +#endif
7570 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571 percpu_write(cpu_tlbstate.active_mm, next);
7572 #endif
7573 cpumask_set_cpu(cpu, mm_cpumask(next));
7574
7575 /* Re-load page tables */
7576 +#ifdef CONFIG_PAX_PER_CPU_PGD
7577 + pax_open_kernel();
7578 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580 + pax_close_kernel();
7581 + load_cr3(get_cpu_pgd(cpu));
7582 +#else
7583 load_cr3(next->pgd);
7584 +#endif
7585
7586 /* stop flush ipis for the previous mm */
7587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589 */
7590 if (unlikely(prev->context.ldt != next->context.ldt))
7591 load_LDT_nolock(&next->context);
7592 - }
7593 +
7594 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595 + if (!(__supported_pte_mask & _PAGE_NX)) {
7596 + smp_mb__before_clear_bit();
7597 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598 + smp_mb__after_clear_bit();
7599 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7600 + }
7601 +#endif
7602 +
7603 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605 + prev->context.user_cs_limit != next->context.user_cs_limit))
7606 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607 #ifdef CONFIG_SMP
7608 + else if (unlikely(tlbstate != TLBSTATE_OK))
7609 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610 +#endif
7611 +#endif
7612 +
7613 + }
7614 else {
7615 +
7616 +#ifdef CONFIG_PAX_PER_CPU_PGD
7617 + pax_open_kernel();
7618 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620 + pax_close_kernel();
7621 + load_cr3(get_cpu_pgd(cpu));
7622 +#endif
7623 +
7624 +#ifdef CONFIG_SMP
7625 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627
7628 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629 * tlb flush IPI delivery. We must reload CR3
7630 * to make sure to use no freed page tables.
7631 */
7632 +
7633 +#ifndef CONFIG_PAX_PER_CPU_PGD
7634 load_cr3(next->pgd);
7635 +#endif
7636 +
7637 load_LDT_nolock(&next->context);
7638 +
7639 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640 + if (!(__supported_pte_mask & _PAGE_NX))
7641 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7642 +#endif
7643 +
7644 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645 +#ifdef CONFIG_PAX_PAGEEXEC
7646 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647 +#endif
7648 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649 +#endif
7650 +
7651 }
7652 - }
7653 #endif
7654 + }
7655 }
7656
7657 #define activate_mm(prev, next) \
7658 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7659 --- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7660 +++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7661 @@ -9,7 +9,7 @@
7662 * we put the segment information here.
7663 */
7664 typedef struct {
7665 - void *ldt;
7666 + struct desc_struct *ldt;
7667 int size;
7668
7669 #ifdef CONFIG_X86_64
7670 @@ -18,7 +18,19 @@ typedef struct {
7671 #endif
7672
7673 struct mutex lock;
7674 - void *vdso;
7675 + unsigned long vdso;
7676 +
7677 +#ifdef CONFIG_X86_32
7678 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679 + unsigned long user_cs_base;
7680 + unsigned long user_cs_limit;
7681 +
7682 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683 + cpumask_t cpu_user_cs_mask;
7684 +#endif
7685 +
7686 +#endif
7687 +#endif
7688 } mm_context_t;
7689
7690 #ifdef CONFIG_SMP
7691 diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7692 --- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7693 +++ linux-3.0.4/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7694 @@ -5,6 +5,7 @@
7695
7696 #ifdef CONFIG_X86_64
7697 /* X86_64 does not define MODULE_PROC_FAMILY */
7698 +#define MODULE_PROC_FAMILY ""
7699 #elif defined CONFIG_M386
7700 #define MODULE_PROC_FAMILY "386 "
7701 #elif defined CONFIG_M486
7702 @@ -59,8 +60,30 @@
7703 #error unknown processor family
7704 #endif
7705
7706 -#ifdef CONFIG_X86_32
7707 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7709 +#define MODULE_PAX_UDEREF "UDEREF "
7710 +#else
7711 +#define MODULE_PAX_UDEREF ""
7712 +#endif
7713 +
7714 +#ifdef CONFIG_PAX_KERNEXEC
7715 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716 +#else
7717 +#define MODULE_PAX_KERNEXEC ""
7718 #endif
7719
7720 +#ifdef CONFIG_PAX_REFCOUNT
7721 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722 +#else
7723 +#define MODULE_PAX_REFCOUNT ""
7724 +#endif
7725 +
7726 +#ifdef CONFIG_GRKERNSEC
7727 +#define MODULE_GRSEC "GRSECURITY "
7728 +#else
7729 +#define MODULE_GRSEC ""
7730 +#endif
7731 +
7732 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733 +
7734 #endif /* _ASM_X86_MODULE_H */
7735 diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7736 --- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7737 +++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7738 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739
7740 /* duplicated to the one in bootmem.h */
7741 extern unsigned long max_pfn;
7742 -extern unsigned long phys_base;
7743 +extern const unsigned long phys_base;
7744
7745 extern unsigned long __phys_addr(unsigned long);
7746 #define __phys_reloc_hide(x) (x)
7747 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7748 --- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749 +++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7751 val);
7752 }
7753
7754 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755 +{
7756 + pgdval_t val = native_pgd_val(pgd);
7757 +
7758 + if (sizeof(pgdval_t) > sizeof(long))
7759 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760 + val, (u64)val >> 32);
7761 + else
7762 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763 + val);
7764 +}
7765 +
7766 static inline void pgd_clear(pgd_t *pgdp)
7767 {
7768 set_pgd(pgdp, __pgd(0));
7769 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770 pv_mmu_ops.set_fixmap(idx, phys, flags);
7771 }
7772
7773 +#ifdef CONFIG_PAX_KERNEXEC
7774 +static inline unsigned long pax_open_kernel(void)
7775 +{
7776 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777 +}
7778 +
7779 +static inline unsigned long pax_close_kernel(void)
7780 +{
7781 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782 +}
7783 +#else
7784 +static inline unsigned long pax_open_kernel(void) { return 0; }
7785 +static inline unsigned long pax_close_kernel(void) { return 0; }
7786 +#endif
7787 +
7788 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789
7790 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791 @@ -955,7 +982,7 @@ extern void default_banner(void);
7792
7793 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795 -#define PARA_INDIRECT(addr) *%cs:addr
7796 +#define PARA_INDIRECT(addr) *%ss:addr
7797 #endif
7798
7799 #define INTERRUPT_RETURN \
7800 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 CLBR_NONE, \
7803 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804 +
7805 +#define GET_CR0_INTO_RDI \
7806 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807 + mov %rax,%rdi
7808 +
7809 +#define SET_RDI_INTO_CR0 \
7810 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811 +
7812 +#define GET_CR3_INTO_RDI \
7813 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814 + mov %rax,%rdi
7815 +
7816 +#define SET_RDI_INTO_CR3 \
7817 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818 +
7819 #endif /* CONFIG_X86_32 */
7820
7821 #endif /* __ASSEMBLY__ */
7822 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
7823 --- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7824 +++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7825 @@ -78,19 +78,19 @@ struct pv_init_ops {
7826 */
7827 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828 unsigned long addr, unsigned len);
7829 -};
7830 +} __no_const;
7831
7832
7833 struct pv_lazy_ops {
7834 /* Set deferred update mode, used for batching operations. */
7835 void (*enter)(void);
7836 void (*leave)(void);
7837 -};
7838 +} __no_const;
7839
7840 struct pv_time_ops {
7841 unsigned long long (*sched_clock)(void);
7842 unsigned long (*get_tsc_khz)(void);
7843 -};
7844 +} __no_const;
7845
7846 struct pv_cpu_ops {
7847 /* hooks for various privileged instructions */
7848 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849
7850 void (*start_context_switch)(struct task_struct *prev);
7851 void (*end_context_switch)(struct task_struct *next);
7852 -};
7853 +} __no_const;
7854
7855 struct pv_irq_ops {
7856 /*
7857 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7858 unsigned long start_eip,
7859 unsigned long start_esp);
7860 #endif
7861 -};
7862 +} __no_const;
7863
7864 struct pv_mmu_ops {
7865 unsigned long (*read_cr2)(void);
7866 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867 struct paravirt_callee_save make_pud;
7868
7869 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871 #endif /* PAGETABLE_LEVELS == 4 */
7872 #endif /* PAGETABLE_LEVELS >= 3 */
7873
7874 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875 an mfn. We can tell which is which from the index. */
7876 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877 phys_addr_t phys, pgprot_t flags);
7878 +
7879 +#ifdef CONFIG_PAX_KERNEXEC
7880 + unsigned long (*pax_open_kernel)(void);
7881 + unsigned long (*pax_close_kernel)(void);
7882 +#endif
7883 +
7884 };
7885
7886 struct arch_spinlock;
7887 @@ -327,7 +334,7 @@ struct pv_lock_ops {
7888 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889 int (*spin_trylock)(struct arch_spinlock *lock);
7890 void (*spin_unlock)(struct arch_spinlock *lock);
7891 -};
7892 +} __no_const;
7893
7894 /* This contains all the paravirt structures: we get a convenient
7895 * number for each function using the offset which we use to indicate
7896 diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
7897 --- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7898 +++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7899 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900 pmd_t *pmd, pte_t *pte)
7901 {
7902 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904 +}
7905 +
7906 +static inline void pmd_populate_user(struct mm_struct *mm,
7907 + pmd_t *pmd, pte_t *pte)
7908 +{
7909 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911 }
7912
7913 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
7914 --- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7915 +++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7916 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7917
7918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919 {
7920 + pax_open_kernel();
7921 *pmdp = pmd;
7922 + pax_close_kernel();
7923 }
7924
7925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
7927 --- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7928 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7929 @@ -25,9 +25,6 @@
7930 struct mm_struct;
7931 struct vm_area_struct;
7932
7933 -extern pgd_t swapper_pg_dir[1024];
7934 -extern pgd_t initial_page_table[1024];
7935 -
7936 static inline void pgtable_cache_init(void) { }
7937 static inline void check_pgt_cache(void) { }
7938 void paging_init(void);
7939 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940 # include <asm/pgtable-2level.h>
7941 #endif
7942
7943 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7945 +#ifdef CONFIG_X86_PAE
7946 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947 +#endif
7948 +
7949 #if defined(CONFIG_HIGHPTE)
7950 #define pte_offset_map(dir, address) \
7951 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7952 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953 /* Clear a kernel PTE and flush it from the TLB */
7954 #define kpte_clear_flush(ptep, vaddr) \
7955 do { \
7956 + pax_open_kernel(); \
7957 pte_clear(&init_mm, (vaddr), (ptep)); \
7958 + pax_close_kernel(); \
7959 __flush_tlb_one((vaddr)); \
7960 } while (0)
7961
7962 @@ -74,6 +79,9 @@ do { \
7963
7964 #endif /* !__ASSEMBLY__ */
7965
7966 +#define HAVE_ARCH_UNMAPPED_AREA
7967 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968 +
7969 /*
7970 * kern_addr_valid() is (1) for FLATMEM and (0) for
7971 * SPARSEMEM and DISCONTIGMEM
7972 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
7973 --- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975 @@ -8,7 +8,7 @@
7976 */
7977 #ifdef CONFIG_X86_PAE
7978 # include <asm/pgtable-3level_types.h>
7979 -# define PMD_SIZE (1UL << PMD_SHIFT)
7980 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7981 # define PMD_MASK (~(PMD_SIZE - 1))
7982 #else
7983 # include <asm/pgtable-2level_types.h>
7984 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7985 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7986 #endif
7987
7988 +#ifdef CONFIG_PAX_KERNEXEC
7989 +#ifndef __ASSEMBLY__
7990 +extern unsigned char MODULES_EXEC_VADDR[];
7991 +extern unsigned char MODULES_EXEC_END[];
7992 +#endif
7993 +#include <asm/boot.h>
7994 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996 +#else
7997 +#define ktla_ktva(addr) (addr)
7998 +#define ktva_ktla(addr) (addr)
7999 +#endif
8000 +
8001 #define MODULES_VADDR VMALLOC_START
8002 #define MODULES_END VMALLOC_END
8003 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8004 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8005 --- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8006 +++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8007 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008
8009 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010 {
8011 + pax_open_kernel();
8012 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013 + pax_close_kernel();
8014 }
8015
8016 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017 {
8018 + pax_open_kernel();
8019 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020 + pax_close_kernel();
8021 }
8022
8023 /*
8024 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8025 --- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8026 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8027 @@ -16,10 +16,13 @@
8028
8029 extern pud_t level3_kernel_pgt[512];
8030 extern pud_t level3_ident_pgt[512];
8031 +extern pud_t level3_vmalloc_pgt[512];
8032 +extern pud_t level3_vmemmap_pgt[512];
8033 +extern pud_t level2_vmemmap_pgt[512];
8034 extern pmd_t level2_kernel_pgt[512];
8035 extern pmd_t level2_fixmap_pgt[512];
8036 -extern pmd_t level2_ident_pgt[512];
8037 -extern pgd_t init_level4_pgt[];
8038 +extern pmd_t level2_ident_pgt[512*2];
8039 +extern pgd_t init_level4_pgt[512];
8040
8041 #define swapper_pg_dir init_level4_pgt
8042
8043 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044
8045 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046 {
8047 + pax_open_kernel();
8048 *pmdp = pmd;
8049 + pax_close_kernel();
8050 }
8051
8052 static inline void native_pmd_clear(pmd_t *pmd)
8053 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054
8055 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056 {
8057 + pax_open_kernel();
8058 + *pgdp = pgd;
8059 + pax_close_kernel();
8060 +}
8061 +
8062 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063 +{
8064 *pgdp = pgd;
8065 }
8066
8067 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8068 --- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8072 #define MODULES_END _AC(0xffffffffff000000, UL)
8073 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8074 +#define MODULES_EXEC_VADDR MODULES_VADDR
8075 +#define MODULES_EXEC_END MODULES_END
8076 +
8077 +#define ktla_ktva(addr) (addr)
8078 +#define ktva_ktla(addr) (addr)
8079
8080 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8082 --- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8083 +++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8084 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085
8086 #ifndef __PAGETABLE_PUD_FOLDED
8087 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8088 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8089 #define pgd_clear(pgd) native_pgd_clear(pgd)
8090 #endif
8091
8092 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093
8094 #define arch_end_context_switch(prev) do {} while(0)
8095
8096 +#define pax_open_kernel() native_pax_open_kernel()
8097 +#define pax_close_kernel() native_pax_close_kernel()
8098 #endif /* CONFIG_PARAVIRT */
8099
8100 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8101 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8102 +
8103 +#ifdef CONFIG_PAX_KERNEXEC
8104 +static inline unsigned long native_pax_open_kernel(void)
8105 +{
8106 + unsigned long cr0;
8107 +
8108 + preempt_disable();
8109 + barrier();
8110 + cr0 = read_cr0() ^ X86_CR0_WP;
8111 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112 + write_cr0(cr0);
8113 + return cr0 ^ X86_CR0_WP;
8114 +}
8115 +
8116 +static inline unsigned long native_pax_close_kernel(void)
8117 +{
8118 + unsigned long cr0;
8119 +
8120 + cr0 = read_cr0() ^ X86_CR0_WP;
8121 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122 + write_cr0(cr0);
8123 + barrier();
8124 + preempt_enable_no_resched();
8125 + return cr0 ^ X86_CR0_WP;
8126 +}
8127 +#else
8128 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130 +#endif
8131 +
8132 /*
8133 * The following only work if pte_present() is true.
8134 * Undefined behaviour if not..
8135 */
8136 +static inline int pte_user(pte_t pte)
8137 +{
8138 + return pte_val(pte) & _PAGE_USER;
8139 +}
8140 +
8141 static inline int pte_dirty(pte_t pte)
8142 {
8143 return pte_flags(pte) & _PAGE_DIRTY;
8144 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8145 return pte_clear_flags(pte, _PAGE_RW);
8146 }
8147
8148 +static inline pte_t pte_mkread(pte_t pte)
8149 +{
8150 + return __pte(pte_val(pte) | _PAGE_USER);
8151 +}
8152 +
8153 static inline pte_t pte_mkexec(pte_t pte)
8154 {
8155 - return pte_clear_flags(pte, _PAGE_NX);
8156 +#ifdef CONFIG_X86_PAE
8157 + if (__supported_pte_mask & _PAGE_NX)
8158 + return pte_clear_flags(pte, _PAGE_NX);
8159 + else
8160 +#endif
8161 + return pte_set_flags(pte, _PAGE_USER);
8162 +}
8163 +
8164 +static inline pte_t pte_exprotect(pte_t pte)
8165 +{
8166 +#ifdef CONFIG_X86_PAE
8167 + if (__supported_pte_mask & _PAGE_NX)
8168 + return pte_set_flags(pte, _PAGE_NX);
8169 + else
8170 +#endif
8171 + return pte_clear_flags(pte, _PAGE_USER);
8172 }
8173
8174 static inline pte_t pte_mkdirty(pte_t pte)
8175 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8176 #endif
8177
8178 #ifndef __ASSEMBLY__
8179 +
8180 +#ifdef CONFIG_PAX_PER_CPU_PGD
8181 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183 +{
8184 + return cpu_pgd[cpu];
8185 +}
8186 +#endif
8187 +
8188 #include <linux/mm_types.h>
8189
8190 static inline int pte_none(pte_t pte)
8191 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192
8193 static inline int pgd_bad(pgd_t pgd)
8194 {
8195 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197 }
8198
8199 static inline int pgd_none(pgd_t pgd)
8200 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201 * pgd_offset() returns a (pgd_t *)
8202 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203 */
8204 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206 +
8207 +#ifdef CONFIG_PAX_PER_CPU_PGD
8208 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209 +#endif
8210 +
8211 /*
8212 * a shortcut which implies the use of the kernel's pgd, instead
8213 * of a process's
8214 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8216 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217
8218 +#ifdef CONFIG_X86_32
8219 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8220 +#else
8221 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223 +
8224 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8225 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226 +#else
8227 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8228 +#endif
8229 +
8230 +#endif
8231 +
8232 #ifndef __ASSEMBLY__
8233
8234 extern int direct_gbpages;
8235 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236 * dst and src can be on the same page, but the range must not overlap,
8237 * and must not cross a page boundary.
8238 */
8239 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241 {
8242 - memcpy(dst, src, count * sizeof(pgd_t));
8243 + pax_open_kernel();
8244 + while (count--)
8245 + *dst++ = *src++;
8246 + pax_close_kernel();
8247 }
8248
8249 +#ifdef CONFIG_PAX_PER_CPU_PGD
8250 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251 +#endif
8252 +
8253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255 +#else
8256 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257 +#endif
8258
8259 #include <asm-generic/pgtable.h>
8260 #endif /* __ASSEMBLY__ */
8261 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8262 --- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8263 +++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8264 @@ -16,13 +16,12 @@
8265 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8266 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8267 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8268 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8269 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8270 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8271 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8272 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8273 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8274 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8275 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8277 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8279
8280 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281 @@ -40,7 +39,6 @@
8282 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289 @@ -57,8 +55,10 @@
8290
8291 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293 -#else
8294 +#elif defined(CONFIG_KMEMCHECK)
8295 #define _PAGE_NX (_AT(pteval_t, 0))
8296 +#else
8297 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298 #endif
8299
8300 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301 @@ -96,6 +96,9 @@
8302 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8303 _PAGE_ACCESSED)
8304
8305 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307 +
8308 #define __PAGE_KERNEL_EXEC \
8309 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311 @@ -106,8 +109,8 @@
8312 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8315 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8316 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8318 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8320 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322 @@ -166,8 +169,8 @@
8323 * bits are combined, this will alow user to access the high address mapped
8324 * VDSO in the presence of CONFIG_COMPAT_VDSO
8325 */
8326 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8327 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8329 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8331 #endif
8332
8333 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334 {
8335 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336 }
8337 +#endif
8338
8339 +#if PAGETABLE_LEVELS == 3
8340 +#include <asm-generic/pgtable-nopud.h>
8341 +#endif
8342 +
8343 +#if PAGETABLE_LEVELS == 2
8344 +#include <asm-generic/pgtable-nopmd.h>
8345 +#endif
8346 +
8347 +#ifndef __ASSEMBLY__
8348 #if PAGETABLE_LEVELS > 3
8349 typedef struct { pudval_t pud; } pud_t;
8350
8351 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352 return pud.pud;
8353 }
8354 #else
8355 -#include <asm-generic/pgtable-nopud.h>
8356 -
8357 static inline pudval_t native_pud_val(pud_t pud)
8358 {
8359 return native_pgd_val(pud.pgd);
8360 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361 return pmd.pmd;
8362 }
8363 #else
8364 -#include <asm-generic/pgtable-nopmd.h>
8365 -
8366 static inline pmdval_t native_pmd_val(pmd_t pmd)
8367 {
8368 return native_pgd_val(pmd.pud.pgd);
8369 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370
8371 extern pteval_t __supported_pte_mask;
8372 extern void set_nx(void);
8373 -extern int nx_enabled;
8374
8375 #define pgprot_writecombine pgprot_writecombine
8376 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377 diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8378 --- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8379 +++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8380 @@ -266,7 +266,7 @@ struct tss_struct {
8381
8382 } ____cacheline_aligned;
8383
8384 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385 +extern struct tss_struct init_tss[NR_CPUS];
8386
8387 /*
8388 * Save the original ist values for checking stack pointers during debugging
8389 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define TASK_SIZE PAGE_OFFSET
8392 #define TASK_SIZE_MAX TASK_SIZE
8393 +
8394 +#ifdef CONFIG_PAX_SEGMEXEC
8395 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8396 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397 +#else
8398 #define STACK_TOP TASK_SIZE
8399 -#define STACK_TOP_MAX STACK_TOP
8400 +#endif
8401 +
8402 +#define STACK_TOP_MAX TASK_SIZE
8403
8404 #define INIT_THREAD { \
8405 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .vm86_info = NULL, \
8408 .sysenter_cs = __KERNEL_CS, \
8409 .io_bitmap_ptr = NULL, \
8410 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411 */
8412 #define INIT_TSS { \
8413 .x86_tss = { \
8414 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8415 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8416 .ss0 = __KERNEL_DS, \
8417 .ss1 = __KERNEL_CS, \
8418 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8419 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421
8422 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8423 -#define KSTK_TOP(info) \
8424 -({ \
8425 - unsigned long *__ptr = (unsigned long *)(info); \
8426 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8427 -})
8428 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429
8430 /*
8431 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433 #define task_pt_regs(task) \
8434 ({ \
8435 struct pt_regs *__regs__; \
8436 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8438 __regs__ - 1; \
8439 })
8440
8441 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442 /*
8443 * User space process size. 47bits minus one guard page.
8444 */
8445 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8446 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447
8448 /* This decides where the kernel will search for a free chunk of vm
8449 * space during mmap's.
8450 */
8451 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8452 - 0xc0000000 : 0xFFFFe000)
8453 + 0xc0000000 : 0xFFFFf000)
8454
8455 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8456 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458 #define STACK_TOP_MAX TASK_SIZE_MAX
8459
8460 #define INIT_THREAD { \
8461 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463 }
8464
8465 #define INIT_TSS { \
8466 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468 }
8469
8470 /*
8471 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8472 */
8473 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8474
8475 +#ifdef CONFIG_PAX_SEGMEXEC
8476 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477 +#endif
8478 +
8479 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8480
8481 /* Get/set a process' ability to use the timestamp counter instruction */
8482 diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8483 --- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8484 +++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8485 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486 }
8487
8488 /*
8489 - * user_mode_vm(regs) determines whether a register set came from user mode.
8490 + * user_mode(regs) determines whether a register set came from user mode.
8491 * This is true if V8086 mode was enabled OR if the register set was from
8492 * protected mode with RPL-3 CS value. This tricky test checks that with
8493 * one comparison. Many places in the kernel can bypass this full check
8494 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496 + * be used.
8497 */
8498 -static inline int user_mode(struct pt_regs *regs)
8499 +static inline int user_mode_novm(struct pt_regs *regs)
8500 {
8501 #ifdef CONFIG_X86_32
8502 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503 #else
8504 - return !!(regs->cs & 3);
8505 + return !!(regs->cs & SEGMENT_RPL_MASK);
8506 #endif
8507 }
8508
8509 -static inline int user_mode_vm(struct pt_regs *regs)
8510 +static inline int user_mode(struct pt_regs *regs)
8511 {
8512 #ifdef CONFIG_X86_32
8513 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514 USER_RPL;
8515 #else
8516 - return user_mode(regs);
8517 + return user_mode_novm(regs);
8518 #endif
8519 }
8520
8521 diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8522 --- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8523 +++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8524 @@ -6,19 +6,19 @@
8525 struct pt_regs;
8526
8527 struct machine_ops {
8528 - void (*restart)(char *cmd);
8529 - void (*halt)(void);
8530 - void (*power_off)(void);
8531 + void (* __noreturn restart)(char *cmd);
8532 + void (* __noreturn halt)(void);
8533 + void (* __noreturn power_off)(void);
8534 void (*shutdown)(void);
8535 void (*crash_shutdown)(struct pt_regs *);
8536 - void (*emergency_restart)(void);
8537 -};
8538 + void (* __noreturn emergency_restart)(void);
8539 +} __no_const;
8540
8541 extern struct machine_ops machine_ops;
8542
8543 void native_machine_crash_shutdown(struct pt_regs *regs);
8544 void native_machine_shutdown(void);
8545 -void machine_real_restart(unsigned int type);
8546 +void machine_real_restart(unsigned int type) __noreturn;
8547 /* These must match dispatch_table in reboot_32.S */
8548 #define MRR_BIOS 0
8549 #define MRR_APM 1
8550 diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8551 --- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8552 +++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8553 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554 {
8555 asm volatile("# beginning down_read\n\t"
8556 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557 +
8558 +#ifdef CONFIG_PAX_REFCOUNT
8559 + "jno 0f\n"
8560 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8561 + "int $4\n0:\n"
8562 + _ASM_EXTABLE(0b, 0b)
8563 +#endif
8564 +
8565 /* adds 0x00000001 */
8566 " jns 1f\n"
8567 " call call_rwsem_down_read_failed\n"
8568 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569 "1:\n\t"
8570 " mov %1,%2\n\t"
8571 " add %3,%2\n\t"
8572 +
8573 +#ifdef CONFIG_PAX_REFCOUNT
8574 + "jno 0f\n"
8575 + "sub %3,%2\n"
8576 + "int $4\n0:\n"
8577 + _ASM_EXTABLE(0b, 0b)
8578 +#endif
8579 +
8580 " jle 2f\n\t"
8581 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8582 " jnz 1b\n\t"
8583 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584 long tmp;
8585 asm volatile("# beginning down_write\n\t"
8586 LOCK_PREFIX " xadd %1,(%2)\n\t"
8587 +
8588 +#ifdef CONFIG_PAX_REFCOUNT
8589 + "jno 0f\n"
8590 + "mov %1,(%2)\n"
8591 + "int $4\n0:\n"
8592 + _ASM_EXTABLE(0b, 0b)
8593 +#endif
8594 +
8595 /* adds 0xffff0001, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599 long tmp;
8600 asm volatile("# beginning __up_read\n\t"
8601 LOCK_PREFIX " xadd %1,(%2)\n\t"
8602 +
8603 +#ifdef CONFIG_PAX_REFCOUNT
8604 + "jno 0f\n"
8605 + "mov %1,(%2)\n"
8606 + "int $4\n0:\n"
8607 + _ASM_EXTABLE(0b, 0b)
8608 +#endif
8609 +
8610 /* subtracts 1, returns the old value */
8611 " jns 1f\n\t"
8612 " call call_rwsem_wake\n" /* expects old value in %edx */
8613 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614 long tmp;
8615 asm volatile("# beginning __up_write\n\t"
8616 LOCK_PREFIX " xadd %1,(%2)\n\t"
8617 +
8618 +#ifdef CONFIG_PAX_REFCOUNT
8619 + "jno 0f\n"
8620 + "mov %1,(%2)\n"
8621 + "int $4\n0:\n"
8622 + _ASM_EXTABLE(0b, 0b)
8623 +#endif
8624 +
8625 /* subtracts 0xffff0001, returns the old value */
8626 " jns 1f\n\t"
8627 " call call_rwsem_wake\n" /* expects old value in %edx */
8628 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629 {
8630 asm volatile("# beginning __downgrade_write\n\t"
8631 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632 +
8633 +#ifdef CONFIG_PAX_REFCOUNT
8634 + "jno 0f\n"
8635 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636 + "int $4\n0:\n"
8637 + _ASM_EXTABLE(0b, 0b)
8638 +#endif
8639 +
8640 /*
8641 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644 */
8645 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646 {
8647 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649 +
8650 +#ifdef CONFIG_PAX_REFCOUNT
8651 + "jno 0f\n"
8652 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653 + "int $4\n0:\n"
8654 + _ASM_EXTABLE(0b, 0b)
8655 +#endif
8656 +
8657 : "+m" (sem->count)
8658 : "er" (delta));
8659 }
8660 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661 {
8662 long tmp = delta;
8663
8664 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8665 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666 +
8667 +#ifdef CONFIG_PAX_REFCOUNT
8668 + "jno 0f\n"
8669 + "mov %0,%1\n"
8670 + "int $4\n0:\n"
8671 + _ASM_EXTABLE(0b, 0b)
8672 +#endif
8673 +
8674 : "+r" (tmp), "+m" (sem->count)
8675 : : "memory");
8676
8677 diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8678 --- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8679 +++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8680 @@ -64,8 +64,8 @@
8681 * 26 - ESPFIX small SS
8682 * 27 - per-cpu [ offset to per-cpu data area ]
8683 * 28 - stack_canary-20 [ for stack protector ]
8684 - * 29 - unused
8685 - * 30 - unused
8686 + * 29 - PCI BIOS CS
8687 + * 30 - PCI BIOS DS
8688 * 31 - TSS for double fault handler
8689 */
8690 #define GDT_ENTRY_TLS_MIN 6
8691 @@ -79,6 +79,8 @@
8692
8693 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8694
8695 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8696 +
8697 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8698
8699 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8700 @@ -104,6 +106,12 @@
8701 #define __KERNEL_STACK_CANARY 0
8702 #endif
8703
8704 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8705 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8706 +
8707 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8708 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8709 +
8710 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8711
8712 /*
8713 @@ -141,7 +149,7 @@
8714 */
8715
8716 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8717 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8718 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8719
8720
8721 #else
8722 @@ -165,6 +173,8 @@
8723 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8724 #define __USER32_DS __USER_DS
8725
8726 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8727 +
8728 #define GDT_ENTRY_TSS 8 /* needs two entries */
8729 #define GDT_ENTRY_LDT 10 /* needs two entries */
8730 #define GDT_ENTRY_TLS_MIN 12
8731 @@ -185,6 +195,7 @@
8732 #endif
8733
8734 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8735 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8736 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8737 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8738 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8739 diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8740 --- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8741 +++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8742 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8743 /* cpus sharing the last level cache: */
8744 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8745 DECLARE_PER_CPU(u16, cpu_llc_id);
8746 -DECLARE_PER_CPU(int, cpu_number);
8747 +DECLARE_PER_CPU(unsigned int, cpu_number);
8748
8749 static inline struct cpumask *cpu_sibling_mask(int cpu)
8750 {
8751 @@ -77,7 +77,7 @@ struct smp_ops {
8752
8753 void (*send_call_func_ipi)(const struct cpumask *mask);
8754 void (*send_call_func_single_ipi)(int cpu);
8755 -};
8756 +} __no_const;
8757
8758 /* Globals due to paravirt */
8759 extern void set_cpu_sibling_map(int cpu);
8760 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8761 extern int safe_smp_processor_id(void);
8762
8763 #elif defined(CONFIG_X86_64_SMP)
8764 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8765 -
8766 -#define stack_smp_processor_id() \
8767 -({ \
8768 - struct thread_info *ti; \
8769 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8770 - ti->cpu; \
8771 -})
8772 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8773 +#define stack_smp_processor_id() raw_smp_processor_id()
8774 #define safe_smp_processor_id() smp_processor_id()
8775
8776 #endif
8777 diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8778 --- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8779 +++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8780 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8781 static inline void arch_read_lock(arch_rwlock_t *rw)
8782 {
8783 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8784 +
8785 +#ifdef CONFIG_PAX_REFCOUNT
8786 + "jno 0f\n"
8787 + LOCK_PREFIX " addl $1,(%0)\n"
8788 + "int $4\n0:\n"
8789 + _ASM_EXTABLE(0b, 0b)
8790 +#endif
8791 +
8792 "jns 1f\n"
8793 "call __read_lock_failed\n\t"
8794 "1:\n"
8795 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8796 static inline void arch_write_lock(arch_rwlock_t *rw)
8797 {
8798 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8799 +
8800 +#ifdef CONFIG_PAX_REFCOUNT
8801 + "jno 0f\n"
8802 + LOCK_PREFIX " addl %1,(%0)\n"
8803 + "int $4\n0:\n"
8804 + _ASM_EXTABLE(0b, 0b)
8805 +#endif
8806 +
8807 "jz 1f\n"
8808 "call __write_lock_failed\n\t"
8809 "1:\n"
8810 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8811
8812 static inline void arch_read_unlock(arch_rwlock_t *rw)
8813 {
8814 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8815 + asm volatile(LOCK_PREFIX "incl %0\n"
8816 +
8817 +#ifdef CONFIG_PAX_REFCOUNT
8818 + "jno 0f\n"
8819 + LOCK_PREFIX "decl %0\n"
8820 + "int $4\n0:\n"
8821 + _ASM_EXTABLE(0b, 0b)
8822 +#endif
8823 +
8824 + :"+m" (rw->lock) : : "memory");
8825 }
8826
8827 static inline void arch_write_unlock(arch_rwlock_t *rw)
8828 {
8829 - asm volatile(LOCK_PREFIX "addl %1, %0"
8830 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8831 +
8832 +#ifdef CONFIG_PAX_REFCOUNT
8833 + "jno 0f\n"
8834 + LOCK_PREFIX "subl %1, %0\n"
8835 + "int $4\n0:\n"
8836 + _ASM_EXTABLE(0b, 0b)
8837 +#endif
8838 +
8839 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8840 }
8841
8842 diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
8843 --- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8844 +++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8845 @@ -48,7 +48,7 @@
8846 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8847 */
8848 #define GDT_STACK_CANARY_INIT \
8849 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8850 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8851
8852 /*
8853 * Initialize the stackprotector canary value.
8854 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8855
8856 static inline void load_stack_canary_segment(void)
8857 {
8858 -#ifdef CONFIG_X86_32
8859 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8860 asm volatile ("mov %0, %%gs" : : "r" (0));
8861 #endif
8862 }
8863 diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
8864 --- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8865 +++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8866 @@ -11,28 +11,20 @@
8867
8868 extern int kstack_depth_to_print;
8869
8870 -struct thread_info;
8871 +struct task_struct;
8872 struct stacktrace_ops;
8873
8874 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8875 - unsigned long *stack,
8876 - unsigned long bp,
8877 - const struct stacktrace_ops *ops,
8878 - void *data,
8879 - unsigned long *end,
8880 - int *graph);
8881 -
8882 -extern unsigned long
8883 -print_context_stack(struct thread_info *tinfo,
8884 - unsigned long *stack, unsigned long bp,
8885 - const struct stacktrace_ops *ops, void *data,
8886 - unsigned long *end, int *graph);
8887 -
8888 -extern unsigned long
8889 -print_context_stack_bp(struct thread_info *tinfo,
8890 - unsigned long *stack, unsigned long bp,
8891 - const struct stacktrace_ops *ops, void *data,
8892 - unsigned long *end, int *graph);
8893 +typedef unsigned long walk_stack_t(struct task_struct *task,
8894 + void *stack_start,
8895 + unsigned long *stack,
8896 + unsigned long bp,
8897 + const struct stacktrace_ops *ops,
8898 + void *data,
8899 + unsigned long *end,
8900 + int *graph);
8901 +
8902 +extern walk_stack_t print_context_stack;
8903 +extern walk_stack_t print_context_stack_bp;
8904
8905 /* Generic stack tracer with callbacks */
8906
8907 @@ -40,7 +32,7 @@ struct stacktrace_ops {
8908 void (*address)(void *data, unsigned long address, int reliable);
8909 /* On negative return stop dumping */
8910 int (*stack)(void *data, char *name);
8911 - walk_stack_t walk_stack;
8912 + walk_stack_t *walk_stack;
8913 };
8914
8915 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8916 diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
8917 --- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8918 +++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8919 @@ -129,7 +129,7 @@ do { \
8920 "call __switch_to\n\t" \
8921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8922 __switch_canary \
8923 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8924 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8925 "movq %%rax,%%rdi\n\t" \
8926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8927 "jnz ret_from_fork\n\t" \
8928 @@ -140,7 +140,7 @@ do { \
8929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8931 [_tif_fork] "i" (_TIF_FORK), \
8932 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8933 + [thread_info] "m" (current_tinfo), \
8934 [current_task] "m" (current_task) \
8935 __switch_canary_iparam \
8936 : "memory", "cc" __EXTRA_CLOBBER)
8937 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8938 {
8939 unsigned long __limit;
8940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8941 - return __limit + 1;
8942 + return __limit;
8943 }
8944
8945 static inline void native_clts(void)
8946 @@ -397,12 +397,12 @@ void enable_hlt(void);
8947
8948 void cpu_idle_wait(void);
8949
8950 -extern unsigned long arch_align_stack(unsigned long sp);
8951 +#define arch_align_stack(x) ((x) & ~0xfUL)
8952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8953
8954 void default_idle(void);
8955
8956 -void stop_this_cpu(void *dummy);
8957 +void stop_this_cpu(void *dummy) __noreturn;
8958
8959 /*
8960 * Force strict CPU ordering.
8961 diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
8962 --- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8963 +++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8964 @@ -10,6 +10,7 @@
8965 #include <linux/compiler.h>
8966 #include <asm/page.h>
8967 #include <asm/types.h>
8968 +#include <asm/percpu.h>
8969
8970 /*
8971 * low level task data that entry.S needs immediate access to
8972 @@ -24,7 +25,6 @@ struct exec_domain;
8973 #include <asm/atomic.h>
8974
8975 struct thread_info {
8976 - struct task_struct *task; /* main task structure */
8977 struct exec_domain *exec_domain; /* execution domain */
8978 __u32 flags; /* low level flags */
8979 __u32 status; /* thread synchronous flags */
8980 @@ -34,18 +34,12 @@ struct thread_info {
8981 mm_segment_t addr_limit;
8982 struct restart_block restart_block;
8983 void __user *sysenter_return;
8984 -#ifdef CONFIG_X86_32
8985 - unsigned long previous_esp; /* ESP of the previous stack in
8986 - case of nested (IRQ) stacks
8987 - */
8988 - __u8 supervisor_stack[0];
8989 -#endif
8990 + unsigned long lowest_stack;
8991 int uaccess_err;
8992 };
8993
8994 -#define INIT_THREAD_INFO(tsk) \
8995 +#define INIT_THREAD_INFO \
8996 { \
8997 - .task = &tsk, \
8998 .exec_domain = &default_exec_domain, \
8999 .flags = 0, \
9000 .cpu = 0, \
9001 @@ -56,7 +50,7 @@ struct thread_info {
9002 }, \
9003 }
9004
9005 -#define init_thread_info (init_thread_union.thread_info)
9006 +#define init_thread_info (init_thread_union.stack)
9007 #define init_stack (init_thread_union.stack)
9008
9009 #else /* !__ASSEMBLY__ */
9010 @@ -170,6 +164,23 @@ struct thread_info {
9011 ret; \
9012 })
9013
9014 +#ifdef __ASSEMBLY__
9015 +/* how to get the thread information struct from ASM */
9016 +#define GET_THREAD_INFO(reg) \
9017 + mov PER_CPU_VAR(current_tinfo), reg
9018 +
9019 +/* use this one if reg already contains %esp */
9020 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9021 +#else
9022 +/* how to get the thread information struct from C */
9023 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9024 +
9025 +static __always_inline struct thread_info *current_thread_info(void)
9026 +{
9027 + return percpu_read_stable(current_tinfo);
9028 +}
9029 +#endif
9030 +
9031 #ifdef CONFIG_X86_32
9032
9033 #define STACK_WARN (THREAD_SIZE/8)
9034 @@ -180,35 +191,13 @@ struct thread_info {
9035 */
9036 #ifndef __ASSEMBLY__
9037
9038 -
9039 /* how to get the current stack pointer from C */
9040 register unsigned long current_stack_pointer asm("esp") __used;
9041
9042 -/* how to get the thread information struct from C */
9043 -static inline struct thread_info *current_thread_info(void)
9044 -{
9045 - return (struct thread_info *)
9046 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9047 -}
9048 -
9049 -#else /* !__ASSEMBLY__ */
9050 -
9051 -/* how to get the thread information struct from ASM */
9052 -#define GET_THREAD_INFO(reg) \
9053 - movl $-THREAD_SIZE, reg; \
9054 - andl %esp, reg
9055 -
9056 -/* use this one if reg already contains %esp */
9057 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9058 - andl $-THREAD_SIZE, reg
9059 -
9060 #endif
9061
9062 #else /* X86_32 */
9063
9064 -#include <asm/percpu.h>
9065 -#define KERNEL_STACK_OFFSET (5*8)
9066 -
9067 /*
9068 * macros/functions for gaining access to the thread information structure
9069 * preempt_count needs to be 1 initially, until the scheduler is functional.
9070 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9071 #ifndef __ASSEMBLY__
9072 DECLARE_PER_CPU(unsigned long, kernel_stack);
9073
9074 -static inline struct thread_info *current_thread_info(void)
9075 -{
9076 - struct thread_info *ti;
9077 - ti = (void *)(percpu_read_stable(kernel_stack) +
9078 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9079 - return ti;
9080 -}
9081 -
9082 -#else /* !__ASSEMBLY__ */
9083 -
9084 -/* how to get the thread information struct from ASM */
9085 -#define GET_THREAD_INFO(reg) \
9086 - movq PER_CPU_VAR(kernel_stack),reg ; \
9087 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9088 -
9089 +/* how to get the current stack pointer from C */
9090 +register unsigned long current_stack_pointer asm("rsp") __used;
9091 #endif
9092
9093 #endif /* !X86_32 */
9094 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9095 extern void free_thread_info(struct thread_info *ti);
9096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9097 #define arch_task_cache_init arch_task_cache_init
9098 +
9099 +#define __HAVE_THREAD_FUNCTIONS
9100 +#define task_thread_info(task) (&(task)->tinfo)
9101 +#define task_stack_page(task) ((task)->stack)
9102 +#define setup_thread_stack(p, org) do {} while (0)
9103 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9104 +
9105 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9106 +extern struct task_struct *alloc_task_struct_node(int node);
9107 +extern void free_task_struct(struct task_struct *);
9108 +
9109 #endif
9110 #endif /* _ASM_X86_THREAD_INFO_H */
9111 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9112 --- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9113 +++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9114 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9115 static __always_inline unsigned long __must_check
9116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9117 {
9118 + pax_track_stack();
9119 +
9120 + if ((long)n < 0)
9121 + return n;
9122 +
9123 if (__builtin_constant_p(n)) {
9124 unsigned long ret;
9125
9126 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9127 return ret;
9128 }
9129 }
9130 + if (!__builtin_constant_p(n))
9131 + check_object_size(from, n, true);
9132 return __copy_to_user_ll(to, from, n);
9133 }
9134
9135 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9136 __copy_to_user(void __user *to, const void *from, unsigned long n)
9137 {
9138 might_fault();
9139 +
9140 return __copy_to_user_inatomic(to, from, n);
9141 }
9142
9143 static __always_inline unsigned long
9144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9145 {
9146 + if ((long)n < 0)
9147 + return n;
9148 +
9149 /* Avoid zeroing the tail if the copy fails..
9150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9151 * but as the zeroing behaviour is only significant when n is not
9152 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9153 __copy_from_user(void *to, const void __user *from, unsigned long n)
9154 {
9155 might_fault();
9156 +
9157 + pax_track_stack();
9158 +
9159 + if ((long)n < 0)
9160 + return n;
9161 +
9162 if (__builtin_constant_p(n)) {
9163 unsigned long ret;
9164
9165 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9166 return ret;
9167 }
9168 }
9169 + if (!__builtin_constant_p(n))
9170 + check_object_size(to, n, false);
9171 return __copy_from_user_ll(to, from, n);
9172 }
9173
9174 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9175 const void __user *from, unsigned long n)
9176 {
9177 might_fault();
9178 +
9179 + if ((long)n < 0)
9180 + return n;
9181 +
9182 if (__builtin_constant_p(n)) {
9183 unsigned long ret;
9184
9185 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9187 unsigned long n)
9188 {
9189 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9190 -}
9191 + if ((long)n < 0)
9192 + return n;
9193
9194 -unsigned long __must_check copy_to_user(void __user *to,
9195 - const void *from, unsigned long n);
9196 -unsigned long __must_check _copy_from_user(void *to,
9197 - const void __user *from,
9198 - unsigned long n);
9199 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9200 +}
9201
9202 +extern void copy_to_user_overflow(void)
9203 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9204 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9205 +#else
9206 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9207 +#endif
9208 +;
9209
9210 extern void copy_from_user_overflow(void)
9211 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9213 #endif
9214 ;
9215
9216 -static inline unsigned long __must_check copy_from_user(void *to,
9217 - const void __user *from,
9218 - unsigned long n)
9219 +/**
9220 + * copy_to_user: - Copy a block of data into user space.
9221 + * @to: Destination address, in user space.
9222 + * @from: Source address, in kernel space.
9223 + * @n: Number of bytes to copy.
9224 + *
9225 + * Context: User context only. This function may sleep.
9226 + *
9227 + * Copy data from kernel space to user space.
9228 + *
9229 + * Returns number of bytes that could not be copied.
9230 + * On success, this will be zero.
9231 + */
9232 +static inline unsigned long __must_check
9233 +copy_to_user(void __user *to, const void *from, unsigned long n)
9234 +{
9235 + int sz = __compiletime_object_size(from);
9236 +
9237 + if (unlikely(sz != -1 && sz < n))
9238 + copy_to_user_overflow();
9239 + else if (access_ok(VERIFY_WRITE, to, n))
9240 + n = __copy_to_user(to, from, n);
9241 + return n;
9242 +}
9243 +
9244 +/**
9245 + * copy_from_user: - Copy a block of data from user space.
9246 + * @to: Destination address, in kernel space.
9247 + * @from: Source address, in user space.
9248 + * @n: Number of bytes to copy.
9249 + *
9250 + * Context: User context only. This function may sleep.
9251 + *
9252 + * Copy data from user space to kernel space.
9253 + *
9254 + * Returns number of bytes that could not be copied.
9255 + * On success, this will be zero.
9256 + *
9257 + * If some data could not be copied, this function will pad the copied
9258 + * data to the requested size using zero bytes.
9259 + */
9260 +static inline unsigned long __must_check
9261 +copy_from_user(void *to, const void __user *from, unsigned long n)
9262 {
9263 int sz = __compiletime_object_size(to);
9264
9265 - if (likely(sz == -1 || sz >= n))
9266 - n = _copy_from_user(to, from, n);
9267 - else
9268 + if (unlikely(sz != -1 && sz < n))
9269 copy_from_user_overflow();
9270 -
9271 + else if (access_ok(VERIFY_READ, from, n))
9272 + n = __copy_from_user(to, from, n);
9273 + else if ((long)n > 0) {
9274 + if (!__builtin_constant_p(n))
9275 + check_object_size(to, n, false);
9276 + memset(to, 0, n);
9277 + }
9278 return n;
9279 }
9280
9281 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9282 --- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9283 +++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9284 @@ -10,6 +10,9 @@
9285 #include <asm/alternative.h>
9286 #include <asm/cpufeature.h>
9287 #include <asm/page.h>
9288 +#include <asm/pgtable.h>
9289 +
9290 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9291
9292 /*
9293 * Copy To/From Userspace
9294 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9295 return ret;
9296 }
9297
9298 -__must_check unsigned long
9299 -_copy_to_user(void __user *to, const void *from, unsigned len);
9300 -__must_check unsigned long
9301 -_copy_from_user(void *to, const void __user *from, unsigned len);
9302 +static __always_inline __must_check unsigned long
9303 +__copy_to_user(void __user *to, const void *from, unsigned len);
9304 +static __always_inline __must_check unsigned long
9305 +__copy_from_user(void *to, const void __user *from, unsigned len);
9306 __must_check unsigned long
9307 copy_in_user(void __user *to, const void __user *from, unsigned len);
9308
9309 static inline unsigned long __must_check copy_from_user(void *to,
9310 const void __user *from,
9311 - unsigned long n)
9312 + unsigned n)
9313 {
9314 - int sz = __compiletime_object_size(to);
9315 -
9316 might_fault();
9317 - if (likely(sz == -1 || sz >= n))
9318 - n = _copy_from_user(to, from, n);
9319 -#ifdef CONFIG_DEBUG_VM
9320 - else
9321 - WARN(1, "Buffer overflow detected!\n");
9322 -#endif
9323 +
9324 + if (access_ok(VERIFY_READ, from, n))
9325 + n = __copy_from_user(to, from, n);
9326 + else if ((int)n > 0) {
9327 + if (!__builtin_constant_p(n))
9328 + check_object_size(to, n, false);
9329 + memset(to, 0, n);
9330 + }
9331 return n;
9332 }
9333
9334 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9335 {
9336 might_fault();
9337
9338 - return _copy_to_user(dst, src, size);
9339 + if (access_ok(VERIFY_WRITE, dst, size))
9340 + size = __copy_to_user(dst, src, size);
9341 + return size;
9342 }
9343
9344 static __always_inline __must_check
9345 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9346 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 {
9348 - int ret = 0;
9349 + int sz = __compiletime_object_size(dst);
9350 + unsigned ret = 0;
9351
9352 might_fault();
9353 - if (!__builtin_constant_p(size))
9354 - return copy_user_generic(dst, (__force void *)src, size);
9355 +
9356 + pax_track_stack();
9357 +
9358 + if ((int)size < 0)
9359 + return size;
9360 +
9361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9362 + if (!__access_ok(VERIFY_READ, src, size))
9363 + return size;
9364 +#endif
9365 +
9366 + if (unlikely(sz != -1 && sz < size)) {
9367 +#ifdef CONFIG_DEBUG_VM
9368 + WARN(1, "Buffer overflow detected!\n");
9369 +#endif
9370 + return size;
9371 + }
9372 +
9373 + if (!__builtin_constant_p(size)) {
9374 + check_object_size(dst, size, false);
9375 +
9376 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9377 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9378 + src += PAX_USER_SHADOW_BASE;
9379 +#endif
9380 +
9381 + return copy_user_generic(dst, (__force const void *)src, size);
9382 + }
9383 switch (size) {
9384 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9385 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9386 ret, "b", "b", "=q", 1);
9387 return ret;
9388 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9389 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9390 ret, "w", "w", "=r", 2);
9391 return ret;
9392 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9393 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9394 ret, "l", "k", "=r", 4);
9395 return ret;
9396 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9397 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9398 ret, "q", "", "=r", 8);
9399 return ret;
9400 case 10:
9401 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9402 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9403 ret, "q", "", "=r", 10);
9404 if (unlikely(ret))
9405 return ret;
9406 __get_user_asm(*(u16 *)(8 + (char *)dst),
9407 - (u16 __user *)(8 + (char __user *)src),
9408 + (const u16 __user *)(8 + (const char __user *)src),
9409 ret, "w", "w", "=r", 2);
9410 return ret;
9411 case 16:
9412 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9413 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9414 ret, "q", "", "=r", 16);
9415 if (unlikely(ret))
9416 return ret;
9417 __get_user_asm(*(u64 *)(8 + (char *)dst),
9418 - (u64 __user *)(8 + (char __user *)src),
9419 + (const u64 __user *)(8 + (const char __user *)src),
9420 ret, "q", "", "=r", 8);
9421 return ret;
9422 default:
9423 - return copy_user_generic(dst, (__force void *)src, size);
9424 +
9425 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9426 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9427 + src += PAX_USER_SHADOW_BASE;
9428 +#endif
9429 +
9430 + return copy_user_generic(dst, (__force const void *)src, size);
9431 }
9432 }
9433
9434 static __always_inline __must_check
9435 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9436 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 {
9438 - int ret = 0;
9439 + int sz = __compiletime_object_size(src);
9440 + unsigned ret = 0;
9441
9442 might_fault();
9443 - if (!__builtin_constant_p(size))
9444 +
9445 + pax_track_stack();
9446 +
9447 + if ((int)size < 0)
9448 + return size;
9449 +
9450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9451 + if (!__access_ok(VERIFY_WRITE, dst, size))
9452 + return size;
9453 +#endif
9454 +
9455 + if (unlikely(sz != -1 && sz < size)) {
9456 +#ifdef CONFIG_DEBUG_VM
9457 + WARN(1, "Buffer overflow detected!\n");
9458 +#endif
9459 + return size;
9460 + }
9461 +
9462 + if (!__builtin_constant_p(size)) {
9463 + check_object_size(src, size, true);
9464 +
9465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9466 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9467 + dst += PAX_USER_SHADOW_BASE;
9468 +#endif
9469 +
9470 return copy_user_generic((__force void *)dst, src, size);
9471 + }
9472 switch (size) {
9473 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9474 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9475 ret, "b", "b", "iq", 1);
9476 return ret;
9477 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9478 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9479 ret, "w", "w", "ir", 2);
9480 return ret;
9481 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9482 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9483 ret, "l", "k", "ir", 4);
9484 return ret;
9485 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9486 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9487 ret, "q", "", "er", 8);
9488 return ret;
9489 case 10:
9490 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9491 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9492 ret, "q", "", "er", 10);
9493 if (unlikely(ret))
9494 return ret;
9495 asm("":::"memory");
9496 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9497 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9498 ret, "w", "w", "ir", 2);
9499 return ret;
9500 case 16:
9501 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9502 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9503 ret, "q", "", "er", 16);
9504 if (unlikely(ret))
9505 return ret;
9506 asm("":::"memory");
9507 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9508 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9509 ret, "q", "", "er", 8);
9510 return ret;
9511 default:
9512 +
9513 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9514 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9515 + dst += PAX_USER_SHADOW_BASE;
9516 +#endif
9517 +
9518 return copy_user_generic((__force void *)dst, src, size);
9519 }
9520 }
9521
9522 static __always_inline __must_check
9523 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9524 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 {
9526 - int ret = 0;
9527 + unsigned ret = 0;
9528
9529 might_fault();
9530 - if (!__builtin_constant_p(size))
9531 +
9532 + if ((int)size < 0)
9533 + return size;
9534 +
9535 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9536 + if (!__access_ok(VERIFY_READ, src, size))
9537 + return size;
9538 + if (!__access_ok(VERIFY_WRITE, dst, size))
9539 + return size;
9540 +#endif
9541 +
9542 + if (!__builtin_constant_p(size)) {
9543 +
9544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9545 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9546 + src += PAX_USER_SHADOW_BASE;
9547 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9548 + dst += PAX_USER_SHADOW_BASE;
9549 +#endif
9550 +
9551 return copy_user_generic((__force void *)dst,
9552 - (__force void *)src, size);
9553 + (__force const void *)src, size);
9554 + }
9555 switch (size) {
9556 case 1: {
9557 u8 tmp;
9558 - __get_user_asm(tmp, (u8 __user *)src,
9559 + __get_user_asm(tmp, (const u8 __user *)src,
9560 ret, "b", "b", "=q", 1);
9561 if (likely(!ret))
9562 __put_user_asm(tmp, (u8 __user *)dst,
9563 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9564 }
9565 case 2: {
9566 u16 tmp;
9567 - __get_user_asm(tmp, (u16 __user *)src,
9568 + __get_user_asm(tmp, (const u16 __user *)src,
9569 ret, "w", "w", "=r", 2);
9570 if (likely(!ret))
9571 __put_user_asm(tmp, (u16 __user *)dst,
9572 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9573
9574 case 4: {
9575 u32 tmp;
9576 - __get_user_asm(tmp, (u32 __user *)src,
9577 + __get_user_asm(tmp, (const u32 __user *)src,
9578 ret, "l", "k", "=r", 4);
9579 if (likely(!ret))
9580 __put_user_asm(tmp, (u32 __user *)dst,
9581 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9582 }
9583 case 8: {
9584 u64 tmp;
9585 - __get_user_asm(tmp, (u64 __user *)src,
9586 + __get_user_asm(tmp, (const u64 __user *)src,
9587 ret, "q", "", "=r", 8);
9588 if (likely(!ret))
9589 __put_user_asm(tmp, (u64 __user *)dst,
9590 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9591 return ret;
9592 }
9593 default:
9594 +
9595 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9596 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9597 + src += PAX_USER_SHADOW_BASE;
9598 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9599 + dst += PAX_USER_SHADOW_BASE;
9600 +#endif
9601 +
9602 return copy_user_generic((__force void *)dst,
9603 - (__force void *)src, size);
9604 + (__force const void *)src, size);
9605 }
9606 }
9607
9608 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9609 static __must_check __always_inline int
9610 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9611 {
9612 + pax_track_stack();
9613 +
9614 + if ((int)size < 0)
9615 + return size;
9616 +
9617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9618 + if (!__access_ok(VERIFY_READ, src, size))
9619 + return size;
9620 +
9621 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9622 + src += PAX_USER_SHADOW_BASE;
9623 +#endif
9624 +
9625 return copy_user_generic(dst, (__force const void *)src, size);
9626 }
9627
9628 -static __must_check __always_inline int
9629 +static __must_check __always_inline unsigned long
9630 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9631 {
9632 + if ((int)size < 0)
9633 + return size;
9634 +
9635 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9636 + if (!__access_ok(VERIFY_WRITE, dst, size))
9637 + return size;
9638 +
9639 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9640 + dst += PAX_USER_SHADOW_BASE;
9641 +#endif
9642 +
9643 return copy_user_generic((__force void *)dst, src, size);
9644 }
9645
9646 -extern long __copy_user_nocache(void *dst, const void __user *src,
9647 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9648 unsigned size, int zerorest);
9649
9650 -static inline int
9651 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9652 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 {
9654 might_sleep();
9655 +
9656 + if ((int)size < 0)
9657 + return size;
9658 +
9659 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9660 + if (!__access_ok(VERIFY_READ, src, size))
9661 + return size;
9662 +#endif
9663 +
9664 return __copy_user_nocache(dst, src, size, 1);
9665 }
9666
9667 -static inline int
9668 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9669 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 unsigned size)
9671 {
9672 + if ((int)size < 0)
9673 + return size;
9674 +
9675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9676 + if (!__access_ok(VERIFY_READ, src, size))
9677 + return size;
9678 +#endif
9679 +
9680 return __copy_user_nocache(dst, src, size, 0);
9681 }
9682
9683 -unsigned long
9684 +extern unsigned long
9685 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9686
9687 #endif /* _ASM_X86_UACCESS_64_H */
9688 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9689 --- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9690 +++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9691 @@ -7,12 +7,15 @@
9692 #include <linux/compiler.h>
9693 #include <linux/thread_info.h>
9694 #include <linux/string.h>
9695 +#include <linux/sched.h>
9696 #include <asm/asm.h>
9697 #include <asm/page.h>
9698
9699 #define VERIFY_READ 0
9700 #define VERIFY_WRITE 1
9701
9702 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9703 +
9704 /*
9705 * The fs value determines whether argument validity checking should be
9706 * performed or not. If get_fs() == USER_DS, checking is performed, with
9707 @@ -28,7 +31,12 @@
9708
9709 #define get_ds() (KERNEL_DS)
9710 #define get_fs() (current_thread_info()->addr_limit)
9711 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9712 +void __set_fs(mm_segment_t x);
9713 +void set_fs(mm_segment_t x);
9714 +#else
9715 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9716 +#endif
9717
9718 #define segment_eq(a, b) ((a).seg == (b).seg)
9719
9720 @@ -76,7 +84,33 @@
9721 * checks that the pointer is in the user space range - after calling
9722 * this function, memory access functions may still return -EFAULT.
9723 */
9724 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9725 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726 +#define access_ok(type, addr, size) \
9727 +({ \
9728 + long __size = size; \
9729 + unsigned long __addr = (unsigned long)addr; \
9730 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9731 + unsigned long __end_ao = __addr + __size - 1; \
9732 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9733 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9734 + while(__addr_ao <= __end_ao) { \
9735 + char __c_ao; \
9736 + __addr_ao += PAGE_SIZE; \
9737 + if (__size > PAGE_SIZE) \
9738 + cond_resched(); \
9739 + if (__get_user(__c_ao, (char __user *)__addr)) \
9740 + break; \
9741 + if (type != VERIFY_WRITE) { \
9742 + __addr = __addr_ao; \
9743 + continue; \
9744 + } \
9745 + if (__put_user(__c_ao, (char __user *)__addr)) \
9746 + break; \
9747 + __addr = __addr_ao; \
9748 + } \
9749 + } \
9750 + __ret_ao; \
9751 +})
9752
9753 /*
9754 * The exception table consists of pairs of addresses: the first is the
9755 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9756 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9757 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9758
9759 -
9760 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9761 +#define __copyuser_seg "gs;"
9762 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9763 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9764 +#else
9765 +#define __copyuser_seg
9766 +#define __COPYUSER_SET_ES
9767 +#define __COPYUSER_RESTORE_ES
9768 +#endif
9769
9770 #ifdef CONFIG_X86_32
9771 #define __put_user_asm_u64(x, addr, err, errret) \
9772 - asm volatile("1: movl %%eax,0(%2)\n" \
9773 - "2: movl %%edx,4(%2)\n" \
9774 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9775 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9776 "3:\n" \
9777 ".section .fixup,\"ax\"\n" \
9778 "4: movl %3,%0\n" \
9779 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9780 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9781
9782 #define __put_user_asm_ex_u64(x, addr) \
9783 - asm volatile("1: movl %%eax,0(%1)\n" \
9784 - "2: movl %%edx,4(%1)\n" \
9785 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9786 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9787 "3:\n" \
9788 _ASM_EXTABLE(1b, 2b - 1b) \
9789 _ASM_EXTABLE(2b, 3b - 2b) \
9790 @@ -373,7 +415,7 @@ do { \
9791 } while (0)
9792
9793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9794 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9795 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9796 "2:\n" \
9797 ".section .fixup,\"ax\"\n" \
9798 "3: mov %3,%0\n" \
9799 @@ -381,7 +423,7 @@ do { \
9800 " jmp 2b\n" \
9801 ".previous\n" \
9802 _ASM_EXTABLE(1b, 3b) \
9803 - : "=r" (err), ltype(x) \
9804 + : "=r" (err), ltype (x) \
9805 : "m" (__m(addr)), "i" (errret), "0" (err))
9806
9807 #define __get_user_size_ex(x, ptr, size) \
9808 @@ -406,7 +448,7 @@ do { \
9809 } while (0)
9810
9811 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9812 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9813 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9814 "2:\n" \
9815 _ASM_EXTABLE(1b, 2b - 1b) \
9816 : ltype(x) : "m" (__m(addr)))
9817 @@ -423,13 +465,24 @@ do { \
9818 int __gu_err; \
9819 unsigned long __gu_val; \
9820 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9821 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9822 + (x) = (__typeof__(*(ptr)))__gu_val; \
9823 __gu_err; \
9824 })
9825
9826 /* FIXME: this hack is definitely wrong -AK */
9827 struct __large_struct { unsigned long buf[100]; };
9828 -#define __m(x) (*(struct __large_struct __user *)(x))
9829 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9830 +#define ____m(x) \
9831 +({ \
9832 + unsigned long ____x = (unsigned long)(x); \
9833 + if (____x < PAX_USER_SHADOW_BASE) \
9834 + ____x += PAX_USER_SHADOW_BASE; \
9835 + (void __user *)____x; \
9836 +})
9837 +#else
9838 +#define ____m(x) (x)
9839 +#endif
9840 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9841
9842 /*
9843 * Tell gcc we read from memory instead of writing: this is because
9844 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9845 * aliasing issues.
9846 */
9847 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9848 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9849 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9850 "2:\n" \
9851 ".section .fixup,\"ax\"\n" \
9852 "3: mov %3,%0\n" \
9853 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9854 ".previous\n" \
9855 _ASM_EXTABLE(1b, 3b) \
9856 : "=r"(err) \
9857 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9858 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9859
9860 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9861 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9862 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9863 "2:\n" \
9864 _ASM_EXTABLE(1b, 2b - 1b) \
9865 : : ltype(x), "m" (__m(addr)))
9866 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9867 * On error, the variable @x is set to zero.
9868 */
9869
9870 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871 +#define __get_user(x, ptr) get_user((x), (ptr))
9872 +#else
9873 #define __get_user(x, ptr) \
9874 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9875 +#endif
9876
9877 /**
9878 * __put_user: - Write a simple value into user space, with less checking.
9879 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9880 * Returns zero on success, or -EFAULT on error.
9881 */
9882
9883 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884 +#define __put_user(x, ptr) put_user((x), (ptr))
9885 +#else
9886 #define __put_user(x, ptr) \
9887 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9888 +#endif
9889
9890 #define __get_user_unaligned __get_user
9891 #define __put_user_unaligned __put_user
9892 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9893 #define get_user_ex(x, ptr) do { \
9894 unsigned long __gue_val; \
9895 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9896 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9897 + (x) = (__typeof__(*(ptr)))__gue_val; \
9898 } while (0)
9899
9900 #ifdef CONFIG_X86_WP_WORKS_OK
9901 diff -urNp linux-3.0.4/arch/x86/include/asm/vgtod.h linux-3.0.4/arch/x86/include/asm/vgtod.h
9902 --- linux-3.0.4/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9903 +++ linux-3.0.4/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9904 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9905 int sysctl_enabled;
9906 struct timezone sys_tz;
9907 struct { /* extract of a clocksource struct */
9908 + char name[8];
9909 cycle_t (*vread)(void);
9910 cycle_t cycle_last;
9911 cycle_t mask;
9912 diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
9913 --- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9914 +++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9915 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9916 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9917 void (*find_smp_config)(void);
9918 void (*get_smp_config)(unsigned int early);
9919 -};
9920 +} __no_const;
9921
9922 /**
9923 * struct x86_init_resources - platform specific resource related ops
9924 @@ -42,7 +42,7 @@ struct x86_init_resources {
9925 void (*probe_roms)(void);
9926 void (*reserve_resources)(void);
9927 char *(*memory_setup)(void);
9928 -};
9929 +} __no_const;
9930
9931 /**
9932 * struct x86_init_irqs - platform specific interrupt setup
9933 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9934 void (*pre_vector_init)(void);
9935 void (*intr_init)(void);
9936 void (*trap_init)(void);
9937 -};
9938 +} __no_const;
9939
9940 /**
9941 * struct x86_init_oem - oem platform specific customizing functions
9942 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9943 struct x86_init_oem {
9944 void (*arch_setup)(void);
9945 void (*banner)(void);
9946 -};
9947 +} __no_const;
9948
9949 /**
9950 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9951 @@ -76,7 +76,7 @@ struct x86_init_oem {
9952 */
9953 struct x86_init_mapping {
9954 void (*pagetable_reserve)(u64 start, u64 end);
9955 -};
9956 +} __no_const;
9957
9958 /**
9959 * struct x86_init_paging - platform specific paging functions
9960 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9961 struct x86_init_paging {
9962 void (*pagetable_setup_start)(pgd_t *base);
9963 void (*pagetable_setup_done)(pgd_t *base);
9964 -};
9965 +} __no_const;
9966
9967 /**
9968 * struct x86_init_timers - platform specific timer setup
9969 @@ -101,7 +101,7 @@ struct x86_init_timers {
9970 void (*tsc_pre_init)(void);
9971 void (*timer_init)(void);
9972 void (*wallclock_init)(void);
9973 -};
9974 +} __no_const;
9975
9976 /**
9977 * struct x86_init_iommu - platform specific iommu setup
9978 @@ -109,7 +109,7 @@ struct x86_init_timers {
9979 */
9980 struct x86_init_iommu {
9981 int (*iommu_init)(void);
9982 -};
9983 +} __no_const;
9984
9985 /**
9986 * struct x86_init_pci - platform specific pci init functions
9987 @@ -123,7 +123,7 @@ struct x86_init_pci {
9988 int (*init)(void);
9989 void (*init_irq)(void);
9990 void (*fixup_irqs)(void);
9991 -};
9992 +} __no_const;
9993
9994 /**
9995 * struct x86_init_ops - functions for platform specific setup
9996 @@ -139,7 +139,7 @@ struct x86_init_ops {
9997 struct x86_init_timers timers;
9998 struct x86_init_iommu iommu;
9999 struct x86_init_pci pci;
10000 -};
10001 +} __no_const;
10002
10003 /**
10004 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10005 @@ -147,7 +147,7 @@ struct x86_init_ops {
10006 */
10007 struct x86_cpuinit_ops {
10008 void (*setup_percpu_clockev)(void);
10009 -};
10010 +} __no_const;
10011
10012 /**
10013 * struct x86_platform_ops - platform specific runtime functions
10014 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10015 bool (*is_untracked_pat_range)(u64 start, u64 end);
10016 void (*nmi_init)(void);
10017 int (*i8042_detect)(void);
10018 -};
10019 +} __no_const;
10020
10021 struct pci_dev;
10022
10023 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10024 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10025 void (*teardown_msi_irq)(unsigned int irq);
10026 void (*teardown_msi_irqs)(struct pci_dev *dev);
10027 -};
10028 +} __no_const;
10029
10030 extern struct x86_init_ops x86_init;
10031 extern struct x86_cpuinit_ops x86_cpuinit;
10032 diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10033 --- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10034 +++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10035 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10036 {
10037 int err;
10038
10039 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10040 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10041 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10042 +#endif
10043 +
10044 /*
10045 * Clear the xsave header first, so that reserved fields are
10046 * initialized to zero.
10047 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10048 u32 lmask = mask;
10049 u32 hmask = mask >> 32;
10050
10051 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10052 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10053 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10054 +#endif
10055 +
10056 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10057 "2:\n"
10058 ".section .fixup,\"ax\"\n"
10059 diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10060 --- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10061 +++ linux-3.0.4/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10062 @@ -229,7 +229,7 @@ config X86_HT
10063
10064 config X86_32_LAZY_GS
10065 def_bool y
10066 - depends on X86_32 && !CC_STACKPROTECTOR
10067 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10068
10069 config ARCH_HWEIGHT_CFLAGS
10070 string
10071 @@ -1018,7 +1018,7 @@ choice
10072
10073 config NOHIGHMEM
10074 bool "off"
10075 - depends on !X86_NUMAQ
10076 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10077 ---help---
10078 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10079 However, the address space of 32-bit x86 processors is only 4
10080 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10081
10082 config HIGHMEM4G
10083 bool "4GB"
10084 - depends on !X86_NUMAQ
10085 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10086 ---help---
10087 Select this if you have a 32-bit processor and between 1 and 4
10088 gigabytes of physical RAM.
10089 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10090 hex
10091 default 0xB0000000 if VMSPLIT_3G_OPT
10092 default 0x80000000 if VMSPLIT_2G
10093 - default 0x78000000 if VMSPLIT_2G_OPT
10094 + default 0x70000000 if VMSPLIT_2G_OPT
10095 default 0x40000000 if VMSPLIT_1G
10096 default 0xC0000000
10097 depends on X86_32
10098 @@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10099
10100 config EFI
10101 bool "EFI runtime service support"
10102 - depends on ACPI
10103 + depends on ACPI && !PAX_KERNEXEC
10104 ---help---
10105 This enables the kernel to use EFI runtime services that are
10106 available (such as the EFI variable services).
10107 @@ -1483,6 +1483,7 @@ config SECCOMP
10108
10109 config CC_STACKPROTECTOR
10110 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10111 + depends on X86_64 || !PAX_MEMORY_UDEREF
10112 ---help---
10113 This option turns on the -fstack-protector GCC feature. This
10114 feature puts, at the beginning of functions, a canary value on
10115 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10116 config PHYSICAL_START
10117 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10118 default "0x1000000"
10119 + range 0x400000 0x40000000
10120 ---help---
10121 This gives the physical address where the kernel is loaded.
10122
10123 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10124 config PHYSICAL_ALIGN
10125 hex "Alignment value to which kernel should be aligned" if X86_32
10126 default "0x1000000"
10127 + range 0x400000 0x1000000 if PAX_KERNEXEC
10128 range 0x2000 0x1000000
10129 ---help---
10130 This value puts the alignment restrictions on physical address
10131 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10132 Say N if you want to disable CPU hotplug.
10133
10134 config COMPAT_VDSO
10135 - def_bool y
10136 + def_bool n
10137 prompt "Compat VDSO support"
10138 depends on X86_32 || IA32_EMULATION
10139 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10140 ---help---
10141 Map the 32-bit VDSO to the predictable old-style address too.
10142
10143 diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10144 --- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10145 +++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10146 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10147
10148 config X86_F00F_BUG
10149 def_bool y
10150 - depends on M586MMX || M586TSC || M586 || M486 || M386
10151 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10152
10153 config X86_INVD_BUG
10154 def_bool y
10155 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10156
10157 config X86_ALIGNMENT_16
10158 def_bool y
10159 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10160 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161
10162 config X86_INTEL_USERCOPY
10163 def_bool y
10164 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10165 # generates cmov.
10166 config X86_CMOV
10167 def_bool y
10168 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10169 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170
10171 config X86_MINIMUM_CPU_FAMILY
10172 int
10173 diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10174 --- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10175 +++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10176 @@ -81,7 +81,7 @@ config X86_PTDUMP
10177 config DEBUG_RODATA
10178 bool "Write protect kernel read-only data structures"
10179 default y
10180 - depends on DEBUG_KERNEL
10181 + depends on DEBUG_KERNEL && BROKEN
10182 ---help---
10183 Mark the kernel read-only data as write-protected in the pagetables,
10184 in order to catch accidental (and incorrect) writes to such const
10185 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10186
10187 config DEBUG_SET_MODULE_RONX
10188 bool "Set loadable kernel module data as NX and text as RO"
10189 - depends on MODULES
10190 + depends on MODULES && BROKEN
10191 ---help---
10192 This option helps catch unintended modifications to loadable
10193 kernel module's text and read-only data. It also prevents execution
10194 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10195 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10196 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10197 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10198 $(call cc-option, -fno-stack-protector) \
10199 $(call cc-option, -mpreferred-stack-boundary=2)
10200 KBUILD_CFLAGS += $(call cc-option, -m32)
10201 +ifdef CONSTIFY_PLUGIN
10202 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10203 +endif
10204 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10205 GCOV_PROFILE := n
10206
10207 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10208 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10209 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10210 @@ -108,6 +108,9 @@ wakeup_code:
10211 /* Do any other stuff... */
10212
10213 #ifndef CONFIG_64BIT
10214 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10215 + call verify_cpu
10216 +
10217 /* This could also be done in C code... */
10218 movl pmode_cr3, %eax
10219 movl %eax, %cr3
10220 @@ -131,6 +134,7 @@ wakeup_code:
10221 movl pmode_cr0, %eax
10222 movl %eax, %cr0
10223 jmp pmode_return
10224 +# include "../../verify_cpu.S"
10225 #else
10226 pushw $0
10227 pushw trampoline_segment
10228 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10229 --- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10230 +++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10231 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10232 header->trampoline_segment = trampoline_address() >> 4;
10233 #ifdef CONFIG_SMP
10234 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10235 +
10236 + pax_open_kernel();
10237 early_gdt_descr.address =
10238 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10239 + pax_close_kernel();
10240 +
10241 initial_gs = per_cpu_offset(smp_processor_id());
10242 #endif
10243 initial_code = (unsigned long)wakeup_long64;
10244 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10245 --- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10246 +++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10247 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10248 # and restore the stack ... but you need gdt for this to work
10249 movl saved_context_esp, %esp
10250
10251 - movl %cs:saved_magic, %eax
10252 - cmpl $0x12345678, %eax
10253 + cmpl $0x12345678, saved_magic
10254 jne bogus_magic
10255
10256 # jump to place where we left off
10257 - movl saved_eip, %eax
10258 - jmp *%eax
10259 + jmp *(saved_eip)
10260
10261 bogus_magic:
10262 jmp bogus_magic
10263 diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10264 --- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10265 +++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10266 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10267 if (!*poff || ptr < text || ptr >= text_end)
10268 continue;
10269 /* turn DS segment override prefix into lock prefix */
10270 - if (*ptr == 0x3e)
10271 + if (*ktla_ktva(ptr) == 0x3e)
10272 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10273 };
10274 mutex_unlock(&text_mutex);
10275 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10276 if (!*poff || ptr < text || ptr >= text_end)
10277 continue;
10278 /* turn lock prefix into DS segment override prefix */
10279 - if (*ptr == 0xf0)
10280 + if (*ktla_ktva(ptr) == 0xf0)
10281 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10282 };
10283 mutex_unlock(&text_mutex);
10284 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10285
10286 BUG_ON(p->len > MAX_PATCH_LEN);
10287 /* prep the buffer with the original instructions */
10288 - memcpy(insnbuf, p->instr, p->len);
10289 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10290 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10291 (unsigned long)p->instr, p->len);
10292
10293 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10294 if (smp_alt_once)
10295 free_init_pages("SMP alternatives",
10296 (unsigned long)__smp_locks,
10297 - (unsigned long)__smp_locks_end);
10298 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10299
10300 restart_nmi();
10301 }
10302 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10303 * instructions. And on the local CPU you need to be protected again NMI or MCE
10304 * handlers seeing an inconsistent instruction while you patch.
10305 */
10306 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10307 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10308 size_t len)
10309 {
10310 unsigned long flags;
10311 local_irq_save(flags);
10312 - memcpy(addr, opcode, len);
10313 +
10314 + pax_open_kernel();
10315 + memcpy(ktla_ktva(addr), opcode, len);
10316 sync_core();
10317 + pax_close_kernel();
10318 +
10319 local_irq_restore(flags);
10320 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10321 that causes hangs on some VIA CPUs. */
10322 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10323 */
10324 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10325 {
10326 - unsigned long flags;
10327 - char *vaddr;
10328 + unsigned char *vaddr = ktla_ktva(addr);
10329 struct page *pages[2];
10330 - int i;
10331 + size_t i;
10332
10333 if (!core_kernel_text((unsigned long)addr)) {
10334 - pages[0] = vmalloc_to_page(addr);
10335 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10336 + pages[0] = vmalloc_to_page(vaddr);
10337 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10338 } else {
10339 - pages[0] = virt_to_page(addr);
10340 + pages[0] = virt_to_page(vaddr);
10341 WARN_ON(!PageReserved(pages[0]));
10342 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10343 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10344 }
10345 BUG_ON(!pages[0]);
10346 - local_irq_save(flags);
10347 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10348 - if (pages[1])
10349 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10350 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10351 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10352 - clear_fixmap(FIX_TEXT_POKE0);
10353 - if (pages[1])
10354 - clear_fixmap(FIX_TEXT_POKE1);
10355 - local_flush_tlb();
10356 - sync_core();
10357 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10358 - that causes hangs on some VIA CPUs. */
10359 + text_poke_early(addr, opcode, len);
10360 for (i = 0; i < len; i++)
10361 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10362 - local_irq_restore(flags);
10363 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10364 return addr;
10365 }
10366
10367 diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10368 --- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10369 +++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10370 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10371 /*
10372 * Debug level, exported for io_apic.c
10373 */
10374 -unsigned int apic_verbosity;
10375 +int apic_verbosity;
10376
10377 int pic_mode;
10378
10379 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10380 apic_write(APIC_ESR, 0);
10381 v1 = apic_read(APIC_ESR);
10382 ack_APIC_irq();
10383 - atomic_inc(&irq_err_count);
10384 + atomic_inc_unchecked(&irq_err_count);
10385
10386 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10387 smp_processor_id(), v0 , v1);
10388 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10389 u16 *bios_cpu_apicid;
10390 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10391
10392 + pax_track_stack();
10393 +
10394 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10395 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10396
10397 diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10398 --- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10399 +++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10400 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10401 }
10402 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10403
10404 -void lock_vector_lock(void)
10405 +void lock_vector_lock(void) __acquires(vector_lock)
10406 {
10407 /* Used to the online set of cpus does not change
10408 * during assign_irq_vector.
10409 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10410 raw_spin_lock(&vector_lock);
10411 }
10412
10413 -void unlock_vector_lock(void)
10414 +void unlock_vector_lock(void) __releases(vector_lock)
10415 {
10416 raw_spin_unlock(&vector_lock);
10417 }
10418 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10419 ack_APIC_irq();
10420 }
10421
10422 -atomic_t irq_mis_count;
10423 +atomic_unchecked_t irq_mis_count;
10424
10425 /*
10426 * IO-APIC versions below 0x20 don't support EOI register.
10427 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10428 * at the cpu.
10429 */
10430 if (!(v & (1 << (i & 0x1f)))) {
10431 - atomic_inc(&irq_mis_count);
10432 + atomic_inc_unchecked(&irq_mis_count);
10433
10434 eoi_ioapic_irq(irq, cfg);
10435 }
10436 diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10437 --- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10438 +++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10439 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10440 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10441 * even though they are called in protected mode.
10442 */
10443 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10444 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10445 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10446
10447 static const char driver_version[] = "1.16ac"; /* no spaces */
10448 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10449 BUG_ON(cpu != 0);
10450 gdt = get_cpu_gdt_table(cpu);
10451 save_desc_40 = gdt[0x40 / 8];
10452 +
10453 + pax_open_kernel();
10454 gdt[0x40 / 8] = bad_bios_desc;
10455 + pax_close_kernel();
10456
10457 apm_irq_save(flags);
10458 APM_DO_SAVE_SEGS;
10459 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10460 &call->esi);
10461 APM_DO_RESTORE_SEGS;
10462 apm_irq_restore(flags);
10463 +
10464 + pax_open_kernel();
10465 gdt[0x40 / 8] = save_desc_40;
10466 + pax_close_kernel();
10467 +
10468 put_cpu();
10469
10470 return call->eax & 0xff;
10471 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10472 BUG_ON(cpu != 0);
10473 gdt = get_cpu_gdt_table(cpu);
10474 save_desc_40 = gdt[0x40 / 8];
10475 +
10476 + pax_open_kernel();
10477 gdt[0x40 / 8] = bad_bios_desc;
10478 + pax_close_kernel();
10479
10480 apm_irq_save(flags);
10481 APM_DO_SAVE_SEGS;
10482 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10483 &call->eax);
10484 APM_DO_RESTORE_SEGS;
10485 apm_irq_restore(flags);
10486 +
10487 + pax_open_kernel();
10488 gdt[0x40 / 8] = save_desc_40;
10489 + pax_close_kernel();
10490 +
10491 put_cpu();
10492 return error;
10493 }
10494 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10495 * code to that CPU.
10496 */
10497 gdt = get_cpu_gdt_table(0);
10498 +
10499 + pax_open_kernel();
10500 set_desc_base(&gdt[APM_CS >> 3],
10501 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10502 set_desc_base(&gdt[APM_CS_16 >> 3],
10503 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10504 set_desc_base(&gdt[APM_DS >> 3],
10505 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10506 + pax_close_kernel();
10507
10508 proc_create("apm", 0, NULL, &apm_file_ops);
10509
10510 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10511 --- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10512 +++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10513 @@ -69,6 +69,7 @@ int main(void)
10514 BLANK();
10515 #undef ENTRY
10516
10517 + DEFINE(TSS_size, sizeof(struct tss_struct));
10518 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10519 BLANK();
10520
10521 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10522 --- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10523 +++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10524 @@ -33,6 +33,8 @@ void common(void) {
10525 OFFSET(TI_status, thread_info, status);
10526 OFFSET(TI_addr_limit, thread_info, addr_limit);
10527 OFFSET(TI_preempt_count, thread_info, preempt_count);
10528 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10529 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10530
10531 BLANK();
10532 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10533 @@ -53,8 +55,26 @@ void common(void) {
10534 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10535 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10536 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10537 +
10538 +#ifdef CONFIG_PAX_KERNEXEC
10539 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10540 +#endif
10541 +
10542 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10543 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10544 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10545 +#ifdef CONFIG_X86_64
10546 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10547 +#endif
10548 #endif
10549
10550 +#endif
10551 +
10552 + BLANK();
10553 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10554 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10555 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10556 +
10557 #ifdef CONFIG_XEN
10558 BLANK();
10559 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10560 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10561 --- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10562 +++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10563 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10564 unsigned int size)
10565 {
10566 /* AMD errata T13 (order #21922) */
10567 - if ((c->x86 == 6)) {
10568 + if (c->x86 == 6) {
10569 /* Duron Rev A0 */
10570 if (c->x86_model == 3 && c->x86_mask == 0)
10571 size = 64;
10572 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10573 --- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10574 +++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10575 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10576
10577 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10578
10579 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10580 -#ifdef CONFIG_X86_64
10581 - /*
10582 - * We need valid kernel segments for data and code in long mode too
10583 - * IRET will check the segment types kkeil 2000/10/28
10584 - * Also sysret mandates a special GDT layout
10585 - *
10586 - * TLS descriptors are currently at a different place compared to i386.
10587 - * Hopefully nobody expects them at a fixed place (Wine?)
10588 - */
10589 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10590 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10591 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10592 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10593 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10594 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10595 -#else
10596 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10597 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10598 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10599 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10600 - /*
10601 - * Segments used for calling PnP BIOS have byte granularity.
10602 - * They code segments and data segments have fixed 64k limits,
10603 - * the transfer segment sizes are set at run time.
10604 - */
10605 - /* 32-bit code */
10606 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10607 - /* 16-bit code */
10608 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10609 - /* 16-bit data */
10610 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10611 - /* 16-bit data */
10612 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10613 - /* 16-bit data */
10614 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10615 - /*
10616 - * The APM segments have byte granularity and their bases
10617 - * are set at run time. All have 64k limits.
10618 - */
10619 - /* 32-bit code */
10620 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10621 - /* 16-bit code */
10622 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10623 - /* data */
10624 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10625 -
10626 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10627 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628 - GDT_STACK_CANARY_INIT
10629 -#endif
10630 -} };
10631 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10632 -
10633 static int __init x86_xsave_setup(char *s)
10634 {
10635 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10636 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10637 {
10638 struct desc_ptr gdt_descr;
10639
10640 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10641 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10642 gdt_descr.size = GDT_SIZE - 1;
10643 load_gdt(&gdt_descr);
10644 /* Reload the per-cpu base */
10645 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10646 /* Filter out anything that depends on CPUID levels we don't have */
10647 filter_cpuid_features(c, true);
10648
10649 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10650 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10651 +#endif
10652 +
10653 /* If the model name is still unset, do table lookup. */
10654 if (!c->x86_model_id[0]) {
10655 const char *p;
10656 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10657 }
10658 __setup("clearcpuid=", setup_disablecpuid);
10659
10660 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10661 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10662 +
10663 #ifdef CONFIG_X86_64
10664 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10665
10666 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10667 EXPORT_PER_CPU_SYMBOL(current_task);
10668
10669 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10670 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10671 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10672 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10673
10674 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10675 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10676 {
10677 memset(regs, 0, sizeof(struct pt_regs));
10678 regs->fs = __KERNEL_PERCPU;
10679 - regs->gs = __KERNEL_STACK_CANARY;
10680 + savesegment(gs, regs->gs);
10681
10682 return regs;
10683 }
10684 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10685 int i;
10686
10687 cpu = stack_smp_processor_id();
10688 - t = &per_cpu(init_tss, cpu);
10689 + t = init_tss + cpu;
10690 oist = &per_cpu(orig_ist, cpu);
10691
10692 #ifdef CONFIG_NUMA
10693 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10694 switch_to_new_gdt(cpu);
10695 loadsegment(fs, 0);
10696
10697 - load_idt((const struct desc_ptr *)&idt_descr);
10698 + load_idt(&idt_descr);
10699
10700 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10701 syscall_init();
10702 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10703 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10704 barrier();
10705
10706 - x86_configure_nx();
10707 if (cpu != 0)
10708 enable_x2apic();
10709
10710 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10711 {
10712 int cpu = smp_processor_id();
10713 struct task_struct *curr = current;
10714 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10715 + struct tss_struct *t = init_tss + cpu;
10716 struct thread_struct *thread = &curr->thread;
10717
10718 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10719 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10720 --- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:26:13.000000000 -0400
10721 +++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10722 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10723 * Update the IDT descriptor and reload the IDT so that
10724 * it uses the read-only mapped virtual address.
10725 */
10726 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10727 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10728 load_idt(&idt_descr);
10729 }
10730 #endif
10731 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10732 --- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10733 +++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10734 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10735 CFLAGS_REMOVE_perf_event.o = -pg
10736 endif
10737
10738 -# Make sure load_percpu_segment has no stackprotector
10739 -nostackp := $(call cc-option, -fno-stack-protector)
10740 -CFLAGS_common.o := $(nostackp)
10741 -
10742 obj-y := intel_cacheinfo.o scattered.o topology.o
10743 obj-y += proc.o capflags.o powerflags.o common.o
10744 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10745 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10746 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10747 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10748 @@ -46,6 +46,7 @@
10749 #include <asm/ipi.h>
10750 #include <asm/mce.h>
10751 #include <asm/msr.h>
10752 +#include <asm/local.h>
10753
10754 #include "mce-internal.h"
10755
10756 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10757 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10758 m->cs, m->ip);
10759
10760 - if (m->cs == __KERNEL_CS)
10761 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10762 print_symbol("{%s}", m->ip);
10763 pr_cont("\n");
10764 }
10765 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10766
10767 #define PANIC_TIMEOUT 5 /* 5 seconds */
10768
10769 -static atomic_t mce_paniced;
10770 +static atomic_unchecked_t mce_paniced;
10771
10772 static int fake_panic;
10773 -static atomic_t mce_fake_paniced;
10774 +static atomic_unchecked_t mce_fake_paniced;
10775
10776 /* Panic in progress. Enable interrupts and wait for final IPI */
10777 static void wait_for_panic(void)
10778 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10779 /*
10780 * Make sure only one CPU runs in machine check panic
10781 */
10782 - if (atomic_inc_return(&mce_paniced) > 1)
10783 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10784 wait_for_panic();
10785 barrier();
10786
10787 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10788 console_verbose();
10789 } else {
10790 /* Don't log too much for fake panic */
10791 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10792 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10793 return;
10794 }
10795 /* First print corrected ones that are still unlogged */
10796 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10797 * might have been modified by someone else.
10798 */
10799 rmb();
10800 - if (atomic_read(&mce_paniced))
10801 + if (atomic_read_unchecked(&mce_paniced))
10802 wait_for_panic();
10803 if (!monarch_timeout)
10804 goto out;
10805 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10806 */
10807
10808 static DEFINE_SPINLOCK(mce_state_lock);
10809 -static int open_count; /* #times opened */
10810 +static local_t open_count; /* #times opened */
10811 static int open_exclu; /* already open exclusive? */
10812
10813 static int mce_open(struct inode *inode, struct file *file)
10814 {
10815 spin_lock(&mce_state_lock);
10816
10817 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10818 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10819 spin_unlock(&mce_state_lock);
10820
10821 return -EBUSY;
10822 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10823
10824 if (file->f_flags & O_EXCL)
10825 open_exclu = 1;
10826 - open_count++;
10827 + local_inc(&open_count);
10828
10829 spin_unlock(&mce_state_lock);
10830
10831 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10832 {
10833 spin_lock(&mce_state_lock);
10834
10835 - open_count--;
10836 + local_dec(&open_count);
10837 open_exclu = 0;
10838
10839 spin_unlock(&mce_state_lock);
10840 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10841 static void mce_reset(void)
10842 {
10843 cpu_missing = 0;
10844 - atomic_set(&mce_fake_paniced, 0);
10845 + atomic_set_unchecked(&mce_fake_paniced, 0);
10846 atomic_set(&mce_executing, 0);
10847 atomic_set(&mce_callin, 0);
10848 atomic_set(&global_nwo, 0);
10849 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10850 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10851 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10852 @@ -215,7 +215,9 @@ static int inject_init(void)
10853 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10854 return -ENOMEM;
10855 printk(KERN_INFO "Machine check injector initialized\n");
10856 - mce_chrdev_ops.write = mce_write;
10857 + pax_open_kernel();
10858 + *(void **)&mce_chrdev_ops.write = mce_write;
10859 + pax_close_kernel();
10860 register_die_notifier(&mce_raise_nb);
10861 return 0;
10862 }
10863 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
10864 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:13.000000000 -0400
10865 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
10866 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10867 u64 size_or_mask, size_and_mask;
10868 static bool mtrr_aps_delayed_init;
10869
10870 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10871 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10872
10873 const struct mtrr_ops *mtrr_if;
10874
10875 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10876 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10877 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10878 @@ -25,7 +25,7 @@ struct mtrr_ops {
10879 int (*validate_add_page)(unsigned long base, unsigned long size,
10880 unsigned int type);
10881 int (*have_wrcomb)(void);
10882 -};
10883 +} __do_const;
10884
10885 extern int generic_get_free_region(unsigned long base, unsigned long size,
10886 int replace_reg);
10887 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
10888 --- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10889 +++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10890 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10891 int i, j, w, wmax, num = 0;
10892 struct hw_perf_event *hwc;
10893
10894 + pax_track_stack();
10895 +
10896 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10897
10898 for (i = 0; i < n; i++) {
10899 @@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10900 break;
10901
10902 perf_callchain_store(entry, frame.return_address);
10903 - fp = frame.next_frame;
10904 + fp = (__force const void __user *)frame.next_frame;
10905 }
10906 }
10907
10908 diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
10909 --- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10910 +++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10911 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10912 regs = args->regs;
10913
10914 #ifdef CONFIG_X86_32
10915 - if (!user_mode_vm(regs)) {
10916 + if (!user_mode(regs)) {
10917 crash_fixup_ss_esp(&fixed_regs, regs);
10918 regs = &fixed_regs;
10919 }
10920 diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
10921 --- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10922 +++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10923 @@ -11,7 +11,7 @@
10924
10925 #define DOUBLEFAULT_STACKSIZE (1024)
10926 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10927 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10928 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10929
10930 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10931
10932 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10933 unsigned long gdt, tss;
10934
10935 store_gdt(&gdt_desc);
10936 - gdt = gdt_desc.address;
10937 + gdt = (unsigned long)gdt_desc.address;
10938
10939 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10940
10941 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10942 /* 0x2 bit is always set */
10943 .flags = X86_EFLAGS_SF | 0x2,
10944 .sp = STACK_START,
10945 - .es = __USER_DS,
10946 + .es = __KERNEL_DS,
10947 .cs = __KERNEL_CS,
10948 .ss = __KERNEL_DS,
10949 - .ds = __USER_DS,
10950 + .ds = __KERNEL_DS,
10951 .fs = __KERNEL_PERCPU,
10952
10953 .__cr3 = __pa_nodebug(swapper_pg_dir),
10954 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
10955 --- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10956 +++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10957 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10958 bp = stack_frame(task, regs);
10959
10960 for (;;) {
10961 - struct thread_info *context;
10962 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10963
10964 - context = (struct thread_info *)
10965 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10966 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10967 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10968
10969 - stack = (unsigned long *)context->previous_esp;
10970 - if (!stack)
10971 + if (stack_start == task_stack_page(task))
10972 break;
10973 + stack = *(unsigned long **)stack_start;
10974 if (ops->stack(data, "IRQ") < 0)
10975 break;
10976 touch_nmi_watchdog();
10977 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10978 * When in-kernel, we also print out the stack and code at the
10979 * time of the fault..
10980 */
10981 - if (!user_mode_vm(regs)) {
10982 + if (!user_mode(regs)) {
10983 unsigned int code_prologue = code_bytes * 43 / 64;
10984 unsigned int code_len = code_bytes;
10985 unsigned char c;
10986 u8 *ip;
10987 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10988
10989 printk(KERN_EMERG "Stack:\n");
10990 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10991
10992 printk(KERN_EMERG "Code: ");
10993
10994 - ip = (u8 *)regs->ip - code_prologue;
10995 + ip = (u8 *)regs->ip - code_prologue + cs_base;
10996 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10997 /* try starting at IP */
10998 - ip = (u8 *)regs->ip;
10999 + ip = (u8 *)regs->ip + cs_base;
11000 code_len = code_len - code_prologue + 1;
11001 }
11002 for (i = 0; i < code_len; i++, ip++) {
11003 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11004 printk(" Bad EIP value.");
11005 break;
11006 }
11007 - if (ip == (u8 *)regs->ip)
11008 + if (ip == (u8 *)regs->ip + cs_base)
11009 printk("<%02x> ", c);
11010 else
11011 printk("%02x ", c);
11012 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11013 {
11014 unsigned short ud2;
11015
11016 + ip = ktla_ktva(ip);
11017 if (ip < PAGE_OFFSET)
11018 return 0;
11019 if (probe_kernel_address((unsigned short *)ip, ud2))
11020 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11021 --- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11022 +++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11023 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11024 unsigned long *irq_stack_end =
11025 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11026 unsigned used = 0;
11027 - struct thread_info *tinfo;
11028 int graph = 0;
11029 unsigned long dummy;
11030 + void *stack_start;
11031
11032 if (!task)
11033 task = current;
11034 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11035 * current stack address. If the stacks consist of nested
11036 * exceptions
11037 */
11038 - tinfo = task_thread_info(task);
11039 for (;;) {
11040 char *id;
11041 unsigned long *estack_end;
11042 +
11043 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11044 &used, &id);
11045
11046 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11047 if (ops->stack(data, id) < 0)
11048 break;
11049
11050 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11051 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11052 data, estack_end, &graph);
11053 ops->stack(data, "<EOE>");
11054 /*
11055 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11056 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11057 if (ops->stack(data, "IRQ") < 0)
11058 break;
11059 - bp = ops->walk_stack(tinfo, stack, bp,
11060 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11061 ops, data, irq_stack_end, &graph);
11062 /*
11063 * We link to the next stack (which would be
11064 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11065 /*
11066 * This handles the process stack:
11067 */
11068 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11069 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11070 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11071 put_cpu();
11072 }
11073 EXPORT_SYMBOL(dump_trace);
11074 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11075 --- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11076 +++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11077 @@ -2,6 +2,9 @@
11078 * Copyright (C) 1991, 1992 Linus Torvalds
11079 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11080 */
11081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11082 +#define __INCLUDED_BY_HIDESYM 1
11083 +#endif
11084 #include <linux/kallsyms.h>
11085 #include <linux/kprobes.h>
11086 #include <linux/uaccess.h>
11087 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11088 static void
11089 print_ftrace_graph_addr(unsigned long addr, void *data,
11090 const struct stacktrace_ops *ops,
11091 - struct thread_info *tinfo, int *graph)
11092 + struct task_struct *task, int *graph)
11093 {
11094 - struct task_struct *task = tinfo->task;
11095 unsigned long ret_addr;
11096 int index = task->curr_ret_stack;
11097
11098 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11099 static inline void
11100 print_ftrace_graph_addr(unsigned long addr, void *data,
11101 const struct stacktrace_ops *ops,
11102 - struct thread_info *tinfo, int *graph)
11103 + struct task_struct *task, int *graph)
11104 { }
11105 #endif
11106
11107 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11108 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11109 */
11110
11111 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11112 - void *p, unsigned int size, void *end)
11113 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11114 {
11115 - void *t = tinfo;
11116 if (end) {
11117 if (p < end && p >= (end-THREAD_SIZE))
11118 return 1;
11119 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11120 }
11121
11122 unsigned long
11123 -print_context_stack(struct thread_info *tinfo,
11124 +print_context_stack(struct task_struct *task, void *stack_start,
11125 unsigned long *stack, unsigned long bp,
11126 const struct stacktrace_ops *ops, void *data,
11127 unsigned long *end, int *graph)
11128 {
11129 struct stack_frame *frame = (struct stack_frame *)bp;
11130
11131 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11132 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11133 unsigned long addr;
11134
11135 addr = *stack;
11136 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11137 } else {
11138 ops->address(data, addr, 0);
11139 }
11140 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11141 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11142 }
11143 stack++;
11144 }
11145 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11146 EXPORT_SYMBOL_GPL(print_context_stack);
11147
11148 unsigned long
11149 -print_context_stack_bp(struct thread_info *tinfo,
11150 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11151 unsigned long *stack, unsigned long bp,
11152 const struct stacktrace_ops *ops, void *data,
11153 unsigned long *end, int *graph)
11154 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11155 struct stack_frame *frame = (struct stack_frame *)bp;
11156 unsigned long *ret_addr = &frame->return_address;
11157
11158 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11159 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11160 unsigned long addr = *ret_addr;
11161
11162 if (!__kernel_text_address(addr))
11163 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11164 ops->address(data, addr, 1);
11165 frame = frame->next_frame;
11166 ret_addr = &frame->return_address;
11167 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11168 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11169 }
11170
11171 return (unsigned long)frame;
11172 @@ -186,7 +186,7 @@ void dump_stack(void)
11173
11174 bp = stack_frame(current, NULL);
11175 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11176 - current->pid, current->comm, print_tainted(),
11177 + task_pid_nr(current), current->comm, print_tainted(),
11178 init_utsname()->release,
11179 (int)strcspn(init_utsname()->version, " "),
11180 init_utsname()->version);
11181 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11182 }
11183 EXPORT_SYMBOL_GPL(oops_begin);
11184
11185 +extern void gr_handle_kernel_exploit(void);
11186 +
11187 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11188 {
11189 if (regs && kexec_should_crash(current))
11190 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11191 panic("Fatal exception in interrupt");
11192 if (panic_on_oops)
11193 panic("Fatal exception");
11194 - do_exit(signr);
11195 +
11196 + gr_handle_kernel_exploit();
11197 +
11198 + do_group_exit(signr);
11199 }
11200
11201 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11202 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11203
11204 show_registers(regs);
11205 #ifdef CONFIG_X86_32
11206 - if (user_mode_vm(regs)) {
11207 + if (user_mode(regs)) {
11208 sp = regs->sp;
11209 ss = regs->ss & 0xffff;
11210 } else {
11211 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11212 unsigned long flags = oops_begin();
11213 int sig = SIGSEGV;
11214
11215 - if (!user_mode_vm(regs))
11216 + if (!user_mode(regs))
11217 report_bug(regs->ip, regs);
11218
11219 if (__die(str, regs, err))
11220 diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11221 --- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11222 +++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11223 @@ -7,6 +7,7 @@
11224 #include <linux/pci_regs.h>
11225 #include <linux/pci_ids.h>
11226 #include <linux/errno.h>
11227 +#include <linux/sched.h>
11228 #include <asm/io.h>
11229 #include <asm/processor.h>
11230 #include <asm/fcntl.h>
11231 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11232 int n;
11233 va_list ap;
11234
11235 + pax_track_stack();
11236 +
11237 va_start(ap, fmt);
11238 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11239 early_console->write(early_console, buf, n);
11240 diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11241 --- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11242 +++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-23 21:48:14.000000000 -0400
11243 @@ -185,13 +185,146 @@
11244 /*CFI_REL_OFFSET gs, PT_GS*/
11245 .endm
11246 .macro SET_KERNEL_GS reg
11247 +
11248 +#ifdef CONFIG_CC_STACKPROTECTOR
11249 movl $(__KERNEL_STACK_CANARY), \reg
11250 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11251 + movl $(__USER_DS), \reg
11252 +#else
11253 + xorl \reg, \reg
11254 +#endif
11255 +
11256 movl \reg, %gs
11257 .endm
11258
11259 #endif /* CONFIG_X86_32_LAZY_GS */
11260
11261 -.macro SAVE_ALL
11262 +.macro pax_enter_kernel
11263 +#ifdef CONFIG_PAX_KERNEXEC
11264 + call pax_enter_kernel
11265 +#endif
11266 +.endm
11267 +
11268 +.macro pax_exit_kernel
11269 +#ifdef CONFIG_PAX_KERNEXEC
11270 + call pax_exit_kernel
11271 +#endif
11272 +.endm
11273 +
11274 +#ifdef CONFIG_PAX_KERNEXEC
11275 +ENTRY(pax_enter_kernel)
11276 +#ifdef CONFIG_PARAVIRT
11277 + pushl %eax
11278 + pushl %ecx
11279 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11280 + mov %eax, %esi
11281 +#else
11282 + mov %cr0, %esi
11283 +#endif
11284 + bts $16, %esi
11285 + jnc 1f
11286 + mov %cs, %esi
11287 + cmp $__KERNEL_CS, %esi
11288 + jz 3f
11289 + ljmp $__KERNEL_CS, $3f
11290 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11291 +2:
11292 +#ifdef CONFIG_PARAVIRT
11293 + mov %esi, %eax
11294 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11295 +#else
11296 + mov %esi, %cr0
11297 +#endif
11298 +3:
11299 +#ifdef CONFIG_PARAVIRT
11300 + popl %ecx
11301 + popl %eax
11302 +#endif
11303 + ret
11304 +ENDPROC(pax_enter_kernel)
11305 +
11306 +ENTRY(pax_exit_kernel)
11307 +#ifdef CONFIG_PARAVIRT
11308 + pushl %eax
11309 + pushl %ecx
11310 +#endif
11311 + mov %cs, %esi
11312 + cmp $__KERNEXEC_KERNEL_CS, %esi
11313 + jnz 2f
11314 +#ifdef CONFIG_PARAVIRT
11315 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11316 + mov %eax, %esi
11317 +#else
11318 + mov %cr0, %esi
11319 +#endif
11320 + btr $16, %esi
11321 + ljmp $__KERNEL_CS, $1f
11322 +1:
11323 +#ifdef CONFIG_PARAVIRT
11324 + mov %esi, %eax
11325 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11326 +#else
11327 + mov %esi, %cr0
11328 +#endif
11329 +2:
11330 +#ifdef CONFIG_PARAVIRT
11331 + popl %ecx
11332 + popl %eax
11333 +#endif
11334 + ret
11335 +ENDPROC(pax_exit_kernel)
11336 +#endif
11337 +
11338 +.macro pax_erase_kstack
11339 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11340 + call pax_erase_kstack
11341 +#endif
11342 +.endm
11343 +
11344 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11345 +/*
11346 + * ebp: thread_info
11347 + * ecx, edx: can be clobbered
11348 + */
11349 +ENTRY(pax_erase_kstack)
11350 + pushl %edi
11351 + pushl %eax
11352 +
11353 + mov TI_lowest_stack(%ebp), %edi
11354 + mov $-0xBEEF, %eax
11355 + std
11356 +
11357 +1: mov %edi, %ecx
11358 + and $THREAD_SIZE_asm - 1, %ecx
11359 + shr $2, %ecx
11360 + repne scasl
11361 + jecxz 2f
11362 +
11363 + cmp $2*16, %ecx
11364 + jc 2f
11365 +
11366 + mov $2*16, %ecx
11367 + repe scasl
11368 + jecxz 2f
11369 + jne 1b
11370 +
11371 +2: cld
11372 + mov %esp, %ecx
11373 + sub %edi, %ecx
11374 + shr $2, %ecx
11375 + rep stosl
11376 +
11377 + mov TI_task_thread_sp0(%ebp), %edi
11378 + sub $128, %edi
11379 + mov %edi, TI_lowest_stack(%ebp)
11380 +
11381 + popl %eax
11382 + popl %edi
11383 + ret
11384 +ENDPROC(pax_erase_kstack)
11385 +#endif
11386 +
11387 +.macro __SAVE_ALL _DS
11388 cld
11389 PUSH_GS
11390 pushl_cfi %fs
11391 @@ -214,7 +347,7 @@
11392 CFI_REL_OFFSET ecx, 0
11393 pushl_cfi %ebx
11394 CFI_REL_OFFSET ebx, 0
11395 - movl $(__USER_DS), %edx
11396 + movl $\_DS, %edx
11397 movl %edx, %ds
11398 movl %edx, %es
11399 movl $(__KERNEL_PERCPU), %edx
11400 @@ -222,6 +355,15 @@
11401 SET_KERNEL_GS %edx
11402 .endm
11403
11404 +.macro SAVE_ALL
11405 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11406 + __SAVE_ALL __KERNEL_DS
11407 + pax_enter_kernel
11408 +#else
11409 + __SAVE_ALL __USER_DS
11410 +#endif
11411 +.endm
11412 +
11413 .macro RESTORE_INT_REGS
11414 popl_cfi %ebx
11415 CFI_RESTORE ebx
11416 @@ -332,7 +474,15 @@ check_userspace:
11417 movb PT_CS(%esp), %al
11418 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11419 cmpl $USER_RPL, %eax
11420 +
11421 +#ifdef CONFIG_PAX_KERNEXEC
11422 + jae resume_userspace
11423 +
11424 + PAX_EXIT_KERNEL
11425 + jmp resume_kernel
11426 +#else
11427 jb resume_kernel # not returning to v8086 or userspace
11428 +#endif
11429
11430 ENTRY(resume_userspace)
11431 LOCKDEP_SYS_EXIT
11432 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11433 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11434 # int/exception return?
11435 jne work_pending
11436 - jmp restore_all
11437 + jmp restore_all_pax
11438 END(ret_from_exception)
11439
11440 #ifdef CONFIG_PREEMPT
11441 @@ -394,23 +544,34 @@ sysenter_past_esp:
11442 /*CFI_REL_OFFSET cs, 0*/
11443 /*
11444 * Push current_thread_info()->sysenter_return to the stack.
11445 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11446 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11447 */
11448 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11449 + pushl_cfi $0
11450 CFI_REL_OFFSET eip, 0
11451
11452 pushl_cfi %eax
11453 SAVE_ALL
11454 + GET_THREAD_INFO(%ebp)
11455 + movl TI_sysenter_return(%ebp),%ebp
11456 + movl %ebp,PT_EIP(%esp)
11457 ENABLE_INTERRUPTS(CLBR_NONE)
11458
11459 /*
11460 * Load the potential sixth argument from user stack.
11461 * Careful about security.
11462 */
11463 + movl PT_OLDESP(%esp),%ebp
11464 +
11465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11466 + mov PT_OLDSS(%esp),%ds
11467 +1: movl %ds:(%ebp),%ebp
11468 + push %ss
11469 + pop %ds
11470 +#else
11471 cmpl $__PAGE_OFFSET-3,%ebp
11472 jae syscall_fault
11473 1: movl (%ebp),%ebp
11474 +#endif
11475 +
11476 movl %ebp,PT_EBP(%esp)
11477 .section __ex_table,"a"
11478 .align 4
11479 @@ -433,12 +594,23 @@ sysenter_do_call:
11480 testl $_TIF_ALLWORK_MASK, %ecx
11481 jne sysexit_audit
11482 sysenter_exit:
11483 +
11484 +#ifdef CONFIG_PAX_RANDKSTACK
11485 + pushl_cfi %eax
11486 + call pax_randomize_kstack
11487 + popl_cfi %eax
11488 +#endif
11489 +
11490 + pax_erase_kstack
11491 +
11492 /* if something modifies registers it must also disable sysexit */
11493 movl PT_EIP(%esp), %edx
11494 movl PT_OLDESP(%esp), %ecx
11495 xorl %ebp,%ebp
11496 TRACE_IRQS_ON
11497 1: mov PT_FS(%esp), %fs
11498 +2: mov PT_DS(%esp), %ds
11499 +3: mov PT_ES(%esp), %es
11500 PTGS_TO_GS
11501 ENABLE_INTERRUPTS_SYSEXIT
11502
11503 @@ -455,6 +627,9 @@ sysenter_audit:
11504 movl %eax,%edx /* 2nd arg: syscall number */
11505 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11506 call audit_syscall_entry
11507 +
11508 + pax_erase_kstack
11509 +
11510 pushl_cfi %ebx
11511 movl PT_EAX(%esp),%eax /* reload syscall number */
11512 jmp sysenter_do_call
11513 @@ -481,11 +656,17 @@ sysexit_audit:
11514
11515 CFI_ENDPROC
11516 .pushsection .fixup,"ax"
11517 -2: movl $0,PT_FS(%esp)
11518 +4: movl $0,PT_FS(%esp)
11519 + jmp 1b
11520 +5: movl $0,PT_DS(%esp)
11521 + jmp 1b
11522 +6: movl $0,PT_ES(%esp)
11523 jmp 1b
11524 .section __ex_table,"a"
11525 .align 4
11526 - .long 1b,2b
11527 + .long 1b,4b
11528 + .long 2b,5b
11529 + .long 3b,6b
11530 .popsection
11531 PTGS_TO_GS_EX
11532 ENDPROC(ia32_sysenter_target)
11533 @@ -518,6 +699,14 @@ syscall_exit:
11534 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11535 jne syscall_exit_work
11536
11537 +restore_all_pax:
11538 +
11539 +#ifdef CONFIG_PAX_RANDKSTACK
11540 + call pax_randomize_kstack
11541 +#endif
11542 +
11543 + pax_erase_kstack
11544 +
11545 restore_all:
11546 TRACE_IRQS_IRET
11547 restore_all_notrace:
11548 @@ -577,14 +766,34 @@ ldt_ss:
11549 * compensating for the offset by changing to the ESPFIX segment with
11550 * a base address that matches for the difference.
11551 */
11552 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11553 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11554 mov %esp, %edx /* load kernel esp */
11555 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11556 mov %dx, %ax /* eax: new kernel esp */
11557 sub %eax, %edx /* offset (low word is 0) */
11558 +#ifdef CONFIG_SMP
11559 + movl PER_CPU_VAR(cpu_number), %ebx
11560 + shll $PAGE_SHIFT_asm, %ebx
11561 + addl $cpu_gdt_table, %ebx
11562 +#else
11563 + movl $cpu_gdt_table, %ebx
11564 +#endif
11565 shr $16, %edx
11566 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11567 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11568 +
11569 +#ifdef CONFIG_PAX_KERNEXEC
11570 + mov %cr0, %esi
11571 + btr $16, %esi
11572 + mov %esi, %cr0
11573 +#endif
11574 +
11575 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11576 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11577 +
11578 +#ifdef CONFIG_PAX_KERNEXEC
11579 + bts $16, %esi
11580 + mov %esi, %cr0
11581 +#endif
11582 +
11583 pushl_cfi $__ESPFIX_SS
11584 pushl_cfi %eax /* new kernel esp */
11585 /* Disable interrupts, but do not irqtrace this section: we
11586 @@ -613,29 +822,23 @@ work_resched:
11587 movl TI_flags(%ebp), %ecx
11588 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11589 # than syscall tracing?
11590 - jz restore_all
11591 + jz restore_all_pax
11592 testb $_TIF_NEED_RESCHED, %cl
11593 jnz work_resched
11594
11595 work_notifysig: # deal with pending signals and
11596 # notify-resume requests
11597 + movl %esp, %eax
11598 #ifdef CONFIG_VM86
11599 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11600 - movl %esp, %eax
11601 - jne work_notifysig_v86 # returning to kernel-space or
11602 + jz 1f # returning to kernel-space or
11603 # vm86-space
11604 - xorl %edx, %edx
11605 - call do_notify_resume
11606 - jmp resume_userspace_sig
11607
11608 - ALIGN
11609 -work_notifysig_v86:
11610 pushl_cfi %ecx # save ti_flags for do_notify_resume
11611 call save_v86_state # %eax contains pt_regs pointer
11612 popl_cfi %ecx
11613 movl %eax, %esp
11614 -#else
11615 - movl %esp, %eax
11616 +1:
11617 #endif
11618 xorl %edx, %edx
11619 call do_notify_resume
11620 @@ -648,6 +851,9 @@ syscall_trace_entry:
11621 movl $-ENOSYS,PT_EAX(%esp)
11622 movl %esp, %eax
11623 call syscall_trace_enter
11624 +
11625 + pax_erase_kstack
11626 +
11627 /* What it returned is what we'll actually use. */
11628 cmpl $(nr_syscalls), %eax
11629 jnae syscall_call
11630 @@ -670,6 +876,10 @@ END(syscall_exit_work)
11631
11632 RING0_INT_FRAME # can't unwind into user space anyway
11633 syscall_fault:
11634 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11635 + push %ss
11636 + pop %ds
11637 +#endif
11638 GET_THREAD_INFO(%ebp)
11639 movl $-EFAULT,PT_EAX(%esp)
11640 jmp resume_userspace
11641 @@ -752,6 +962,36 @@ ptregs_clone:
11642 CFI_ENDPROC
11643 ENDPROC(ptregs_clone)
11644
11645 + ALIGN;
11646 +ENTRY(kernel_execve)
11647 + CFI_STARTPROC
11648 + pushl_cfi %ebp
11649 + sub $PT_OLDSS+4,%esp
11650 + pushl_cfi %edi
11651 + pushl_cfi %ecx
11652 + pushl_cfi %eax
11653 + lea 3*4(%esp),%edi
11654 + mov $PT_OLDSS/4+1,%ecx
11655 + xorl %eax,%eax
11656 + rep stosl
11657 + popl_cfi %eax
11658 + popl_cfi %ecx
11659 + popl_cfi %edi
11660 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11661 + pushl_cfi %esp
11662 + call sys_execve
11663 + add $4,%esp
11664 + CFI_ADJUST_CFA_OFFSET -4
11665 + GET_THREAD_INFO(%ebp)
11666 + test %eax,%eax
11667 + jz syscall_exit
11668 + add $PT_OLDSS+4,%esp
11669 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11670 + popl_cfi %ebp
11671 + ret
11672 + CFI_ENDPROC
11673 +ENDPROC(kernel_execve)
11674 +
11675 .macro FIXUP_ESPFIX_STACK
11676 /*
11677 * Switch back for ESPFIX stack to the normal zerobased stack
11678 @@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11679 * normal stack and adjusts ESP with the matching offset.
11680 */
11681 /* fixup the stack */
11682 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11683 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11684 +#ifdef CONFIG_SMP
11685 + movl PER_CPU_VAR(cpu_number), %ebx
11686 + shll $PAGE_SHIFT_asm, %ebx
11687 + addl $cpu_gdt_table, %ebx
11688 +#else
11689 + movl $cpu_gdt_table, %ebx
11690 +#endif
11691 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11692 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11693 shl $16, %eax
11694 addl %esp, %eax /* the adjusted stack pointer */
11695 pushl_cfi $__KERNEL_DS
11696 @@ -1213,7 +1460,6 @@ return_to_handler:
11697 jmp *%ecx
11698 #endif
11699
11700 -.section .rodata,"a"
11701 #include "syscall_table_32.S"
11702
11703 syscall_table_size=(.-sys_call_table)
11704 @@ -1259,9 +1505,12 @@ error_code:
11705 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11706 REG_TO_PTGS %ecx
11707 SET_KERNEL_GS %ecx
11708 - movl $(__USER_DS), %ecx
11709 + movl $(__KERNEL_DS), %ecx
11710 movl %ecx, %ds
11711 movl %ecx, %es
11712 +
11713 + pax_enter_kernel
11714 +
11715 TRACE_IRQS_OFF
11716 movl %esp,%eax # pt_regs pointer
11717 call *%edi
11718 @@ -1346,6 +1595,9 @@ nmi_stack_correct:
11719 xorl %edx,%edx # zero error code
11720 movl %esp,%eax # pt_regs pointer
11721 call do_nmi
11722 +
11723 + pax_exit_kernel
11724 +
11725 jmp restore_all_notrace
11726 CFI_ENDPROC
11727
11728 @@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11729 FIXUP_ESPFIX_STACK # %eax == %esp
11730 xorl %edx,%edx # zero error code
11731 call do_nmi
11732 +
11733 + pax_exit_kernel
11734 +
11735 RESTORE_REGS
11736 lss 12+4(%esp), %esp # back to espfix stack
11737 CFI_ADJUST_CFA_OFFSET -24
11738 diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11739 --- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11740 +++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-08-26 19:49:56.000000000 -0400
11741 @@ -53,6 +53,7 @@
11742 #include <asm/paravirt.h>
11743 #include <asm/ftrace.h>
11744 #include <asm/percpu.h>
11745 +#include <asm/pgtable.h>
11746
11747 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11748 #include <linux/elf-em.h>
11749 @@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11750 ENDPROC(native_usergs_sysret64)
11751 #endif /* CONFIG_PARAVIRT */
11752
11753 + .macro ljmpq sel, off
11754 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11755 + .byte 0x48; ljmp *1234f(%rip)
11756 + .pushsection .rodata
11757 + .align 16
11758 + 1234: .quad \off; .word \sel
11759 + .popsection
11760 +#else
11761 + pushq $\sel
11762 + pushq $\off
11763 + lretq
11764 +#endif
11765 + .endm
11766 +
11767 + .macro pax_enter_kernel
11768 +#ifdef CONFIG_PAX_KERNEXEC
11769 + call pax_enter_kernel
11770 +#endif
11771 + .endm
11772 +
11773 + .macro pax_exit_kernel
11774 +#ifdef CONFIG_PAX_KERNEXEC
11775 + call pax_exit_kernel
11776 +#endif
11777 + .endm
11778 +
11779 +#ifdef CONFIG_PAX_KERNEXEC
11780 +ENTRY(pax_enter_kernel)
11781 + pushq %rdi
11782 +
11783 +#ifdef CONFIG_PARAVIRT
11784 + PV_SAVE_REGS(CLBR_RDI)
11785 +#endif
11786 +
11787 + GET_CR0_INTO_RDI
11788 + bts $16,%rdi
11789 + jnc 1f
11790 + mov %cs,%edi
11791 + cmp $__KERNEL_CS,%edi
11792 + jz 3f
11793 + ljmpq __KERNEL_CS,3f
11794 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11795 +2: SET_RDI_INTO_CR0
11796 +3:
11797 +
11798 +#ifdef CONFIG_PARAVIRT
11799 + PV_RESTORE_REGS(CLBR_RDI)
11800 +#endif
11801 +
11802 + popq %rdi
11803 + retq
11804 +ENDPROC(pax_enter_kernel)
11805 +
11806 +ENTRY(pax_exit_kernel)
11807 + pushq %rdi
11808 +
11809 +#ifdef CONFIG_PARAVIRT
11810 + PV_SAVE_REGS(CLBR_RDI)
11811 +#endif
11812 +
11813 + mov %cs,%rdi
11814 + cmp $__KERNEXEC_KERNEL_CS,%edi
11815 + jnz 2f
11816 + GET_CR0_INTO_RDI
11817 + btr $16,%rdi
11818 + ljmpq __KERNEL_CS,1f
11819 +1: SET_RDI_INTO_CR0
11820 +2:
11821 +
11822 +#ifdef CONFIG_PARAVIRT
11823 + PV_RESTORE_REGS(CLBR_RDI);
11824 +#endif
11825 +
11826 + popq %rdi
11827 + retq
11828 +ENDPROC(pax_exit_kernel)
11829 +#endif
11830 +
11831 + .macro pax_enter_kernel_user
11832 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11833 + call pax_enter_kernel_user
11834 +#endif
11835 + .endm
11836 +
11837 + .macro pax_exit_kernel_user
11838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11839 + call pax_exit_kernel_user
11840 +#endif
11841 +#ifdef CONFIG_PAX_RANDKSTACK
11842 + push %rax
11843 + call pax_randomize_kstack
11844 + pop %rax
11845 +#endif
11846 + .endm
11847 +
11848 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11849 +ENTRY(pax_enter_kernel_user)
11850 + pushq %rdi
11851 + pushq %rbx
11852 +
11853 +#ifdef CONFIG_PARAVIRT
11854 + PV_SAVE_REGS(CLBR_RDI)
11855 +#endif
11856 +
11857 + GET_CR3_INTO_RDI
11858 + mov %rdi,%rbx
11859 + add $__START_KERNEL_map,%rbx
11860 + sub phys_base(%rip),%rbx
11861 +
11862 +#ifdef CONFIG_PARAVIRT
11863 + pushq %rdi
11864 + cmpl $0, pv_info+PARAVIRT_enabled
11865 + jz 1f
11866 + i = 0
11867 + .rept USER_PGD_PTRS
11868 + mov i*8(%rbx),%rsi
11869 + mov $0,%sil
11870 + lea i*8(%rbx),%rdi
11871 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11872 + i = i + 1
11873 + .endr
11874 + jmp 2f
11875 +1:
11876 +#endif
11877 +
11878 + i = 0
11879 + .rept USER_PGD_PTRS
11880 + movb $0,i*8(%rbx)
11881 + i = i + 1
11882 + .endr
11883 +
11884 +#ifdef CONFIG_PARAVIRT
11885 +2: popq %rdi
11886 +#endif
11887 + SET_RDI_INTO_CR3
11888 +
11889 +#ifdef CONFIG_PAX_KERNEXEC
11890 + GET_CR0_INTO_RDI
11891 + bts $16,%rdi
11892 + SET_RDI_INTO_CR0
11893 +#endif
11894 +
11895 +#ifdef CONFIG_PARAVIRT
11896 + PV_RESTORE_REGS(CLBR_RDI)
11897 +#endif
11898 +
11899 + popq %rbx
11900 + popq %rdi
11901 + retq
11902 +ENDPROC(pax_enter_kernel_user)
11903 +
11904 +ENTRY(pax_exit_kernel_user)
11905 + push %rdi
11906 +
11907 +#ifdef CONFIG_PARAVIRT
11908 + pushq %rbx
11909 + PV_SAVE_REGS(CLBR_RDI)
11910 +#endif
11911 +
11912 +#ifdef CONFIG_PAX_KERNEXEC
11913 + GET_CR0_INTO_RDI
11914 + btr $16,%rdi
11915 + SET_RDI_INTO_CR0
11916 +#endif
11917 +
11918 + GET_CR3_INTO_RDI
11919 + add $__START_KERNEL_map,%rdi
11920 + sub phys_base(%rip),%rdi
11921 +
11922 +#ifdef CONFIG_PARAVIRT
11923 + cmpl $0, pv_info+PARAVIRT_enabled
11924 + jz 1f
11925 + mov %rdi,%rbx
11926 + i = 0
11927 + .rept USER_PGD_PTRS
11928 + mov i*8(%rbx),%rsi
11929 + mov $0x67,%sil
11930 + lea i*8(%rbx),%rdi
11931 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11932 + i = i + 1
11933 + .endr
11934 + jmp 2f
11935 +1:
11936 +#endif
11937 +
11938 + i = 0
11939 + .rept USER_PGD_PTRS
11940 + movb $0x67,i*8(%rdi)
11941 + i = i + 1
11942 + .endr
11943 +
11944 +#ifdef CONFIG_PARAVIRT
11945 +2: PV_RESTORE_REGS(CLBR_RDI)
11946 + popq %rbx
11947 +#endif
11948 +
11949 + popq %rdi
11950 + retq
11951 +ENDPROC(pax_exit_kernel_user)
11952 +#endif
11953 +
11954 + .macro pax_erase_kstack
11955 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11956 + call pax_erase_kstack
11957 +#endif
11958 + .endm
11959 +
11960 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11961 +/*
11962 + * r10: thread_info
11963 + * rcx, rdx: can be clobbered
11964 + */
11965 +ENTRY(pax_erase_kstack)
11966 + pushq %rdi
11967 + pushq %rax
11968 + pushq %r10
11969 +
11970 + GET_THREAD_INFO(%r10)
11971 + mov TI_lowest_stack(%r10), %rdi
11972 + mov $-0xBEEF, %rax
11973 + std
11974 +
11975 +1: mov %edi, %ecx
11976 + and $THREAD_SIZE_asm - 1, %ecx
11977 + shr $3, %ecx
11978 + repne scasq
11979 + jecxz 2f
11980 +
11981 + cmp $2*8, %ecx
11982 + jc 2f
11983 +
11984 + mov $2*8, %ecx
11985 + repe scasq
11986 + jecxz 2f
11987 + jne 1b
11988 +
11989 +2: cld
11990 + mov %esp, %ecx
11991 + sub %edi, %ecx
11992 +
11993 + cmp $THREAD_SIZE_asm, %rcx
11994 + jb 3f
11995 + ud2
11996 +3:
11997 +
11998 + shr $3, %ecx
11999 + rep stosq
12000 +
12001 + mov TI_task_thread_sp0(%r10), %rdi
12002 + sub $256, %rdi
12003 + mov %rdi, TI_lowest_stack(%r10)
12004 +
12005 + popq %r10
12006 + popq %rax
12007 + popq %rdi
12008 + ret
12009 +ENDPROC(pax_erase_kstack)
12010 +#endif
12011
12012 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12013 #ifdef CONFIG_TRACE_IRQFLAGS
12014 @@ -318,7 +577,7 @@ ENTRY(save_args)
12015 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12016 movq_cfi rbp, 8 /* push %rbp */
12017 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12018 - testl $3, CS(%rdi)
12019 + testb $3, CS(%rdi)
12020 je 1f
12021 SWAPGS
12022 /*
12023 @@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12024
12025 RESTORE_REST
12026
12027 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12028 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12029 je int_ret_from_sys_call
12030
12031 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12032 @@ -455,7 +714,7 @@ END(ret_from_fork)
12033 ENTRY(system_call)
12034 CFI_STARTPROC simple
12035 CFI_SIGNAL_FRAME
12036 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12037 + CFI_DEF_CFA rsp,0
12038 CFI_REGISTER rip,rcx
12039 /*CFI_REGISTER rflags,r11*/
12040 SWAPGS_UNSAFE_STACK
12041 @@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12042
12043 movq %rsp,PER_CPU_VAR(old_rsp)
12044 movq PER_CPU_VAR(kernel_stack),%rsp
12045 + pax_enter_kernel_user
12046 /*
12047 * No need to follow this irqs off/on section - it's straight
12048 * and short:
12049 */
12050 ENABLE_INTERRUPTS(CLBR_NONE)
12051 - SAVE_ARGS 8,1
12052 + SAVE_ARGS 8*6,1
12053 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12054 movq %rcx,RIP-ARGOFFSET(%rsp)
12055 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12056 @@ -502,6 +762,8 @@ sysret_check:
12057 andl %edi,%edx
12058 jnz sysret_careful
12059 CFI_REMEMBER_STATE
12060 + pax_exit_kernel_user
12061 + pax_erase_kstack
12062 /*
12063 * sysretq will re-enable interrupts:
12064 */
12065 @@ -560,6 +822,9 @@ auditsys:
12066 movq %rax,%rsi /* 2nd arg: syscall number */
12067 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12068 call audit_syscall_entry
12069 +
12070 + pax_erase_kstack
12071 +
12072 LOAD_ARGS 0 /* reload call-clobbered registers */
12073 jmp system_call_fastpath
12074
12075 @@ -590,6 +855,9 @@ tracesys:
12076 FIXUP_TOP_OF_STACK %rdi
12077 movq %rsp,%rdi
12078 call syscall_trace_enter
12079 +
12080 + pax_erase_kstack
12081 +
12082 /*
12083 * Reload arg registers from stack in case ptrace changed them.
12084 * We don't reload %rax because syscall_trace_enter() returned
12085 @@ -611,7 +879,7 @@ tracesys:
12086 GLOBAL(int_ret_from_sys_call)
12087 DISABLE_INTERRUPTS(CLBR_NONE)
12088 TRACE_IRQS_OFF
12089 - testl $3,CS-ARGOFFSET(%rsp)
12090 + testb $3,CS-ARGOFFSET(%rsp)
12091 je retint_restore_args
12092 movl $_TIF_ALLWORK_MASK,%edi
12093 /* edi: mask to check */
12094 @@ -793,6 +1061,16 @@ END(interrupt)
12095 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12096 call save_args
12097 PARTIAL_FRAME 0
12098 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12099 + testb $3, CS(%rdi)
12100 + jnz 1f
12101 + pax_enter_kernel
12102 + jmp 2f
12103 +1: pax_enter_kernel_user
12104 +2:
12105 +#else
12106 + pax_enter_kernel
12107 +#endif
12108 call \func
12109 .endm
12110
12111 @@ -825,7 +1103,7 @@ ret_from_intr:
12112 CFI_ADJUST_CFA_OFFSET -8
12113 exit_intr:
12114 GET_THREAD_INFO(%rcx)
12115 - testl $3,CS-ARGOFFSET(%rsp)
12116 + testb $3,CS-ARGOFFSET(%rsp)
12117 je retint_kernel
12118
12119 /* Interrupt came from user space */
12120 @@ -847,12 +1125,15 @@ retint_swapgs: /* return to user-space
12121 * The iretq could re-enable interrupts:
12122 */
12123 DISABLE_INTERRUPTS(CLBR_ANY)
12124 + pax_exit_kernel_user
12125 + pax_erase_kstack
12126 TRACE_IRQS_IRETQ
12127 SWAPGS
12128 jmp restore_args
12129
12130 retint_restore_args: /* return to kernel space */
12131 DISABLE_INTERRUPTS(CLBR_ANY)
12132 + pax_exit_kernel
12133 /*
12134 * The iretq could re-enable interrupts:
12135 */
12136 @@ -1027,6 +1308,16 @@ ENTRY(\sym)
12137 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12138 call error_entry
12139 DEFAULT_FRAME 0
12140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12141 + testb $3, CS(%rsp)
12142 + jnz 1f
12143 + pax_enter_kernel
12144 + jmp 2f
12145 +1: pax_enter_kernel_user
12146 +2:
12147 +#else
12148 + pax_enter_kernel
12149 +#endif
12150 movq %rsp,%rdi /* pt_regs pointer */
12151 xorl %esi,%esi /* no error code */
12152 call \do_sym
12153 @@ -1044,6 +1335,16 @@ ENTRY(\sym)
12154 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12155 call save_paranoid
12156 TRACE_IRQS_OFF
12157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12158 + testb $3, CS(%rsp)
12159 + jnz 1f
12160 + pax_enter_kernel
12161 + jmp 2f
12162 +1: pax_enter_kernel_user
12163 +2:
12164 +#else
12165 + pax_enter_kernel
12166 +#endif
12167 movq %rsp,%rdi /* pt_regs pointer */
12168 xorl %esi,%esi /* no error code */
12169 call \do_sym
12170 @@ -1052,7 +1353,7 @@ ENTRY(\sym)
12171 END(\sym)
12172 .endm
12173
12174 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12175 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12176 .macro paranoidzeroentry_ist sym do_sym ist
12177 ENTRY(\sym)
12178 INTR_FRAME
12179 @@ -1062,8 +1363,24 @@ ENTRY(\sym)
12180 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12181 call save_paranoid
12182 TRACE_IRQS_OFF
12183 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12184 + testb $3, CS(%rsp)
12185 + jnz 1f
12186 + pax_enter_kernel
12187 + jmp 2f
12188 +1: pax_enter_kernel_user
12189 +2:
12190 +#else
12191 + pax_enter_kernel
12192 +#endif
12193 movq %rsp,%rdi /* pt_regs pointer */
12194 xorl %esi,%esi /* no error code */
12195 +#ifdef CONFIG_SMP
12196 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12197 + lea init_tss(%r12), %r12
12198 +#else
12199 + lea init_tss(%rip), %r12
12200 +#endif
12201 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12202 call \do_sym
12203 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12204 @@ -1080,6 +1397,16 @@ ENTRY(\sym)
12205 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12206 call error_entry
12207 DEFAULT_FRAME 0
12208 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12209 + testb $3, CS(%rsp)
12210 + jnz 1f
12211 + pax_enter_kernel
12212 + jmp 2f
12213 +1: pax_enter_kernel_user
12214 +2:
12215 +#else
12216 + pax_enter_kernel
12217 +#endif
12218 movq %rsp,%rdi /* pt_regs pointer */
12219 movq ORIG_RAX(%rsp),%rsi /* get error code */
12220 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12221 @@ -1099,6 +1426,16 @@ ENTRY(\sym)
12222 call save_paranoid
12223 DEFAULT_FRAME 0
12224 TRACE_IRQS_OFF
12225 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12226 + testb $3, CS(%rsp)
12227 + jnz 1f
12228 + pax_enter_kernel
12229 + jmp 2f
12230 +1: pax_enter_kernel_user
12231 +2:
12232 +#else
12233 + pax_enter_kernel
12234 +#endif
12235 movq %rsp,%rdi /* pt_regs pointer */
12236 movq ORIG_RAX(%rsp),%rsi /* get error code */
12237 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12238 @@ -1361,14 +1698,27 @@ ENTRY(paranoid_exit)
12239 TRACE_IRQS_OFF
12240 testl %ebx,%ebx /* swapgs needed? */
12241 jnz paranoid_restore
12242 - testl $3,CS(%rsp)
12243 + testb $3,CS(%rsp)
12244 jnz paranoid_userspace
12245 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12246 + pax_exit_kernel
12247 + TRACE_IRQS_IRETQ 0
12248 + SWAPGS_UNSAFE_STACK
12249 + RESTORE_ALL 8
12250 + jmp irq_return
12251 +#endif
12252 paranoid_swapgs:
12253 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12254 + pax_exit_kernel_user
12255 +#else
12256 + pax_exit_kernel
12257 +#endif
12258 TRACE_IRQS_IRETQ 0
12259 SWAPGS_UNSAFE_STACK
12260 RESTORE_ALL 8
12261 jmp irq_return
12262 paranoid_restore:
12263 + pax_exit_kernel
12264 TRACE_IRQS_IRETQ 0
12265 RESTORE_ALL 8
12266 jmp irq_return
12267 @@ -1426,7 +1776,7 @@ ENTRY(error_entry)
12268 movq_cfi r14, R14+8
12269 movq_cfi r15, R15+8
12270 xorl %ebx,%ebx
12271 - testl $3,CS+8(%rsp)
12272 + testb $3,CS+8(%rsp)
12273 je error_kernelspace
12274 error_swapgs:
12275 SWAPGS
12276 @@ -1490,6 +1840,16 @@ ENTRY(nmi)
12277 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12278 call save_paranoid
12279 DEFAULT_FRAME 0
12280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12281 + testb $3, CS(%rsp)
12282 + jnz 1f
12283 + pax_enter_kernel
12284 + jmp 2f
12285 +1: pax_enter_kernel_user
12286 +2:
12287 +#else
12288 + pax_enter_kernel
12289 +#endif
12290 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12291 movq %rsp,%rdi
12292 movq $-1,%rsi
12293 @@ -1500,11 +1860,25 @@ ENTRY(nmi)
12294 DISABLE_INTERRUPTS(CLBR_NONE)
12295 testl %ebx,%ebx /* swapgs needed? */
12296 jnz nmi_restore
12297 - testl $3,CS(%rsp)
12298 + testb $3,CS(%rsp)
12299 jnz nmi_userspace
12300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12301 + pax_exit_kernel
12302 + SWAPGS_UNSAFE_STACK
12303 + RESTORE_ALL 8
12304 + jmp irq_return
12305 +#endif
12306 nmi_swapgs:
12307 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12308 + pax_exit_kernel_user
12309 +#else
12310 + pax_exit_kernel
12311 +#endif
12312 SWAPGS_UNSAFE_STACK
12313 + RESTORE_ALL 8
12314 + jmp irq_return
12315 nmi_restore:
12316 + pax_exit_kernel
12317 RESTORE_ALL 8
12318 jmp irq_return
12319 nmi_userspace:
12320 diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12321 --- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12322 +++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12323 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12324 static const void *mod_code_newcode; /* holds the text to write to the IP */
12325
12326 static unsigned nmi_wait_count;
12327 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12328 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12329
12330 int ftrace_arch_read_dyn_info(char *buf, int size)
12331 {
12332 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12333
12334 r = snprintf(buf, size, "%u %u",
12335 nmi_wait_count,
12336 - atomic_read(&nmi_update_count));
12337 + atomic_read_unchecked(&nmi_update_count));
12338 return r;
12339 }
12340
12341 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12342
12343 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12344 smp_rmb();
12345 + pax_open_kernel();
12346 ftrace_mod_code();
12347 - atomic_inc(&nmi_update_count);
12348 + pax_close_kernel();
12349 + atomic_inc_unchecked(&nmi_update_count);
12350 }
12351 /* Must have previous changes seen before executions */
12352 smp_mb();
12353 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12354 {
12355 unsigned char replaced[MCOUNT_INSN_SIZE];
12356
12357 + ip = ktla_ktva(ip);
12358 +
12359 /*
12360 * Note: Due to modules and __init, code can
12361 * disappear and change, we need to protect against faulting
12362 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12363 unsigned char old[MCOUNT_INSN_SIZE], *new;
12364 int ret;
12365
12366 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12367 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12368 new = ftrace_call_replace(ip, (unsigned long)func);
12369 ret = ftrace_modify_code(ip, old, new);
12370
12371 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12372 {
12373 unsigned char code[MCOUNT_INSN_SIZE];
12374
12375 + ip = ktla_ktva(ip);
12376 +
12377 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12378 return -EFAULT;
12379
12380 diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12381 --- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12382 +++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12383 @@ -19,6 +19,7 @@
12384 #include <asm/io_apic.h>
12385 #include <asm/bios_ebda.h>
12386 #include <asm/tlbflush.h>
12387 +#include <asm/boot.h>
12388
12389 static void __init i386_default_early_setup(void)
12390 {
12391 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12392 {
12393 memblock_init();
12394
12395 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12396 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12397
12398 #ifdef CONFIG_BLK_DEV_INITRD
12399 /* Reserve INITRD */
12400 diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12401 --- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12402 +++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12403 @@ -25,6 +25,12 @@
12404 /* Physical address */
12405 #define pa(X) ((X) - __PAGE_OFFSET)
12406
12407 +#ifdef CONFIG_PAX_KERNEXEC
12408 +#define ta(X) (X)
12409 +#else
12410 +#define ta(X) ((X) - __PAGE_OFFSET)
12411 +#endif
12412 +
12413 /*
12414 * References to members of the new_cpu_data structure.
12415 */
12416 @@ -54,11 +60,7 @@
12417 * and small than max_low_pfn, otherwise will waste some page table entries
12418 */
12419
12420 -#if PTRS_PER_PMD > 1
12421 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12422 -#else
12423 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12424 -#endif
12425 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12426
12427 /* Number of possible pages in the lowmem region */
12428 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12429 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12430 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12431
12432 /*
12433 + * Real beginning of normal "text" segment
12434 + */
12435 +ENTRY(stext)
12436 +ENTRY(_stext)
12437 +
12438 +/*
12439 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12440 * %esi points to the real-mode code as a 32-bit pointer.
12441 * CS and DS must be 4 GB flat segments, but we don't depend on
12442 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12443 * can.
12444 */
12445 __HEAD
12446 +
12447 +#ifdef CONFIG_PAX_KERNEXEC
12448 + jmp startup_32
12449 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12450 +.fill PAGE_SIZE-5,1,0xcc
12451 +#endif
12452 +
12453 ENTRY(startup_32)
12454 movl pa(stack_start),%ecx
12455
12456 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12457 2:
12458 leal -__PAGE_OFFSET(%ecx),%esp
12459
12460 +#ifdef CONFIG_SMP
12461 + movl $pa(cpu_gdt_table),%edi
12462 + movl $__per_cpu_load,%eax
12463 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12464 + rorl $16,%eax
12465 + movb %al,__KERNEL_PERCPU + 4(%edi)
12466 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12467 + movl $__per_cpu_end - 1,%eax
12468 + subl $__per_cpu_start,%eax
12469 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12470 +#endif
12471 +
12472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12473 + movl $NR_CPUS,%ecx
12474 + movl $pa(cpu_gdt_table),%edi
12475 +1:
12476 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12477 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12478 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12479 + addl $PAGE_SIZE_asm,%edi
12480 + loop 1b
12481 +#endif
12482 +
12483 +#ifdef CONFIG_PAX_KERNEXEC
12484 + movl $pa(boot_gdt),%edi
12485 + movl $__LOAD_PHYSICAL_ADDR,%eax
12486 + movw %ax,__BOOT_CS + 2(%edi)
12487 + rorl $16,%eax
12488 + movb %al,__BOOT_CS + 4(%edi)
12489 + movb %ah,__BOOT_CS + 7(%edi)
12490 + rorl $16,%eax
12491 +
12492 + ljmp $(__BOOT_CS),$1f
12493 +1:
12494 +
12495 + movl $NR_CPUS,%ecx
12496 + movl $pa(cpu_gdt_table),%edi
12497 + addl $__PAGE_OFFSET,%eax
12498 +1:
12499 + movw %ax,__KERNEL_CS + 2(%edi)
12500 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12501 + rorl $16,%eax
12502 + movb %al,__KERNEL_CS + 4(%edi)
12503 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12504 + movb %ah,__KERNEL_CS + 7(%edi)
12505 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12506 + rorl $16,%eax
12507 + addl $PAGE_SIZE_asm,%edi
12508 + loop 1b
12509 +#endif
12510 +
12511 /*
12512 * Clear BSS first so that there are no surprises...
12513 */
12514 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12515 movl %eax, pa(max_pfn_mapped)
12516
12517 /* Do early initialization of the fixmap area */
12518 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12519 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12520 +#ifdef CONFIG_COMPAT_VDSO
12521 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12522 +#else
12523 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12524 +#endif
12525 #else /* Not PAE */
12526
12527 page_pde_offset = (__PAGE_OFFSET >> 20);
12528 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12529 movl %eax, pa(max_pfn_mapped)
12530
12531 /* Do early initialization of the fixmap area */
12532 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12533 - movl %eax,pa(initial_page_table+0xffc)
12534 +#ifdef CONFIG_COMPAT_VDSO
12535 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12536 +#else
12537 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12538 +#endif
12539 #endif
12540
12541 #ifdef CONFIG_PARAVIRT
12542 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12543 cmpl $num_subarch_entries, %eax
12544 jae bad_subarch
12545
12546 - movl pa(subarch_entries)(,%eax,4), %eax
12547 - subl $__PAGE_OFFSET, %eax
12548 - jmp *%eax
12549 + jmp *pa(subarch_entries)(,%eax,4)
12550
12551 bad_subarch:
12552 WEAK(lguest_entry)
12553 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12554 __INITDATA
12555
12556 subarch_entries:
12557 - .long default_entry /* normal x86/PC */
12558 - .long lguest_entry /* lguest hypervisor */
12559 - .long xen_entry /* Xen hypervisor */
12560 - .long default_entry /* Moorestown MID */
12561 + .long ta(default_entry) /* normal x86/PC */
12562 + .long ta(lguest_entry) /* lguest hypervisor */
12563 + .long ta(xen_entry) /* Xen hypervisor */
12564 + .long ta(default_entry) /* Moorestown MID */
12565 num_subarch_entries = (. - subarch_entries) / 4
12566 .previous
12567 #else
12568 @@ -312,6 +382,7 @@ default_entry:
12569 orl %edx,%eax
12570 movl %eax,%cr4
12571
12572 +#ifdef CONFIG_X86_PAE
12573 testb $X86_CR4_PAE, %al # check if PAE is enabled
12574 jz 6f
12575
12576 @@ -340,6 +411,9 @@ default_entry:
12577 /* Make changes effective */
12578 wrmsr
12579
12580 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12581 +#endif
12582 +
12583 6:
12584
12585 /*
12586 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12587 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12588 movl %eax,%ss # after changing gdt.
12589
12590 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12591 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12592 movl %eax,%ds
12593 movl %eax,%es
12594
12595 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12596 */
12597 cmpb $0,ready
12598 jne 1f
12599 - movl $gdt_page,%eax
12600 + movl $cpu_gdt_table,%eax
12601 movl $stack_canary,%ecx
12602 +#ifdef CONFIG_SMP
12603 + addl $__per_cpu_load,%ecx
12604 +#endif
12605 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12606 shrl $16, %ecx
12607 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12608 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12609 1:
12610 -#endif
12611 movl $(__KERNEL_STACK_CANARY),%eax
12612 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12613 + movl $(__USER_DS),%eax
12614 +#else
12615 + xorl %eax,%eax
12616 +#endif
12617 movl %eax,%gs
12618
12619 xorl %eax,%eax # Clear LDT
12620 @@ -558,22 +639,22 @@ early_page_fault:
12621 jmp early_fault
12622
12623 early_fault:
12624 - cld
12625 #ifdef CONFIG_PRINTK
12626 + cmpl $1,%ss:early_recursion_flag
12627 + je hlt_loop
12628 + incl %ss:early_recursion_flag
12629 + cld
12630 pusha
12631 movl $(__KERNEL_DS),%eax
12632 movl %eax,%ds
12633 movl %eax,%es
12634 - cmpl $2,early_recursion_flag
12635 - je hlt_loop
12636 - incl early_recursion_flag
12637 movl %cr2,%eax
12638 pushl %eax
12639 pushl %edx /* trapno */
12640 pushl $fault_msg
12641 call printk
12642 +; call dump_stack
12643 #endif
12644 - call dump_stack
12645 hlt_loop:
12646 hlt
12647 jmp hlt_loop
12648 @@ -581,8 +662,11 @@ hlt_loop:
12649 /* This is the default interrupt "handler" :-) */
12650 ALIGN
12651 ignore_int:
12652 - cld
12653 #ifdef CONFIG_PRINTK
12654 + cmpl $2,%ss:early_recursion_flag
12655 + je hlt_loop
12656 + incl %ss:early_recursion_flag
12657 + cld
12658 pushl %eax
12659 pushl %ecx
12660 pushl %edx
12661 @@ -591,9 +675,6 @@ ignore_int:
12662 movl $(__KERNEL_DS),%eax
12663 movl %eax,%ds
12664 movl %eax,%es
12665 - cmpl $2,early_recursion_flag
12666 - je hlt_loop
12667 - incl early_recursion_flag
12668 pushl 16(%esp)
12669 pushl 24(%esp)
12670 pushl 32(%esp)
12671 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12672 /*
12673 * BSS section
12674 */
12675 -__PAGE_ALIGNED_BSS
12676 - .align PAGE_SIZE
12677 #ifdef CONFIG_X86_PAE
12678 +.section .initial_pg_pmd,"a",@progbits
12679 initial_pg_pmd:
12680 .fill 1024*KPMDS,4,0
12681 #else
12682 +.section .initial_page_table,"a",@progbits
12683 ENTRY(initial_page_table)
12684 .fill 1024,4,0
12685 #endif
12686 +.section .initial_pg_fixmap,"a",@progbits
12687 initial_pg_fixmap:
12688 .fill 1024,4,0
12689 +.section .empty_zero_page,"a",@progbits
12690 ENTRY(empty_zero_page)
12691 .fill 4096,1,0
12692 +.section .swapper_pg_dir,"a",@progbits
12693 ENTRY(swapper_pg_dir)
12694 +#ifdef CONFIG_X86_PAE
12695 + .fill 4,8,0
12696 +#else
12697 .fill 1024,4,0
12698 +#endif
12699 +
12700 +/*
12701 + * The IDT has to be page-aligned to simplify the Pentium
12702 + * F0 0F bug workaround.. We have a special link segment
12703 + * for this.
12704 + */
12705 +.section .idt,"a",@progbits
12706 +ENTRY(idt_table)
12707 + .fill 256,8,0
12708
12709 /*
12710 * This starts the data section.
12711 */
12712 #ifdef CONFIG_X86_PAE
12713 -__PAGE_ALIGNED_DATA
12714 - /* Page-aligned for the benefit of paravirt? */
12715 - .align PAGE_SIZE
12716 +.section .initial_page_table,"a",@progbits
12717 ENTRY(initial_page_table)
12718 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12719 # if KPMDS == 3
12720 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12721 # error "Kernel PMDs should be 1, 2 or 3"
12722 # endif
12723 .align PAGE_SIZE /* needs to be page-sized too */
12724 +
12725 +#ifdef CONFIG_PAX_PER_CPU_PGD
12726 +ENTRY(cpu_pgd)
12727 + .rept NR_CPUS
12728 + .fill 4,8,0
12729 + .endr
12730 +#endif
12731 +
12732 #endif
12733
12734 .data
12735 .balign 4
12736 ENTRY(stack_start)
12737 - .long init_thread_union+THREAD_SIZE
12738 + .long init_thread_union+THREAD_SIZE-8
12739 +
12740 +ready: .byte 0
12741
12742 +.section .rodata,"a",@progbits
12743 early_recursion_flag:
12744 .long 0
12745
12746 -ready: .byte 0
12747 -
12748 int_msg:
12749 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12750
12751 @@ -707,7 +811,7 @@ fault_msg:
12752 .word 0 # 32 bit align gdt_desc.address
12753 boot_gdt_descr:
12754 .word __BOOT_DS+7
12755 - .long boot_gdt - __PAGE_OFFSET
12756 + .long pa(boot_gdt)
12757
12758 .word 0 # 32-bit align idt_desc.address
12759 idt_descr:
12760 @@ -718,7 +822,7 @@ idt_descr:
12761 .word 0 # 32 bit align gdt_desc.address
12762 ENTRY(early_gdt_descr)
12763 .word GDT_ENTRIES*8-1
12764 - .long gdt_page /* Overwritten for secondary CPUs */
12765 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12766
12767 /*
12768 * The boot_gdt must mirror the equivalent in setup.S and is
12769 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12770 .align L1_CACHE_BYTES
12771 ENTRY(boot_gdt)
12772 .fill GDT_ENTRY_BOOT_CS,8,0
12773 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12774 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12775 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12776 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12777 +
12778 + .align PAGE_SIZE_asm
12779 +ENTRY(cpu_gdt_table)
12780 + .rept NR_CPUS
12781 + .quad 0x0000000000000000 /* NULL descriptor */
12782 + .quad 0x0000000000000000 /* 0x0b reserved */
12783 + .quad 0x0000000000000000 /* 0x13 reserved */
12784 + .quad 0x0000000000000000 /* 0x1b reserved */
12785 +
12786 +#ifdef CONFIG_PAX_KERNEXEC
12787 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12788 +#else
12789 + .quad 0x0000000000000000 /* 0x20 unused */
12790 +#endif
12791 +
12792 + .quad 0x0000000000000000 /* 0x28 unused */
12793 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12794 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12795 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12796 + .quad 0x0000000000000000 /* 0x4b reserved */
12797 + .quad 0x0000000000000000 /* 0x53 reserved */
12798 + .quad 0x0000000000000000 /* 0x5b reserved */
12799 +
12800 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12801 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12802 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12803 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12804 +
12805 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12806 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12807 +
12808 + /*
12809 + * Segments used for calling PnP BIOS have byte granularity.
12810 + * The code segments and data segments have fixed 64k limits,
12811 + * the transfer segment sizes are set at run time.
12812 + */
12813 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12814 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12815 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12816 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12817 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12818 +
12819 + /*
12820 + * The APM segments have byte granularity and their bases
12821 + * are set at run time. All have 64k limits.
12822 + */
12823 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12824 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12825 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12826 +
12827 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12828 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12829 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12830 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12831 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12832 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12833 +
12834 + /* Be sure this is zeroed to avoid false validations in Xen */
12835 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12836 + .endr
12837 diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
12838 --- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12839 +++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12840 @@ -19,6 +19,7 @@
12841 #include <asm/cache.h>
12842 #include <asm/processor-flags.h>
12843 #include <asm/percpu.h>
12844 +#include <asm/cpufeature.h>
12845
12846 #ifdef CONFIG_PARAVIRT
12847 #include <asm/asm-offsets.h>
12848 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12849 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12850 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12851 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12852 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12853 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12854 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12855 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12856
12857 .text
12858 __HEAD
12859 @@ -85,35 +90,22 @@ startup_64:
12860 */
12861 addq %rbp, init_level4_pgt + 0(%rip)
12862 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12863 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12864 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12865 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12866
12867 addq %rbp, level3_ident_pgt + 0(%rip)
12868 +#ifndef CONFIG_XEN
12869 + addq %rbp, level3_ident_pgt + 8(%rip)
12870 +#endif
12871
12872 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12873 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12874 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12875
12876 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12877 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12878 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12879
12880 - /* Add an Identity mapping if I am above 1G */
12881 - leaq _text(%rip), %rdi
12882 - andq $PMD_PAGE_MASK, %rdi
12883 -
12884 - movq %rdi, %rax
12885 - shrq $PUD_SHIFT, %rax
12886 - andq $(PTRS_PER_PUD - 1), %rax
12887 - jz ident_complete
12888 -
12889 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12890 - leaq level3_ident_pgt(%rip), %rbx
12891 - movq %rdx, 0(%rbx, %rax, 8)
12892 -
12893 - movq %rdi, %rax
12894 - shrq $PMD_SHIFT, %rax
12895 - andq $(PTRS_PER_PMD - 1), %rax
12896 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12897 - leaq level2_spare_pgt(%rip), %rbx
12898 - movq %rdx, 0(%rbx, %rax, 8)
12899 -ident_complete:
12900 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12901 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12902
12903 /*
12904 * Fixup the kernel text+data virtual addresses. Note that
12905 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12906 * after the boot processor executes this code.
12907 */
12908
12909 - /* Enable PAE mode and PGE */
12910 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12911 + /* Enable PAE mode and PSE/PGE */
12912 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12913 movq %rax, %cr4
12914
12915 /* Setup early boot stage 4 level pagetables. */
12916 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12917 movl $MSR_EFER, %ecx
12918 rdmsr
12919 btsl $_EFER_SCE, %eax /* Enable System Call */
12920 - btl $20,%edi /* No Execute supported? */
12921 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12922 jnc 1f
12923 btsl $_EFER_NX, %eax
12924 + leaq init_level4_pgt(%rip), %rdi
12925 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12926 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12927 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12928 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12929 1: wrmsr /* Make changes effective */
12930
12931 /* Setup cr0 */
12932 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12933 bad_address:
12934 jmp bad_address
12935
12936 - .section ".init.text","ax"
12937 + __INIT
12938 #ifdef CONFIG_EARLY_PRINTK
12939 .globl early_idt_handlers
12940 early_idt_handlers:
12941 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12942 #endif /* EARLY_PRINTK */
12943 1: hlt
12944 jmp 1b
12945 + .previous
12946
12947 #ifdef CONFIG_EARLY_PRINTK
12948 + __INITDATA
12949 early_recursion_flag:
12950 .long 0
12951 + .previous
12952
12953 + .section .rodata,"a",@progbits
12954 early_idt_msg:
12955 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12956 early_idt_ripmsg:
12957 .asciz "RIP %s\n"
12958 -#endif /* CONFIG_EARLY_PRINTK */
12959 .previous
12960 +#endif /* CONFIG_EARLY_PRINTK */
12961
12962 + .section .rodata,"a",@progbits
12963 #define NEXT_PAGE(name) \
12964 .balign PAGE_SIZE; \
12965 ENTRY(name)
12966 @@ -338,7 +340,6 @@ ENTRY(name)
12967 i = i + 1 ; \
12968 .endr
12969
12970 - .data
12971 /*
12972 * This default setting generates an ident mapping at address 0x100000
12973 * and a mapping for the kernel that precisely maps virtual address
12974 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12975 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12976 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12977 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12978 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
12979 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12980 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12981 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12982 .org init_level4_pgt + L4_START_KERNEL*8, 0
12983 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12984 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12985
12986 +#ifdef CONFIG_PAX_PER_CPU_PGD
12987 +NEXT_PAGE(cpu_pgd)
12988 + .rept NR_CPUS
12989 + .fill 512,8,0
12990 + .endr
12991 +#endif
12992 +
12993 NEXT_PAGE(level3_ident_pgt)
12994 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12995 +#ifdef CONFIG_XEN
12996 .fill 511,8,0
12997 +#else
12998 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12999 + .fill 510,8,0
13000 +#endif
13001 +
13002 +NEXT_PAGE(level3_vmalloc_pgt)
13003 + .fill 512,8,0
13004 +
13005 +NEXT_PAGE(level3_vmemmap_pgt)
13006 + .fill L3_VMEMMAP_START,8,0
13007 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13008
13009 NEXT_PAGE(level3_kernel_pgt)
13010 .fill L3_START_KERNEL,8,0
13011 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13012 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13013 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13014
13015 +NEXT_PAGE(level2_vmemmap_pgt)
13016 + .fill 512,8,0
13017 +
13018 NEXT_PAGE(level2_fixmap_pgt)
13019 - .fill 506,8,0
13020 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13021 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13022 - .fill 5,8,0
13023 + .fill 507,8,0
13024 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13025 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13026 + .fill 4,8,0
13027
13028 -NEXT_PAGE(level1_fixmap_pgt)
13029 +NEXT_PAGE(level1_vsyscall_pgt)
13030 .fill 512,8,0
13031
13032 -NEXT_PAGE(level2_ident_pgt)
13033 - /* Since I easily can, map the first 1G.
13034 + /* Since I easily can, map the first 2G.
13035 * Don't set NX because code runs from these pages.
13036 */
13037 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13038 +NEXT_PAGE(level2_ident_pgt)
13039 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13040
13041 NEXT_PAGE(level2_kernel_pgt)
13042 /*
13043 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13044 * If you want to increase this then increase MODULES_VADDR
13045 * too.)
13046 */
13047 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13048 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13049 -
13050 -NEXT_PAGE(level2_spare_pgt)
13051 - .fill 512, 8, 0
13052 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13053
13054 #undef PMDS
13055 #undef NEXT_PAGE
13056
13057 - .data
13058 + .align PAGE_SIZE
13059 +ENTRY(cpu_gdt_table)
13060 + .rept NR_CPUS
13061 + .quad 0x0000000000000000 /* NULL descriptor */
13062 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13063 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13064 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13065 + .quad 0x00cffb000000ffff /* __USER32_CS */
13066 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13067 + .quad 0x00affb000000ffff /* __USER_CS */
13068 +
13069 +#ifdef CONFIG_PAX_KERNEXEC
13070 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13071 +#else
13072 + .quad 0x0 /* unused */
13073 +#endif
13074 +
13075 + .quad 0,0 /* TSS */
13076 + .quad 0,0 /* LDT */
13077 + .quad 0,0,0 /* three TLS descriptors */
13078 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13079 + /* asm/segment.h:GDT_ENTRIES must match this */
13080 +
13081 + /* zero the remaining page */
13082 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13083 + .endr
13084 +
13085 .align 16
13086 .globl early_gdt_descr
13087 early_gdt_descr:
13088 .word GDT_ENTRIES*8-1
13089 early_gdt_descr_base:
13090 - .quad INIT_PER_CPU_VAR(gdt_page)
13091 + .quad cpu_gdt_table
13092
13093 ENTRY(phys_base)
13094 /* This must match the first entry in level2_kernel_pgt */
13095 .quad 0x0000000000000000
13096
13097 #include "../../x86/xen/xen-head.S"
13098 -
13099 - .section .bss, "aw", @nobits
13100 +
13101 + .section .rodata,"a",@progbits
13102 .align L1_CACHE_BYTES
13103 ENTRY(idt_table)
13104 - .skip IDT_ENTRIES * 16
13105 + .fill 512,8,0
13106
13107 __PAGE_ALIGNED_BSS
13108 .align PAGE_SIZE
13109 diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13110 --- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13111 +++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13112 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13113 EXPORT_SYMBOL(cmpxchg8b_emu);
13114 #endif
13115
13116 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13117 +
13118 /* Networking helper routines. */
13119 EXPORT_SYMBOL(csum_partial_copy_generic);
13120 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13121 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13122
13123 EXPORT_SYMBOL(__get_user_1);
13124 EXPORT_SYMBOL(__get_user_2);
13125 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13126
13127 EXPORT_SYMBOL(csum_partial);
13128 EXPORT_SYMBOL(empty_zero_page);
13129 +
13130 +#ifdef CONFIG_PAX_KERNEXEC
13131 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13132 +#endif
13133 diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13134 --- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13135 +++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13136 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13137 "spurious 8259A interrupt: IRQ%d.\n", irq);
13138 spurious_irq_mask |= irqmask;
13139 }
13140 - atomic_inc(&irq_err_count);
13141 + atomic_inc_unchecked(&irq_err_count);
13142 /*
13143 * Theoretically we do not have to handle this IRQ,
13144 * but in Linux this does not cause problems and is
13145 diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13146 --- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13147 +++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13148 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13149 * way process stacks are handled. This is done by having a special
13150 * "init_task" linker map entry..
13151 */
13152 -union thread_union init_thread_union __init_task_data =
13153 - { INIT_THREAD_INFO(init_task) };
13154 +union thread_union init_thread_union __init_task_data;
13155
13156 /*
13157 * Initial task structure.
13158 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13159 * section. Since TSS's are completely CPU-local, we want them
13160 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13161 */
13162 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13163 -
13164 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13165 +EXPORT_SYMBOL(init_tss);
13166 diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13167 --- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13168 +++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13169 @@ -6,6 +6,7 @@
13170 #include <linux/sched.h>
13171 #include <linux/kernel.h>
13172 #include <linux/capability.h>
13173 +#include <linux/security.h>
13174 #include <linux/errno.h>
13175 #include <linux/types.h>
13176 #include <linux/ioport.h>
13177 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13178
13179 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13180 return -EINVAL;
13181 +#ifdef CONFIG_GRKERNSEC_IO
13182 + if (turn_on && grsec_disable_privio) {
13183 + gr_handle_ioperm();
13184 + return -EPERM;
13185 + }
13186 +#endif
13187 if (turn_on && !capable(CAP_SYS_RAWIO))
13188 return -EPERM;
13189
13190 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13191 * because the ->io_bitmap_max value must match the bitmap
13192 * contents:
13193 */
13194 - tss = &per_cpu(init_tss, get_cpu());
13195 + tss = init_tss + get_cpu();
13196
13197 if (turn_on)
13198 bitmap_clear(t->io_bitmap_ptr, from, num);
13199 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13200 return -EINVAL;
13201 /* Trying to gain more privileges? */
13202 if (level > old) {
13203 +#ifdef CONFIG_GRKERNSEC_IO
13204 + if (grsec_disable_privio) {
13205 + gr_handle_iopl();
13206 + return -EPERM;
13207 + }
13208 +#endif
13209 if (!capable(CAP_SYS_RAWIO))
13210 return -EPERM;
13211 }
13212 diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13213 --- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13214 +++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13215 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13216 __asm__ __volatile__("andl %%esp,%0" :
13217 "=r" (sp) : "0" (THREAD_SIZE - 1));
13218
13219 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13220 + return sp < STACK_WARN;
13221 }
13222
13223 static void print_stack_overflow(void)
13224 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13225 * per-CPU IRQ handling contexts (thread information and stack)
13226 */
13227 union irq_ctx {
13228 - struct thread_info tinfo;
13229 - u32 stack[THREAD_SIZE/sizeof(u32)];
13230 + unsigned long previous_esp;
13231 + u32 stack[THREAD_SIZE/sizeof(u32)];
13232 } __attribute__((aligned(THREAD_SIZE)));
13233
13234 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13235 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13236 static inline int
13237 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13238 {
13239 - union irq_ctx *curctx, *irqctx;
13240 + union irq_ctx *irqctx;
13241 u32 *isp, arg1, arg2;
13242
13243 - curctx = (union irq_ctx *) current_thread_info();
13244 irqctx = __this_cpu_read(hardirq_ctx);
13245
13246 /*
13247 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13248 * handler) we can't do that and just have to keep using the
13249 * current stack (which is the irq stack already after all)
13250 */
13251 - if (unlikely(curctx == irqctx))
13252 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13253 return 0;
13254
13255 /* build the stack frame on the IRQ stack */
13256 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13257 - irqctx->tinfo.task = curctx->tinfo.task;
13258 - irqctx->tinfo.previous_esp = current_stack_pointer;
13259 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13260 + irqctx->previous_esp = current_stack_pointer;
13261
13262 - /*
13263 - * Copy the softirq bits in preempt_count so that the
13264 - * softirq checks work in the hardirq context.
13265 - */
13266 - irqctx->tinfo.preempt_count =
13267 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13268 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13270 + __set_fs(MAKE_MM_SEG(0));
13271 +#endif
13272
13273 if (unlikely(overflow))
13274 call_on_stack(print_stack_overflow, isp);
13275 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13276 : "0" (irq), "1" (desc), "2" (isp),
13277 "D" (desc->handle_irq)
13278 : "memory", "cc", "ecx");
13279 +
13280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13281 + __set_fs(current_thread_info()->addr_limit);
13282 +#endif
13283 +
13284 return 1;
13285 }
13286
13287 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13288 */
13289 void __cpuinit irq_ctx_init(int cpu)
13290 {
13291 - union irq_ctx *irqctx;
13292 -
13293 if (per_cpu(hardirq_ctx, cpu))
13294 return;
13295
13296 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13297 - THREAD_FLAGS,
13298 - THREAD_ORDER));
13299 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13300 - irqctx->tinfo.cpu = cpu;
13301 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13302 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13303 -
13304 - per_cpu(hardirq_ctx, cpu) = irqctx;
13305 -
13306 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13307 - THREAD_FLAGS,
13308 - THREAD_ORDER));
13309 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13310 - irqctx->tinfo.cpu = cpu;
13311 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13312 -
13313 - per_cpu(softirq_ctx, cpu) = irqctx;
13314 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13315 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13316
13317 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13318 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13319 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13320 asmlinkage void do_softirq(void)
13321 {
13322 unsigned long flags;
13323 - struct thread_info *curctx;
13324 union irq_ctx *irqctx;
13325 u32 *isp;
13326
13327 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13328 local_irq_save(flags);
13329
13330 if (local_softirq_pending()) {
13331 - curctx = current_thread_info();
13332 irqctx = __this_cpu_read(softirq_ctx);
13333 - irqctx->tinfo.task = curctx->task;
13334 - irqctx->tinfo.previous_esp = current_stack_pointer;
13335 + irqctx->previous_esp = current_stack_pointer;
13336
13337 /* build the stack frame on the softirq stack */
13338 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13339 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13340 +
13341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13342 + __set_fs(MAKE_MM_SEG(0));
13343 +#endif
13344
13345 call_on_stack(__do_softirq, isp);
13346 +
13347 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13348 + __set_fs(current_thread_info()->addr_limit);
13349 +#endif
13350 +
13351 /*
13352 * Shouldn't happen, we returned above if in_interrupt():
13353 */
13354 diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13355 --- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13356 +++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13357 @@ -17,7 +17,7 @@
13358 #include <asm/mce.h>
13359 #include <asm/hw_irq.h>
13360
13361 -atomic_t irq_err_count;
13362 +atomic_unchecked_t irq_err_count;
13363
13364 /* Function pointer for generic interrupt vector handling */
13365 void (*x86_platform_ipi_callback)(void) = NULL;
13366 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13367 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13368 seq_printf(p, " Machine check polls\n");
13369 #endif
13370 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13371 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13372 #if defined(CONFIG_X86_IO_APIC)
13373 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13374 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13375 #endif
13376 return 0;
13377 }
13378 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13379
13380 u64 arch_irq_stat(void)
13381 {
13382 - u64 sum = atomic_read(&irq_err_count);
13383 + u64 sum = atomic_read_unchecked(&irq_err_count);
13384
13385 #ifdef CONFIG_X86_IO_APIC
13386 - sum += atomic_read(&irq_mis_count);
13387 + sum += atomic_read_unchecked(&irq_mis_count);
13388 #endif
13389 return sum;
13390 }
13391 diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13392 --- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13393 +++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13394 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13395 #ifdef CONFIG_X86_32
13396 switch (regno) {
13397 case GDB_SS:
13398 - if (!user_mode_vm(regs))
13399 + if (!user_mode(regs))
13400 *(unsigned long *)mem = __KERNEL_DS;
13401 break;
13402 case GDB_SP:
13403 - if (!user_mode_vm(regs))
13404 + if (!user_mode(regs))
13405 *(unsigned long *)mem = kernel_stack_pointer(regs);
13406 break;
13407 case GDB_GS:
13408 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13409 case 'k':
13410 /* clear the trace bit */
13411 linux_regs->flags &= ~X86_EFLAGS_TF;
13412 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13413 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13414
13415 /* set the trace bit if we're stepping */
13416 if (remcomInBuffer[0] == 's') {
13417 linux_regs->flags |= X86_EFLAGS_TF;
13418 - atomic_set(&kgdb_cpu_doing_single_step,
13419 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13420 raw_smp_processor_id());
13421 }
13422
13423 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13424 return NOTIFY_DONE;
13425
13426 case DIE_DEBUG:
13427 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13428 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13429 if (user_mode(regs))
13430 return single_step_cont(regs, args);
13431 break;
13432 diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13433 --- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13434 +++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13435 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13436 } __attribute__((packed)) *insn;
13437
13438 insn = (struct __arch_relative_insn *)from;
13439 +
13440 + pax_open_kernel();
13441 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13442 insn->op = op;
13443 + pax_close_kernel();
13444 }
13445
13446 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13447 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13448 kprobe_opcode_t opcode;
13449 kprobe_opcode_t *orig_opcodes = opcodes;
13450
13451 - if (search_exception_tables((unsigned long)opcodes))
13452 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13453 return 0; /* Page fault may occur on this address. */
13454
13455 retry:
13456 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13457 }
13458 }
13459 insn_get_length(&insn);
13460 + pax_open_kernel();
13461 memcpy(dest, insn.kaddr, insn.length);
13462 + pax_close_kernel();
13463
13464 #ifdef CONFIG_X86_64
13465 if (insn_rip_relative(&insn)) {
13466 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13467 (u8 *) dest;
13468 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13469 disp = (u8 *) dest + insn_offset_displacement(&insn);
13470 + pax_open_kernel();
13471 *(s32 *) disp = (s32) newdisp;
13472 + pax_close_kernel();
13473 }
13474 #endif
13475 return insn.length;
13476 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13477 */
13478 __copy_instruction(p->ainsn.insn, p->addr, 0);
13479
13480 - if (can_boost(p->addr))
13481 + if (can_boost(ktla_ktva(p->addr)))
13482 p->ainsn.boostable = 0;
13483 else
13484 p->ainsn.boostable = -1;
13485
13486 - p->opcode = *p->addr;
13487 + p->opcode = *(ktla_ktva(p->addr));
13488 }
13489
13490 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13491 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13492 * nor set current_kprobe, because it doesn't use single
13493 * stepping.
13494 */
13495 - regs->ip = (unsigned long)p->ainsn.insn;
13496 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13497 preempt_enable_no_resched();
13498 return;
13499 }
13500 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13501 if (p->opcode == BREAKPOINT_INSTRUCTION)
13502 regs->ip = (unsigned long)p->addr;
13503 else
13504 - regs->ip = (unsigned long)p->ainsn.insn;
13505 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13506 }
13507
13508 /*
13509 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13510 setup_singlestep(p, regs, kcb, 0);
13511 return 1;
13512 }
13513 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13514 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13515 /*
13516 * The breakpoint instruction was removed right
13517 * after we hit it. Another cpu has removed
13518 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13519 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13520 {
13521 unsigned long *tos = stack_addr(regs);
13522 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13523 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13524 unsigned long orig_ip = (unsigned long)p->addr;
13525 kprobe_opcode_t *insn = p->ainsn.insn;
13526
13527 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13528 struct die_args *args = data;
13529 int ret = NOTIFY_DONE;
13530
13531 - if (args->regs && user_mode_vm(args->regs))
13532 + if (args->regs && user_mode(args->regs))
13533 return ret;
13534
13535 switch (val) {
13536 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13537 * Verify if the address gap is in 2GB range, because this uses
13538 * a relative jump.
13539 */
13540 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13541 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13542 if (abs(rel) > 0x7fffffff)
13543 return -ERANGE;
13544
13545 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13546 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13547
13548 /* Set probe function call */
13549 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13550 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13551
13552 /* Set returning jmp instruction at the tail of out-of-line buffer */
13553 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13554 - (u8 *)op->kp.addr + op->optinsn.size);
13555 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13556
13557 flush_icache_range((unsigned long) buf,
13558 (unsigned long) buf + TMPL_END_IDX +
13559 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13560 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13561
13562 /* Backup instructions which will be replaced by jump address */
13563 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13564 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13565 RELATIVE_ADDR_SIZE);
13566
13567 insn_buf[0] = RELATIVEJUMP_OPCODE;
13568 diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13569 --- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13570 +++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13571 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13572 pv_mmu_ops.set_pud = kvm_set_pud;
13573 #if PAGETABLE_LEVELS == 4
13574 pv_mmu_ops.set_pgd = kvm_set_pgd;
13575 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13576 #endif
13577 #endif
13578 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13579 diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13580 --- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13581 +++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13582 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13583 if (reload) {
13584 #ifdef CONFIG_SMP
13585 preempt_disable();
13586 - load_LDT(pc);
13587 + load_LDT_nolock(pc);
13588 if (!cpumask_equal(mm_cpumask(current->mm),
13589 cpumask_of(smp_processor_id())))
13590 smp_call_function(flush_ldt, current->mm, 1);
13591 preempt_enable();
13592 #else
13593 - load_LDT(pc);
13594 + load_LDT_nolock(pc);
13595 #endif
13596 }
13597 if (oldsize) {
13598 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13599 return err;
13600
13601 for (i = 0; i < old->size; i++)
13602 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13603 + write_ldt_entry(new->ldt, i, old->ldt + i);
13604 return 0;
13605 }
13606
13607 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13608 retval = copy_ldt(&mm->context, &old_mm->context);
13609 mutex_unlock(&old_mm->context.lock);
13610 }
13611 +
13612 + if (tsk == current) {
13613 + mm->context.vdso = 0;
13614 +
13615 +#ifdef CONFIG_X86_32
13616 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13617 + mm->context.user_cs_base = 0UL;
13618 + mm->context.user_cs_limit = ~0UL;
13619 +
13620 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13621 + cpus_clear(mm->context.cpu_user_cs_mask);
13622 +#endif
13623 +
13624 +#endif
13625 +#endif
13626 +
13627 + }
13628 +
13629 return retval;
13630 }
13631
13632 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13633 }
13634 }
13635
13636 +#ifdef CONFIG_PAX_SEGMEXEC
13637 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13638 + error = -EINVAL;
13639 + goto out_unlock;
13640 + }
13641 +#endif
13642 +
13643 fill_ldt(&ldt, &ldt_info);
13644 if (oldmode)
13645 ldt.avl = 0;
13646 diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13647 --- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13648 +++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13649 @@ -27,7 +27,7 @@
13650 #include <asm/cacheflush.h>
13651 #include <asm/debugreg.h>
13652
13653 -static void set_idt(void *newidt, __u16 limit)
13654 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13655 {
13656 struct desc_ptr curidt;
13657
13658 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13659 }
13660
13661
13662 -static void set_gdt(void *newgdt, __u16 limit)
13663 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13664 {
13665 struct desc_ptr curgdt;
13666
13667 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13668 }
13669
13670 control_page = page_address(image->control_code_page);
13671 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13672 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13673
13674 relocate_kernel_ptr = control_page;
13675 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13676 diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13677 --- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13678 +++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13679 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13680
13681 static int get_ucode_user(void *to, const void *from, size_t n)
13682 {
13683 - return copy_from_user(to, from, n);
13684 + return copy_from_user(to, (__force const void __user *)from, n);
13685 }
13686
13687 static enum ucode_state
13688 request_microcode_user(int cpu, const void __user *buf, size_t size)
13689 {
13690 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13691 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13692 }
13693
13694 static void microcode_fini_cpu(int cpu)
13695 diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13696 --- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13697 +++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13698 @@ -36,21 +36,66 @@
13699 #define DEBUGP(fmt...)
13700 #endif
13701
13702 -void *module_alloc(unsigned long size)
13703 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13704 {
13705 if (PAGE_ALIGN(size) > MODULES_LEN)
13706 return NULL;
13707 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13708 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13709 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13710 -1, __builtin_return_address(0));
13711 }
13712
13713 +void *module_alloc(unsigned long size)
13714 +{
13715 +
13716 +#ifdef CONFIG_PAX_KERNEXEC
13717 + return __module_alloc(size, PAGE_KERNEL);
13718 +#else
13719 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13720 +#endif
13721 +
13722 +}
13723 +
13724 /* Free memory returned from module_alloc */
13725 void module_free(struct module *mod, void *module_region)
13726 {
13727 vfree(module_region);
13728 }
13729
13730 +#ifdef CONFIG_PAX_KERNEXEC
13731 +#ifdef CONFIG_X86_32
13732 +void *module_alloc_exec(unsigned long size)
13733 +{
13734 + struct vm_struct *area;
13735 +
13736 + if (size == 0)
13737 + return NULL;
13738 +
13739 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13740 + return area ? area->addr : NULL;
13741 +}
13742 +EXPORT_SYMBOL(module_alloc_exec);
13743 +
13744 +void module_free_exec(struct module *mod, void *module_region)
13745 +{
13746 + vunmap(module_region);
13747 +}
13748 +EXPORT_SYMBOL(module_free_exec);
13749 +#else
13750 +void module_free_exec(struct module *mod, void *module_region)
13751 +{
13752 + module_free(mod, module_region);
13753 +}
13754 +EXPORT_SYMBOL(module_free_exec);
13755 +
13756 +void *module_alloc_exec(unsigned long size)
13757 +{
13758 + return __module_alloc(size, PAGE_KERNEL_RX);
13759 +}
13760 +EXPORT_SYMBOL(module_alloc_exec);
13761 +#endif
13762 +#endif
13763 +
13764 /* We don't need anything special. */
13765 int module_frob_arch_sections(Elf_Ehdr *hdr,
13766 Elf_Shdr *sechdrs,
13767 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13768 unsigned int i;
13769 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13770 Elf32_Sym *sym;
13771 - uint32_t *location;
13772 + uint32_t *plocation, location;
13773
13774 DEBUGP("Applying relocate section %u to %u\n", relsec,
13775 sechdrs[relsec].sh_info);
13776 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13777 /* This is where to make the change */
13778 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13779 - + rel[i].r_offset;
13780 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13781 + location = (uint32_t)plocation;
13782 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13783 + plocation = ktla_ktva((void *)plocation);
13784 /* This is the symbol it is referring to. Note that all
13785 undefined symbols have been resolved. */
13786 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13787 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13788 switch (ELF32_R_TYPE(rel[i].r_info)) {
13789 case R_386_32:
13790 /* We add the value into the location given */
13791 - *location += sym->st_value;
13792 + pax_open_kernel();
13793 + *plocation += sym->st_value;
13794 + pax_close_kernel();
13795 break;
13796 case R_386_PC32:
13797 /* Add the value, subtract its postition */
13798 - *location += sym->st_value - (uint32_t)location;
13799 + pax_open_kernel();
13800 + *plocation += sym->st_value - location;
13801 + pax_close_kernel();
13802 break;
13803 default:
13804 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13805 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13806 case R_X86_64_NONE:
13807 break;
13808 case R_X86_64_64:
13809 + pax_open_kernel();
13810 *(u64 *)loc = val;
13811 + pax_close_kernel();
13812 break;
13813 case R_X86_64_32:
13814 + pax_open_kernel();
13815 *(u32 *)loc = val;
13816 + pax_close_kernel();
13817 if (val != *(u32 *)loc)
13818 goto overflow;
13819 break;
13820 case R_X86_64_32S:
13821 + pax_open_kernel();
13822 *(s32 *)loc = val;
13823 + pax_close_kernel();
13824 if ((s64)val != *(s32 *)loc)
13825 goto overflow;
13826 break;
13827 case R_X86_64_PC32:
13828 val -= (u64)loc;
13829 + pax_open_kernel();
13830 *(u32 *)loc = val;
13831 + pax_close_kernel();
13832 +
13833 #if 0
13834 if ((s64)val != *(s32 *)loc)
13835 goto overflow;
13836 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
13837 --- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13838 +++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13839 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13840 {
13841 return x;
13842 }
13843 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13844 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13845 +#endif
13846
13847 void __init default_banner(void)
13848 {
13849 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13850 * corresponding structure. */
13851 static void *get_call_destination(u8 type)
13852 {
13853 - struct paravirt_patch_template tmpl = {
13854 + const struct paravirt_patch_template tmpl = {
13855 .pv_init_ops = pv_init_ops,
13856 .pv_time_ops = pv_time_ops,
13857 .pv_cpu_ops = pv_cpu_ops,
13858 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13859 .pv_lock_ops = pv_lock_ops,
13860 #endif
13861 };
13862 +
13863 + pax_track_stack();
13864 +
13865 return *((void **)&tmpl + type);
13866 }
13867
13868 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13869 if (opfunc == NULL)
13870 /* If there's no function, patch it with a ud2a (BUG) */
13871 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13872 - else if (opfunc == _paravirt_nop)
13873 + else if (opfunc == (void *)_paravirt_nop)
13874 /* If the operation is a nop, then nop the callsite */
13875 ret = paravirt_patch_nop();
13876
13877 /* identity functions just return their single argument */
13878 - else if (opfunc == _paravirt_ident_32)
13879 + else if (opfunc == (void *)_paravirt_ident_32)
13880 ret = paravirt_patch_ident_32(insnbuf, len);
13881 - else if (opfunc == _paravirt_ident_64)
13882 + else if (opfunc == (void *)_paravirt_ident_64)
13883 ret = paravirt_patch_ident_64(insnbuf, len);
13884 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13885 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13886 + ret = paravirt_patch_ident_64(insnbuf, len);
13887 +#endif
13888
13889 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13890 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13891 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13892 if (insn_len > len || start == NULL)
13893 insn_len = len;
13894 else
13895 - memcpy(insnbuf, start, insn_len);
13896 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13897
13898 return insn_len;
13899 }
13900 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13901 preempt_enable();
13902 }
13903
13904 -struct pv_info pv_info = {
13905 +struct pv_info pv_info __read_only = {
13906 .name = "bare hardware",
13907 .paravirt_enabled = 0,
13908 .kernel_rpl = 0,
13909 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13910 };
13911
13912 -struct pv_init_ops pv_init_ops = {
13913 +struct pv_init_ops pv_init_ops __read_only = {
13914 .patch = native_patch,
13915 };
13916
13917 -struct pv_time_ops pv_time_ops = {
13918 +struct pv_time_ops pv_time_ops __read_only = {
13919 .sched_clock = native_sched_clock,
13920 };
13921
13922 -struct pv_irq_ops pv_irq_ops = {
13923 +struct pv_irq_ops pv_irq_ops __read_only = {
13924 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13925 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13926 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13927 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13928 #endif
13929 };
13930
13931 -struct pv_cpu_ops pv_cpu_ops = {
13932 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13933 .cpuid = native_cpuid,
13934 .get_debugreg = native_get_debugreg,
13935 .set_debugreg = native_set_debugreg,
13936 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13937 .end_context_switch = paravirt_nop,
13938 };
13939
13940 -struct pv_apic_ops pv_apic_ops = {
13941 +struct pv_apic_ops pv_apic_ops __read_only = {
13942 #ifdef CONFIG_X86_LOCAL_APIC
13943 .startup_ipi_hook = paravirt_nop,
13944 #endif
13945 };
13946
13947 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13948 +#ifdef CONFIG_X86_32
13949 +#ifdef CONFIG_X86_PAE
13950 +/* 64-bit pagetable entries */
13951 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13952 +#else
13953 /* 32-bit pagetable entries */
13954 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13955 +#endif
13956 #else
13957 /* 64-bit pagetable entries */
13958 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13959 #endif
13960
13961 -struct pv_mmu_ops pv_mmu_ops = {
13962 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13963
13964 .read_cr2 = native_read_cr2,
13965 .write_cr2 = native_write_cr2,
13966 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13967 .make_pud = PTE_IDENT,
13968
13969 .set_pgd = native_set_pgd,
13970 + .set_pgd_batched = native_set_pgd_batched,
13971 #endif
13972 #endif /* PAGETABLE_LEVELS >= 3 */
13973
13974 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13975 },
13976
13977 .set_fixmap = native_set_fixmap,
13978 +
13979 +#ifdef CONFIG_PAX_KERNEXEC
13980 + .pax_open_kernel = native_pax_open_kernel,
13981 + .pax_close_kernel = native_pax_close_kernel,
13982 +#endif
13983 +
13984 };
13985
13986 EXPORT_SYMBOL_GPL(pv_time_ops);
13987 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
13988 --- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13989 +++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13990 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13991 arch_spin_lock(lock);
13992 }
13993
13994 -struct pv_lock_ops pv_lock_ops = {
13995 +struct pv_lock_ops pv_lock_ops __read_only = {
13996 #ifdef CONFIG_SMP
13997 .spin_is_locked = __ticket_spin_is_locked,
13998 .spin_is_contended = __ticket_spin_is_contended,
13999 diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14000 --- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14001 +++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14002 @@ -2,7 +2,7 @@
14003 #include <asm/iommu_table.h>
14004 #include <linux/string.h>
14005 #include <linux/kallsyms.h>
14006 -
14007 +#include <linux/sched.h>
14008
14009 #define DEBUG 1
14010
14011 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14012 {
14013 struct iommu_table_entry *p, *q, *x;
14014
14015 + pax_track_stack();
14016 +
14017 /* Simple cyclic dependency checker. */
14018 for (p = start; p < finish; p++) {
14019 q = find_dependents_of(start, finish, p);
14020 diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14021 --- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14022 +++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14023 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14024 unsigned long thread_saved_pc(struct task_struct *tsk)
14025 {
14026 return ((unsigned long *)tsk->thread.sp)[3];
14027 +//XXX return tsk->thread.eip;
14028 }
14029
14030 #ifndef CONFIG_SMP
14031 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14032 unsigned long sp;
14033 unsigned short ss, gs;
14034
14035 - if (user_mode_vm(regs)) {
14036 + if (user_mode(regs)) {
14037 sp = regs->sp;
14038 ss = regs->ss & 0xffff;
14039 - gs = get_user_gs(regs);
14040 } else {
14041 sp = kernel_stack_pointer(regs);
14042 savesegment(ss, ss);
14043 - savesegment(gs, gs);
14044 }
14045 + gs = get_user_gs(regs);
14046
14047 show_regs_common();
14048
14049 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14050 struct task_struct *tsk;
14051 int err;
14052
14053 - childregs = task_pt_regs(p);
14054 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14055 *childregs = *regs;
14056 childregs->ax = 0;
14057 childregs->sp = sp;
14058
14059 p->thread.sp = (unsigned long) childregs;
14060 p->thread.sp0 = (unsigned long) (childregs+1);
14061 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14062
14063 p->thread.ip = (unsigned long) ret_from_fork;
14064
14065 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14066 struct thread_struct *prev = &prev_p->thread,
14067 *next = &next_p->thread;
14068 int cpu = smp_processor_id();
14069 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14070 + struct tss_struct *tss = init_tss + cpu;
14071 bool preload_fpu;
14072
14073 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14074 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14075 */
14076 lazy_save_gs(prev->gs);
14077
14078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14079 + __set_fs(task_thread_info(next_p)->addr_limit);
14080 +#endif
14081 +
14082 /*
14083 * Load the per-thread Thread-Local Storage descriptor.
14084 */
14085 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14086 */
14087 arch_end_context_switch(next_p);
14088
14089 + percpu_write(current_task, next_p);
14090 + percpu_write(current_tinfo, &next_p->tinfo);
14091 +
14092 if (preload_fpu)
14093 __math_state_restore();
14094
14095 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14096 if (prev->gs | next->gs)
14097 lazy_load_gs(next->gs);
14098
14099 - percpu_write(current_task, next_p);
14100 -
14101 return prev_p;
14102 }
14103
14104 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14105 } while (count++ < 16);
14106 return 0;
14107 }
14108 -
14109 diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14110 --- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14111 +++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14112 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14113 void exit_idle(void)
14114 {
14115 /* idle loop has pid 0 */
14116 - if (current->pid)
14117 + if (task_pid_nr(current))
14118 return;
14119 __exit_idle();
14120 }
14121 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14122 struct pt_regs *childregs;
14123 struct task_struct *me = current;
14124
14125 - childregs = ((struct pt_regs *)
14126 - (THREAD_SIZE + task_stack_page(p))) - 1;
14127 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14128 *childregs = *regs;
14129
14130 childregs->ax = 0;
14131 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14132 p->thread.sp = (unsigned long) childregs;
14133 p->thread.sp0 = (unsigned long) (childregs+1);
14134 p->thread.usersp = me->thread.usersp;
14135 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14136
14137 set_tsk_thread_flag(p, TIF_FORK);
14138
14139 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14140 struct thread_struct *prev = &prev_p->thread;
14141 struct thread_struct *next = &next_p->thread;
14142 int cpu = smp_processor_id();
14143 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14144 + struct tss_struct *tss = init_tss + cpu;
14145 unsigned fsindex, gsindex;
14146 bool preload_fpu;
14147
14148 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14149 prev->usersp = percpu_read(old_rsp);
14150 percpu_write(old_rsp, next->usersp);
14151 percpu_write(current_task, next_p);
14152 + percpu_write(current_tinfo, &next_p->tinfo);
14153
14154 - percpu_write(kernel_stack,
14155 - (unsigned long)task_stack_page(next_p) +
14156 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14157 + percpu_write(kernel_stack, next->sp0);
14158
14159 /*
14160 * Now maybe reload the debug registers and handle I/O bitmaps
14161 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14162 if (!p || p == current || p->state == TASK_RUNNING)
14163 return 0;
14164 stack = (unsigned long)task_stack_page(p);
14165 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14166 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14167 return 0;
14168 fp = *(u64 *)(p->thread.sp);
14169 do {
14170 - if (fp < (unsigned long)stack ||
14171 - fp >= (unsigned long)stack+THREAD_SIZE)
14172 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14173 return 0;
14174 ip = *(u64 *)(fp+8);
14175 if (!in_sched_functions(ip))
14176 diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14177 --- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14178 +++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
14179 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14180
14181 void free_thread_info(struct thread_info *ti)
14182 {
14183 - free_thread_xstate(ti->task);
14184 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14185 }
14186
14187 +static struct kmem_cache *task_struct_cachep;
14188 +
14189 void arch_task_cache_init(void)
14190 {
14191 - task_xstate_cachep =
14192 - kmem_cache_create("task_xstate", xstate_size,
14193 + /* create a slab on which task_structs can be allocated */
14194 + task_struct_cachep =
14195 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14196 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14197 +
14198 + task_xstate_cachep =
14199 + kmem_cache_create("task_xstate", xstate_size,
14200 __alignof__(union thread_xstate),
14201 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14202 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14203 +}
14204 +
14205 +struct task_struct *alloc_task_struct_node(int node)
14206 +{
14207 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14208 +}
14209 +
14210 +void free_task_struct(struct task_struct *task)
14211 +{
14212 + free_thread_xstate(task);
14213 + kmem_cache_free(task_struct_cachep, task);
14214 }
14215
14216 /*
14217 @@ -70,7 +87,7 @@ void exit_thread(void)
14218 unsigned long *bp = t->io_bitmap_ptr;
14219
14220 if (bp) {
14221 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14222 + struct tss_struct *tss = init_tss + get_cpu();
14223
14224 t->io_bitmap_ptr = NULL;
14225 clear_thread_flag(TIF_IO_BITMAP);
14226 @@ -106,7 +123,7 @@ void show_regs_common(void)
14227
14228 printk(KERN_CONT "\n");
14229 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14230 - current->pid, current->comm, print_tainted(),
14231 + task_pid_nr(current), current->comm, print_tainted(),
14232 init_utsname()->release,
14233 (int)strcspn(init_utsname()->version, " "),
14234 init_utsname()->version);
14235 @@ -120,6 +137,9 @@ void flush_thread(void)
14236 {
14237 struct task_struct *tsk = current;
14238
14239 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14240 + loadsegment(gs, 0);
14241 +#endif
14242 flush_ptrace_hw_breakpoint(tsk);
14243 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14244 /*
14245 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14246 regs.di = (unsigned long) arg;
14247
14248 #ifdef CONFIG_X86_32
14249 - regs.ds = __USER_DS;
14250 - regs.es = __USER_DS;
14251 + regs.ds = __KERNEL_DS;
14252 + regs.es = __KERNEL_DS;
14253 regs.fs = __KERNEL_PERCPU;
14254 - regs.gs = __KERNEL_STACK_CANARY;
14255 + savesegment(gs, regs.gs);
14256 #else
14257 regs.ss = __KERNEL_DS;
14258 #endif
14259 @@ -403,7 +423,7 @@ void default_idle(void)
14260 EXPORT_SYMBOL(default_idle);
14261 #endif
14262
14263 -void stop_this_cpu(void *dummy)
14264 +__noreturn void stop_this_cpu(void *dummy)
14265 {
14266 local_irq_disable();
14267 /*
14268 @@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14269 }
14270 early_param("idle", idle_setup);
14271
14272 -unsigned long arch_align_stack(unsigned long sp)
14273 +#ifdef CONFIG_PAX_RANDKSTACK
14274 +asmlinkage void pax_randomize_kstack(void)
14275 {
14276 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14277 - sp -= get_random_int() % 8192;
14278 - return sp & ~0xf;
14279 -}
14280 + struct thread_struct *thread = &current->thread;
14281 + unsigned long time;
14282
14283 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14284 -{
14285 - unsigned long range_end = mm->brk + 0x02000000;
14286 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14287 -}
14288 + if (!randomize_va_space)
14289 + return;
14290 +
14291 + rdtscl(time);
14292 +
14293 + /* P4 seems to return a 0 LSB, ignore it */
14294 +#ifdef CONFIG_MPENTIUM4
14295 + time &= 0x3EUL;
14296 + time <<= 2;
14297 +#elif defined(CONFIG_X86_64)
14298 + time &= 0xFUL;
14299 + time <<= 4;
14300 +#else
14301 + time &= 0x1FUL;
14302 + time <<= 3;
14303 +#endif
14304 +
14305 + thread->sp0 ^= time;
14306 + load_sp0(init_tss + smp_processor_id(), thread);
14307
14308 +#ifdef CONFIG_X86_64
14309 + percpu_write(kernel_stack, thread->sp0);
14310 +#endif
14311 +}
14312 +#endif
14313 diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14314 --- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14315 +++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14316 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14317 unsigned long addr, unsigned long data)
14318 {
14319 int ret;
14320 - unsigned long __user *datap = (unsigned long __user *)data;
14321 + unsigned long __user *datap = (__force unsigned long __user *)data;
14322
14323 switch (request) {
14324 /* read the word at location addr in the USER area. */
14325 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14326 if ((int) addr < 0)
14327 return -EIO;
14328 ret = do_get_thread_area(child, addr,
14329 - (struct user_desc __user *)data);
14330 + (__force struct user_desc __user *) data);
14331 break;
14332
14333 case PTRACE_SET_THREAD_AREA:
14334 if ((int) addr < 0)
14335 return -EIO;
14336 ret = do_set_thread_area(child, addr,
14337 - (struct user_desc __user *)data, 0);
14338 + (__force struct user_desc __user *) data, 0);
14339 break;
14340 #endif
14341
14342 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14343 memset(info, 0, sizeof(*info));
14344 info->si_signo = SIGTRAP;
14345 info->si_code = si_code;
14346 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14347 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14348 }
14349
14350 void user_single_step_siginfo(struct task_struct *tsk,
14351 diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14352 --- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14353 +++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14354 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14355 return pv_tsc_khz;
14356 }
14357
14358 -static atomic64_t last_value = ATOMIC64_INIT(0);
14359 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14360
14361 void pvclock_resume(void)
14362 {
14363 - atomic64_set(&last_value, 0);
14364 + atomic64_set_unchecked(&last_value, 0);
14365 }
14366
14367 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14368 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14369 * updating at the same time, and one of them could be slightly behind,
14370 * making the assumption that last_value always go forward fail to hold.
14371 */
14372 - last = atomic64_read(&last_value);
14373 + last = atomic64_read_unchecked(&last_value);
14374 do {
14375 if (ret < last)
14376 return last;
14377 - last = atomic64_cmpxchg(&last_value, last, ret);
14378 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14379 } while (unlikely(last != ret));
14380
14381 return ret;
14382 diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14383 --- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14384 +++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14385 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14386 EXPORT_SYMBOL(pm_power_off);
14387
14388 static const struct desc_ptr no_idt = {};
14389 -static int reboot_mode;
14390 +static unsigned short reboot_mode;
14391 enum reboot_type reboot_type = BOOT_ACPI;
14392 int reboot_force;
14393
14394 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14395 extern const unsigned char machine_real_restart_asm[];
14396 extern const u64 machine_real_restart_gdt[3];
14397
14398 -void machine_real_restart(unsigned int type)
14399 +__noreturn void machine_real_restart(unsigned int type)
14400 {
14401 void *restart_va;
14402 unsigned long restart_pa;
14403 - void (*restart_lowmem)(unsigned int);
14404 + void (* __noreturn restart_lowmem)(unsigned int);
14405 u64 *lowmem_gdt;
14406
14407 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14408 + struct desc_struct *gdt;
14409 +#endif
14410 +
14411 local_irq_disable();
14412
14413 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14414 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14415 boot)". This seems like a fairly standard thing that gets set by
14416 REBOOT.COM programs, and the previous reset routine did this
14417 too. */
14418 - *((unsigned short *)0x472) = reboot_mode;
14419 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14420
14421 /* Patch the GDT in the low memory trampoline */
14422 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14423
14424 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14425 restart_pa = virt_to_phys(restart_va);
14426 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14427 + restart_lowmem = (void *)restart_pa;
14428
14429 /* GDT[0]: GDT self-pointer */
14430 lowmem_gdt[0] =
14431 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14432 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14433
14434 /* Jump to the identity-mapped low memory code */
14435 +
14436 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14437 + gdt = get_cpu_gdt_table(smp_processor_id());
14438 + pax_open_kernel();
14439 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14440 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14441 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14442 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14443 +#endif
14444 +#ifdef CONFIG_PAX_KERNEXEC
14445 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14446 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14447 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14448 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14449 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14450 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14451 +#endif
14452 + pax_close_kernel();
14453 +#endif
14454 +
14455 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14456 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14457 + unreachable();
14458 +#else
14459 restart_lowmem(type);
14460 +#endif
14461 +
14462 }
14463 #ifdef CONFIG_APM_MODULE
14464 EXPORT_SYMBOL(machine_real_restart);
14465 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14466 * try to force a triple fault and then cycle between hitting the keyboard
14467 * controller and doing that
14468 */
14469 -static void native_machine_emergency_restart(void)
14470 +__noreturn static void native_machine_emergency_restart(void)
14471 {
14472 int i;
14473 int attempt = 0;
14474 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14475 #endif
14476 }
14477
14478 -static void __machine_emergency_restart(int emergency)
14479 +static __noreturn void __machine_emergency_restart(int emergency)
14480 {
14481 reboot_emergency = emergency;
14482 machine_ops.emergency_restart();
14483 }
14484
14485 -static void native_machine_restart(char *__unused)
14486 +static __noreturn void native_machine_restart(char *__unused)
14487 {
14488 printk("machine restart\n");
14489
14490 @@ -662,7 +692,7 @@ static void native_machine_restart(char
14491 __machine_emergency_restart(0);
14492 }
14493
14494 -static void native_machine_halt(void)
14495 +static __noreturn void native_machine_halt(void)
14496 {
14497 /* stop other cpus and apics */
14498 machine_shutdown();
14499 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14500 stop_this_cpu(NULL);
14501 }
14502
14503 -static void native_machine_power_off(void)
14504 +__noreturn static void native_machine_power_off(void)
14505 {
14506 if (pm_power_off) {
14507 if (!reboot_force)
14508 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14509 }
14510 /* a fallback in case there is no PM info available */
14511 tboot_shutdown(TB_SHUTDOWN_HALT);
14512 + unreachable();
14513 }
14514
14515 struct machine_ops machine_ops = {
14516 diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14517 --- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14518 +++ linux-3.0.4/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14519 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14520 * area (640->1Mb) as ram even though it is not.
14521 * take them out.
14522 */
14523 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14524 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14525 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14526 }
14527
14528 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14529
14530 if (!boot_params.hdr.root_flags)
14531 root_mountflags &= ~MS_RDONLY;
14532 - init_mm.start_code = (unsigned long) _text;
14533 - init_mm.end_code = (unsigned long) _etext;
14534 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14535 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14536 init_mm.end_data = (unsigned long) _edata;
14537 init_mm.brk = _brk_end;
14538
14539 - code_resource.start = virt_to_phys(_text);
14540 - code_resource.end = virt_to_phys(_etext)-1;
14541 - data_resource.start = virt_to_phys(_etext);
14542 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14543 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14544 + data_resource.start = virt_to_phys(_sdata);
14545 data_resource.end = virt_to_phys(_edata)-1;
14546 bss_resource.start = virt_to_phys(&__bss_start);
14547 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14548 diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14549 --- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14550 +++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14551 @@ -21,19 +21,17 @@
14552 #include <asm/cpu.h>
14553 #include <asm/stackprotector.h>
14554
14555 -DEFINE_PER_CPU(int, cpu_number);
14556 +#ifdef CONFIG_SMP
14557 +DEFINE_PER_CPU(unsigned int, cpu_number);
14558 EXPORT_PER_CPU_SYMBOL(cpu_number);
14559 +#endif
14560
14561 -#ifdef CONFIG_X86_64
14562 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14563 -#else
14564 -#define BOOT_PERCPU_OFFSET 0
14565 -#endif
14566
14567 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14568 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14569
14570 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14571 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14572 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14573 };
14574 EXPORT_SYMBOL(__per_cpu_offset);
14575 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14576 {
14577 #ifdef CONFIG_X86_32
14578 struct desc_struct gdt;
14579 + unsigned long base = per_cpu_offset(cpu);
14580
14581 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14582 - 0x2 | DESCTYPE_S, 0x8);
14583 - gdt.s = 1;
14584 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14585 + 0x83 | DESCTYPE_S, 0xC);
14586 write_gdt_entry(get_cpu_gdt_table(cpu),
14587 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14588 #endif
14589 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14590 /* alrighty, percpu areas up and running */
14591 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14592 for_each_possible_cpu(cpu) {
14593 +#ifdef CONFIG_CC_STACKPROTECTOR
14594 +#ifdef CONFIG_X86_32
14595 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14596 +#endif
14597 +#endif
14598 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14599 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14600 per_cpu(cpu_number, cpu) = cpu;
14601 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14602 */
14603 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14604 #endif
14605 +#ifdef CONFIG_CC_STACKPROTECTOR
14606 +#ifdef CONFIG_X86_32
14607 + if (!cpu)
14608 + per_cpu(stack_canary.canary, cpu) = canary;
14609 +#endif
14610 +#endif
14611 /*
14612 * Up to this point, the boot CPU has been using .init.data
14613 * area. Reload any changed state for the boot CPU.
14614 diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14615 --- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14616 +++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14617 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14618 * Align the stack pointer according to the i386 ABI,
14619 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14620 */
14621 - sp = ((sp + 4) & -16ul) - 4;
14622 + sp = ((sp - 12) & -16ul) - 4;
14623 #else /* !CONFIG_X86_32 */
14624 sp = round_down(sp, 16) - 8;
14625 #endif
14626 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14627 * Return an always-bogus address instead so we will die with SIGSEGV.
14628 */
14629 if (onsigstack && !likely(on_sig_stack(sp)))
14630 - return (void __user *)-1L;
14631 + return (__force void __user *)-1L;
14632
14633 /* save i387 state */
14634 if (used_math() && save_i387_xstate(*fpstate) < 0)
14635 - return (void __user *)-1L;
14636 + return (__force void __user *)-1L;
14637
14638 return (void __user *)sp;
14639 }
14640 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14641 }
14642
14643 if (current->mm->context.vdso)
14644 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14645 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14646 else
14647 - restorer = &frame->retcode;
14648 + restorer = (void __user *)&frame->retcode;
14649 if (ka->sa.sa_flags & SA_RESTORER)
14650 restorer = ka->sa.sa_restorer;
14651
14652 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14653 * reasons and because gdb uses it as a signature to notice
14654 * signal handler stack frames.
14655 */
14656 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14657 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14658
14659 if (err)
14660 return -EFAULT;
14661 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14662 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14663
14664 /* Set up to return from userspace. */
14665 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14666 + if (current->mm->context.vdso)
14667 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14668 + else
14669 + restorer = (void __user *)&frame->retcode;
14670 if (ka->sa.sa_flags & SA_RESTORER)
14671 restorer = ka->sa.sa_restorer;
14672 put_user_ex(restorer, &frame->pretcode);
14673 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14674 * reasons and because gdb uses it as a signature to notice
14675 * signal handler stack frames.
14676 */
14677 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14678 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14679 } put_user_catch(err);
14680
14681 if (err)
14682 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14683 int signr;
14684 sigset_t *oldset;
14685
14686 + pax_track_stack();
14687 +
14688 /*
14689 * We want the common case to go fast, which is why we may in certain
14690 * cases get here from kernel mode. Just return without doing anything
14691 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14692 * X86_32: vm86 regs switched out by assembly code before reaching
14693 * here, so testing against kernel CS suffices.
14694 */
14695 - if (!user_mode(regs))
14696 + if (!user_mode_novm(regs))
14697 return;
14698
14699 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14700 diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14701 --- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14702 +++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14703 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14704 set_idle_for_cpu(cpu, c_idle.idle);
14705 do_rest:
14706 per_cpu(current_task, cpu) = c_idle.idle;
14707 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14708 #ifdef CONFIG_X86_32
14709 /* Stack for startup_32 can be just as for start_secondary onwards */
14710 irq_ctx_init(cpu);
14711 #else
14712 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14713 initial_gs = per_cpu_offset(cpu);
14714 - per_cpu(kernel_stack, cpu) =
14715 - (unsigned long)task_stack_page(c_idle.idle) -
14716 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14717 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14718 #endif
14719 +
14720 + pax_open_kernel();
14721 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14722 + pax_close_kernel();
14723 +
14724 initial_code = (unsigned long)start_secondary;
14725 stack_start = c_idle.idle->thread.sp;
14726
14727 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14728
14729 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14730
14731 +#ifdef CONFIG_PAX_PER_CPU_PGD
14732 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14733 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14734 + KERNEL_PGD_PTRS);
14735 +#endif
14736 +
14737 err = do_boot_cpu(apicid, cpu);
14738 if (err) {
14739 pr_debug("do_boot_cpu failed %d\n", err);
14740 diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14741 --- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14742 +++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14743 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14744 struct desc_struct *desc;
14745 unsigned long base;
14746
14747 - seg &= ~7UL;
14748 + seg >>= 3;
14749
14750 mutex_lock(&child->mm->context.lock);
14751 - if (unlikely((seg >> 3) >= child->mm->context.size))
14752 + if (unlikely(seg >= child->mm->context.size))
14753 addr = -1L; /* bogus selector, access would fault */
14754 else {
14755 desc = child->mm->context.ldt + seg;
14756 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14757 addr += base;
14758 }
14759 mutex_unlock(&child->mm->context.lock);
14760 - }
14761 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14762 + addr = ktla_ktva(addr);
14763
14764 return addr;
14765 }
14766 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14767 unsigned char opcode[15];
14768 unsigned long addr = convert_ip_to_linear(child, regs);
14769
14770 + if (addr == -EINVAL)
14771 + return 0;
14772 +
14773 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14774 for (i = 0; i < copied; i++) {
14775 switch (opcode[i]) {
14776 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14777
14778 #ifdef CONFIG_X86_64
14779 case 0x40 ... 0x4f:
14780 - if (regs->cs != __USER_CS)
14781 + if ((regs->cs & 0xffff) != __USER_CS)
14782 /* 32-bit mode: register increment */
14783 return 0;
14784 /* 64-bit mode: REX prefix */
14785 diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
14786 --- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14787 +++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14788 @@ -1,3 +1,4 @@
14789 +.section .rodata,"a",@progbits
14790 ENTRY(sys_call_table)
14791 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14792 .long sys_exit
14793 diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
14794 --- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14795 +++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14796 @@ -24,17 +24,224 @@
14797
14798 #include <asm/syscalls.h>
14799
14800 -/*
14801 - * Do a system call from kernel instead of calling sys_execve so we
14802 - * end up with proper pt_regs.
14803 - */
14804 -int kernel_execve(const char *filename,
14805 - const char *const argv[],
14806 - const char *const envp[])
14807 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14808 {
14809 - long __res;
14810 - asm volatile ("int $0x80"
14811 - : "=a" (__res)
14812 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14813 - return __res;
14814 + unsigned long pax_task_size = TASK_SIZE;
14815 +
14816 +#ifdef CONFIG_PAX_SEGMEXEC
14817 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14818 + pax_task_size = SEGMEXEC_TASK_SIZE;
14819 +#endif
14820 +
14821 + if (len > pax_task_size || addr > pax_task_size - len)
14822 + return -EINVAL;
14823 +
14824 + return 0;
14825 +}
14826 +
14827 +unsigned long
14828 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14829 + unsigned long len, unsigned long pgoff, unsigned long flags)
14830 +{
14831 + struct mm_struct *mm = current->mm;
14832 + struct vm_area_struct *vma;
14833 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14834 +
14835 +#ifdef CONFIG_PAX_SEGMEXEC
14836 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14837 + pax_task_size = SEGMEXEC_TASK_SIZE;
14838 +#endif
14839 +
14840 + pax_task_size -= PAGE_SIZE;
14841 +
14842 + if (len > pax_task_size)
14843 + return -ENOMEM;
14844 +
14845 + if (flags & MAP_FIXED)
14846 + return addr;
14847 +
14848 +#ifdef CONFIG_PAX_RANDMMAP
14849 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14850 +#endif
14851 +
14852 + if (addr) {
14853 + addr = PAGE_ALIGN(addr);
14854 + if (pax_task_size - len >= addr) {
14855 + vma = find_vma(mm, addr);
14856 + if (check_heap_stack_gap(vma, addr, len))
14857 + return addr;
14858 + }
14859 + }
14860 + if (len > mm->cached_hole_size) {
14861 + start_addr = addr = mm->free_area_cache;
14862 + } else {
14863 + start_addr = addr = mm->mmap_base;
14864 + mm->cached_hole_size = 0;
14865 + }
14866 +
14867 +#ifdef CONFIG_PAX_PAGEEXEC
14868 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14869 + start_addr = 0x00110000UL;
14870 +
14871 +#ifdef CONFIG_PAX_RANDMMAP
14872 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14873 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14874 +#endif
14875 +
14876 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14877 + start_addr = addr = mm->mmap_base;
14878 + else
14879 + addr = start_addr;
14880 + }
14881 +#endif
14882 +
14883 +full_search:
14884 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14885 + /* At this point: (!vma || addr < vma->vm_end). */
14886 + if (pax_task_size - len < addr) {
14887 + /*
14888 + * Start a new search - just in case we missed
14889 + * some holes.
14890 + */
14891 + if (start_addr != mm->mmap_base) {
14892 + start_addr = addr = mm->mmap_base;
14893 + mm->cached_hole_size = 0;
14894 + goto full_search;
14895 + }
14896 + return -ENOMEM;
14897 + }
14898 + if (check_heap_stack_gap(vma, addr, len))
14899 + break;
14900 + if (addr + mm->cached_hole_size < vma->vm_start)
14901 + mm->cached_hole_size = vma->vm_start - addr;
14902 + addr = vma->vm_end;
14903 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14904 + start_addr = addr = mm->mmap_base;
14905 + mm->cached_hole_size = 0;
14906 + goto full_search;
14907 + }
14908 + }
14909 +
14910 + /*
14911 + * Remember the place where we stopped the search:
14912 + */
14913 + mm->free_area_cache = addr + len;
14914 + return addr;
14915 +}
14916 +
14917 +unsigned long
14918 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14919 + const unsigned long len, const unsigned long pgoff,
14920 + const unsigned long flags)
14921 +{
14922 + struct vm_area_struct *vma;
14923 + struct mm_struct *mm = current->mm;
14924 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14925 +
14926 +#ifdef CONFIG_PAX_SEGMEXEC
14927 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14928 + pax_task_size = SEGMEXEC_TASK_SIZE;
14929 +#endif
14930 +
14931 + pax_task_size -= PAGE_SIZE;
14932 +
14933 + /* requested length too big for entire address space */
14934 + if (len > pax_task_size)
14935 + return -ENOMEM;
14936 +
14937 + if (flags & MAP_FIXED)
14938 + return addr;
14939 +
14940 +#ifdef CONFIG_PAX_PAGEEXEC
14941 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14942 + goto bottomup;
14943 +#endif
14944 +
14945 +#ifdef CONFIG_PAX_RANDMMAP
14946 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14947 +#endif
14948 +
14949 + /* requesting a specific address */
14950 + if (addr) {
14951 + addr = PAGE_ALIGN(addr);
14952 + if (pax_task_size - len >= addr) {
14953 + vma = find_vma(mm, addr);
14954 + if (check_heap_stack_gap(vma, addr, len))
14955 + return addr;
14956 + }
14957 + }
14958 +
14959 + /* check if free_area_cache is useful for us */
14960 + if (len <= mm->cached_hole_size) {
14961 + mm->cached_hole_size = 0;
14962 + mm->free_area_cache = mm->mmap_base;
14963 + }
14964 +
14965 + /* either no address requested or can't fit in requested address hole */
14966 + addr = mm->free_area_cache;
14967 +
14968 + /* make sure it can fit in the remaining address space */
14969 + if (addr > len) {
14970 + vma = find_vma(mm, addr-len);
14971 + if (check_heap_stack_gap(vma, addr - len, len))
14972 + /* remember the address as a hint for next time */
14973 + return (mm->free_area_cache = addr-len);
14974 + }
14975 +
14976 + if (mm->mmap_base < len)
14977 + goto bottomup;
14978 +
14979 + addr = mm->mmap_base-len;
14980 +
14981 + do {
14982 + /*
14983 + * Lookup failure means no vma is above this address,
14984 + * else if new region fits below vma->vm_start,
14985 + * return with success:
14986 + */
14987 + vma = find_vma(mm, addr);
14988 + if (check_heap_stack_gap(vma, addr, len))
14989 + /* remember the address as a hint for next time */
14990 + return (mm->free_area_cache = addr);
14991 +
14992 + /* remember the largest hole we saw so far */
14993 + if (addr + mm->cached_hole_size < vma->vm_start)
14994 + mm->cached_hole_size = vma->vm_start - addr;
14995 +
14996 + /* try just below the current vma->vm_start */
14997 + addr = skip_heap_stack_gap(vma, len);
14998 + } while (!IS_ERR_VALUE(addr));
14999 +
15000 +bottomup:
15001 + /*
15002 + * A failed mmap() very likely causes application failure,
15003 + * so fall back to the bottom-up function here. This scenario
15004 + * can happen with large stack limits and large mmap()
15005 + * allocations.
15006 + */
15007 +
15008 +#ifdef CONFIG_PAX_SEGMEXEC
15009 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15010 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15011 + else
15012 +#endif
15013 +
15014 + mm->mmap_base = TASK_UNMAPPED_BASE;
15015 +
15016 +#ifdef CONFIG_PAX_RANDMMAP
15017 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15018 + mm->mmap_base += mm->delta_mmap;
15019 +#endif
15020 +
15021 + mm->free_area_cache = mm->mmap_base;
15022 + mm->cached_hole_size = ~0UL;
15023 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15024 + /*
15025 + * Restore the topdown base:
15026 + */
15027 + mm->mmap_base = base;
15028 + mm->free_area_cache = base;
15029 + mm->cached_hole_size = ~0UL;
15030 +
15031 + return addr;
15032 }
15033 diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15034 --- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15035 +++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15036 @@ -32,8 +32,8 @@ out:
15037 return error;
15038 }
15039
15040 -static void find_start_end(unsigned long flags, unsigned long *begin,
15041 - unsigned long *end)
15042 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15043 + unsigned long *begin, unsigned long *end)
15044 {
15045 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15046 unsigned long new_begin;
15047 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15048 *begin = new_begin;
15049 }
15050 } else {
15051 - *begin = TASK_UNMAPPED_BASE;
15052 + *begin = mm->mmap_base;
15053 *end = TASK_SIZE;
15054 }
15055 }
15056 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15057 if (flags & MAP_FIXED)
15058 return addr;
15059
15060 - find_start_end(flags, &begin, &end);
15061 + find_start_end(mm, flags, &begin, &end);
15062
15063 if (len > end)
15064 return -ENOMEM;
15065
15066 +#ifdef CONFIG_PAX_RANDMMAP
15067 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15068 +#endif
15069 +
15070 if (addr) {
15071 addr = PAGE_ALIGN(addr);
15072 vma = find_vma(mm, addr);
15073 - if (end - len >= addr &&
15074 - (!vma || addr + len <= vma->vm_start))
15075 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15076 return addr;
15077 }
15078 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15079 @@ -106,7 +109,7 @@ full_search:
15080 }
15081 return -ENOMEM;
15082 }
15083 - if (!vma || addr + len <= vma->vm_start) {
15084 + if (check_heap_stack_gap(vma, addr, len)) {
15085 /*
15086 * Remember the place where we stopped the search:
15087 */
15088 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15089 {
15090 struct vm_area_struct *vma;
15091 struct mm_struct *mm = current->mm;
15092 - unsigned long addr = addr0;
15093 + unsigned long base = mm->mmap_base, addr = addr0;
15094
15095 /* requested length too big for entire address space */
15096 if (len > TASK_SIZE)
15097 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15098 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15099 goto bottomup;
15100
15101 +#ifdef CONFIG_PAX_RANDMMAP
15102 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15103 +#endif
15104 +
15105 /* requesting a specific address */
15106 if (addr) {
15107 addr = PAGE_ALIGN(addr);
15108 - vma = find_vma(mm, addr);
15109 - if (TASK_SIZE - len >= addr &&
15110 - (!vma || addr + len <= vma->vm_start))
15111 - return addr;
15112 + if (TASK_SIZE - len >= addr) {
15113 + vma = find_vma(mm, addr);
15114 + if (check_heap_stack_gap(vma, addr, len))
15115 + return addr;
15116 + }
15117 }
15118
15119 /* check if free_area_cache is useful for us */
15120 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15121 /* make sure it can fit in the remaining address space */
15122 if (addr > len) {
15123 vma = find_vma(mm, addr-len);
15124 - if (!vma || addr <= vma->vm_start)
15125 + if (check_heap_stack_gap(vma, addr - len, len))
15126 /* remember the address as a hint for next time */
15127 return mm->free_area_cache = addr-len;
15128 }
15129 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15130 * return with success:
15131 */
15132 vma = find_vma(mm, addr);
15133 - if (!vma || addr+len <= vma->vm_start)
15134 + if (check_heap_stack_gap(vma, addr, len))
15135 /* remember the address as a hint for next time */
15136 return mm->free_area_cache = addr;
15137
15138 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15139 mm->cached_hole_size = vma->vm_start - addr;
15140
15141 /* try just below the current vma->vm_start */
15142 - addr = vma->vm_start-len;
15143 - } while (len < vma->vm_start);
15144 + addr = skip_heap_stack_gap(vma, len);
15145 + } while (!IS_ERR_VALUE(addr));
15146
15147 bottomup:
15148 /*
15149 @@ -198,13 +206,21 @@ bottomup:
15150 * can happen with large stack limits and large mmap()
15151 * allocations.
15152 */
15153 + mm->mmap_base = TASK_UNMAPPED_BASE;
15154 +
15155 +#ifdef CONFIG_PAX_RANDMMAP
15156 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15157 + mm->mmap_base += mm->delta_mmap;
15158 +#endif
15159 +
15160 + mm->free_area_cache = mm->mmap_base;
15161 mm->cached_hole_size = ~0UL;
15162 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15163 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15164 /*
15165 * Restore the topdown base:
15166 */
15167 - mm->free_area_cache = mm->mmap_base;
15168 + mm->mmap_base = base;
15169 + mm->free_area_cache = base;
15170 mm->cached_hole_size = ~0UL;
15171
15172 return addr;
15173 diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15174 --- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15175 +++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15176 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15177
15178 void tboot_shutdown(u32 shutdown_type)
15179 {
15180 - void (*shutdown)(void);
15181 + void (* __noreturn shutdown)(void);
15182
15183 if (!tboot_enabled())
15184 return;
15185 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15186
15187 switch_to_tboot_pt();
15188
15189 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15190 + shutdown = (void *)tboot->shutdown_entry;
15191 shutdown();
15192
15193 /* should not reach here */
15194 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15195 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15196 }
15197
15198 -static atomic_t ap_wfs_count;
15199 +static atomic_unchecked_t ap_wfs_count;
15200
15201 static int tboot_wait_for_aps(int num_aps)
15202 {
15203 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15204 {
15205 switch (action) {
15206 case CPU_DYING:
15207 - atomic_inc(&ap_wfs_count);
15208 + atomic_inc_unchecked(&ap_wfs_count);
15209 if (num_online_cpus() == 1)
15210 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15211 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15212 return NOTIFY_BAD;
15213 break;
15214 }
15215 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15216
15217 tboot_create_trampoline();
15218
15219 - atomic_set(&ap_wfs_count, 0);
15220 + atomic_set_unchecked(&ap_wfs_count, 0);
15221 register_hotcpu_notifier(&tboot_cpu_notifier);
15222 return 0;
15223 }
15224 diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15225 --- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15226 +++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15227 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15228 {
15229 unsigned long pc = instruction_pointer(regs);
15230
15231 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15232 + if (!user_mode(regs) && in_lock_functions(pc)) {
15233 #ifdef CONFIG_FRAME_POINTER
15234 - return *(unsigned long *)(regs->bp + sizeof(long));
15235 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15236 #else
15237 unsigned long *sp =
15238 (unsigned long *)kernel_stack_pointer(regs);
15239 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15240 * or above a saved flags. Eflags has bits 22-31 zero,
15241 * kernel addresses don't.
15242 */
15243 +
15244 +#ifdef CONFIG_PAX_KERNEXEC
15245 + return ktla_ktva(sp[0]);
15246 +#else
15247 if (sp[0] >> 22)
15248 return sp[0];
15249 if (sp[1] >> 22)
15250 return sp[1];
15251 #endif
15252 +
15253 +#endif
15254 }
15255 return pc;
15256 }
15257 diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15258 --- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15259 +++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15260 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15261 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15262 return -EINVAL;
15263
15264 +#ifdef CONFIG_PAX_SEGMEXEC
15265 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15266 + return -EINVAL;
15267 +#endif
15268 +
15269 set_tls_desc(p, idx, &info, 1);
15270
15271 return 0;
15272 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15273 --- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15274 +++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15275 @@ -32,6 +32,12 @@
15276 #include <asm/segment.h>
15277 #include <asm/page_types.h>
15278
15279 +#ifdef CONFIG_PAX_KERNEXEC
15280 +#define ta(X) (X)
15281 +#else
15282 +#define ta(X) ((X) - __PAGE_OFFSET)
15283 +#endif
15284 +
15285 #ifdef CONFIG_SMP
15286
15287 .section ".x86_trampoline","a"
15288 @@ -62,7 +68,7 @@ r_base = .
15289 inc %ax # protected mode (PE) bit
15290 lmsw %ax # into protected mode
15291 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15292 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15293 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15294
15295 # These need to be in the same 64K segment as the above;
15296 # hence we don't use the boot_gdt_descr defined in head.S
15297 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15298 --- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15299 +++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15300 @@ -90,7 +90,7 @@ startup_32:
15301 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15302 movl %eax, %ds
15303
15304 - movl $X86_CR4_PAE, %eax
15305 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15306 movl %eax, %cr4 # Enable PAE mode
15307
15308 # Setup trampoline 4 level pagetables
15309 @@ -138,7 +138,7 @@ tidt:
15310 # so the kernel can live anywhere
15311 .balign 4
15312 tgdt:
15313 - .short tgdt_end - tgdt # gdt limit
15314 + .short tgdt_end - tgdt - 1 # gdt limit
15315 .long tgdt - r_base
15316 .short 0
15317 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15318 diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15319 --- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15320 +++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15321 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15322
15323 /* Do we ignore FPU interrupts ? */
15324 char ignore_fpu_irq;
15325 -
15326 -/*
15327 - * The IDT has to be page-aligned to simplify the Pentium
15328 - * F0 0F bug workaround.
15329 - */
15330 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15331 #endif
15332
15333 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15334 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15335 }
15336
15337 static void __kprobes
15338 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15339 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15340 long error_code, siginfo_t *info)
15341 {
15342 struct task_struct *tsk = current;
15343
15344 #ifdef CONFIG_X86_32
15345 - if (regs->flags & X86_VM_MASK) {
15346 + if (v8086_mode(regs)) {
15347 /*
15348 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15349 * On nmi (interrupt 2), do_trap should not be called.
15350 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15351 }
15352 #endif
15353
15354 - if (!user_mode(regs))
15355 + if (!user_mode_novm(regs))
15356 goto kernel_trap;
15357
15358 #ifdef CONFIG_X86_32
15359 @@ -157,7 +151,7 @@ trap_signal:
15360 printk_ratelimit()) {
15361 printk(KERN_INFO
15362 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15363 - tsk->comm, tsk->pid, str,
15364 + tsk->comm, task_pid_nr(tsk), str,
15365 regs->ip, regs->sp, error_code);
15366 print_vma_addr(" in ", regs->ip);
15367 printk("\n");
15368 @@ -174,8 +168,20 @@ kernel_trap:
15369 if (!fixup_exception(regs)) {
15370 tsk->thread.error_code = error_code;
15371 tsk->thread.trap_no = trapnr;
15372 +
15373 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15374 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15375 + str = "PAX: suspicious stack segment fault";
15376 +#endif
15377 +
15378 die(str, regs, error_code);
15379 }
15380 +
15381 +#ifdef CONFIG_PAX_REFCOUNT
15382 + if (trapnr == 4)
15383 + pax_report_refcount_overflow(regs);
15384 +#endif
15385 +
15386 return;
15387
15388 #ifdef CONFIG_X86_32
15389 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15390 conditional_sti(regs);
15391
15392 #ifdef CONFIG_X86_32
15393 - if (regs->flags & X86_VM_MASK)
15394 + if (v8086_mode(regs))
15395 goto gp_in_vm86;
15396 #endif
15397
15398 tsk = current;
15399 - if (!user_mode(regs))
15400 + if (!user_mode_novm(regs))
15401 goto gp_in_kernel;
15402
15403 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15404 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15405 + struct mm_struct *mm = tsk->mm;
15406 + unsigned long limit;
15407 +
15408 + down_write(&mm->mmap_sem);
15409 + limit = mm->context.user_cs_limit;
15410 + if (limit < TASK_SIZE) {
15411 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15412 + up_write(&mm->mmap_sem);
15413 + return;
15414 + }
15415 + up_write(&mm->mmap_sem);
15416 + }
15417 +#endif
15418 +
15419 tsk->thread.error_code = error_code;
15420 tsk->thread.trap_no = 13;
15421
15422 @@ -304,6 +326,13 @@ gp_in_kernel:
15423 if (notify_die(DIE_GPF, "general protection fault", regs,
15424 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15425 return;
15426 +
15427 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15428 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15429 + die("PAX: suspicious general protection fault", regs, error_code);
15430 + else
15431 +#endif
15432 +
15433 die("general protection fault", regs, error_code);
15434 }
15435
15436 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15437 dotraplinkage notrace __kprobes void
15438 do_nmi(struct pt_regs *regs, long error_code)
15439 {
15440 +
15441 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15442 + if (!user_mode(regs)) {
15443 + unsigned long cs = regs->cs & 0xFFFF;
15444 + unsigned long ip = ktva_ktla(regs->ip);
15445 +
15446 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15447 + regs->ip = ip;
15448 + }
15449 +#endif
15450 +
15451 nmi_enter();
15452
15453 inc_irq_stat(__nmi_count);
15454 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15455 /* It's safe to allow irq's after DR6 has been saved */
15456 preempt_conditional_sti(regs);
15457
15458 - if (regs->flags & X86_VM_MASK) {
15459 + if (v8086_mode(regs)) {
15460 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15461 error_code, 1);
15462 preempt_conditional_cli(regs);
15463 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15464 * We already checked v86 mode above, so we can check for kernel mode
15465 * by just checking the CPL of CS.
15466 */
15467 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15468 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15469 tsk->thread.debugreg6 &= ~DR_STEP;
15470 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15471 regs->flags &= ~X86_EFLAGS_TF;
15472 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15473 return;
15474 conditional_sti(regs);
15475
15476 - if (!user_mode_vm(regs))
15477 + if (!user_mode(regs))
15478 {
15479 if (!fixup_exception(regs)) {
15480 task->thread.error_code = error_code;
15481 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15482 void __math_state_restore(void)
15483 {
15484 struct thread_info *thread = current_thread_info();
15485 - struct task_struct *tsk = thread->task;
15486 + struct task_struct *tsk = current;
15487
15488 /*
15489 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15490 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15491 */
15492 asmlinkage void math_state_restore(void)
15493 {
15494 - struct thread_info *thread = current_thread_info();
15495 - struct task_struct *tsk = thread->task;
15496 + struct task_struct *tsk = current;
15497
15498 if (!tsk_used_math(tsk)) {
15499 local_irq_enable();
15500 diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15501 --- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15502 +++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15503 @@ -20,6 +20,7 @@
15504 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15505 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15506 * arch/x86/kernel/head_32.S: processor startup
15507 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15508 *
15509 * verify_cpu, returns the status of longmode and SSE in register %eax.
15510 * 0: Success 1: Failure
15511 diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15512 --- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15513 +++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15514 @@ -41,6 +41,7 @@
15515 #include <linux/ptrace.h>
15516 #include <linux/audit.h>
15517 #include <linux/stddef.h>
15518 +#include <linux/grsecurity.h>
15519
15520 #include <asm/uaccess.h>
15521 #include <asm/io.h>
15522 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15523 do_exit(SIGSEGV);
15524 }
15525
15526 - tss = &per_cpu(init_tss, get_cpu());
15527 + tss = init_tss + get_cpu();
15528 current->thread.sp0 = current->thread.saved_sp0;
15529 current->thread.sysenter_cs = __KERNEL_CS;
15530 load_sp0(tss, &current->thread);
15531 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15532 struct task_struct *tsk;
15533 int tmp, ret = -EPERM;
15534
15535 +#ifdef CONFIG_GRKERNSEC_VM86
15536 + if (!capable(CAP_SYS_RAWIO)) {
15537 + gr_handle_vm86();
15538 + goto out;
15539 + }
15540 +#endif
15541 +
15542 tsk = current;
15543 if (tsk->thread.saved_sp0)
15544 goto out;
15545 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15546 int tmp, ret;
15547 struct vm86plus_struct __user *v86;
15548
15549 +#ifdef CONFIG_GRKERNSEC_VM86
15550 + if (!capable(CAP_SYS_RAWIO)) {
15551 + gr_handle_vm86();
15552 + ret = -EPERM;
15553 + goto out;
15554 + }
15555 +#endif
15556 +
15557 tsk = current;
15558 switch (cmd) {
15559 case VM86_REQUEST_IRQ:
15560 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15561 tsk->thread.saved_fs = info->regs32->fs;
15562 tsk->thread.saved_gs = get_user_gs(info->regs32);
15563
15564 - tss = &per_cpu(init_tss, get_cpu());
15565 + tss = init_tss + get_cpu();
15566 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15567 if (cpu_has_sep)
15568 tsk->thread.sysenter_cs = 0;
15569 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15570 goto cannot_handle;
15571 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15572 goto cannot_handle;
15573 - intr_ptr = (unsigned long __user *) (i << 2);
15574 + intr_ptr = (__force unsigned long __user *) (i << 2);
15575 if (get_user(segoffs, intr_ptr))
15576 goto cannot_handle;
15577 if ((segoffs >> 16) == BIOSSEG)
15578 diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15579 --- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15580 +++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15581 @@ -26,6 +26,13 @@
15582 #include <asm/page_types.h>
15583 #include <asm/cache.h>
15584 #include <asm/boot.h>
15585 +#include <asm/segment.h>
15586 +
15587 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15588 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15589 +#else
15590 +#define __KERNEL_TEXT_OFFSET 0
15591 +#endif
15592
15593 #undef i386 /* in case the preprocessor is a 32bit one */
15594
15595 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15596
15597 PHDRS {
15598 text PT_LOAD FLAGS(5); /* R_E */
15599 +#ifdef CONFIG_X86_32
15600 + module PT_LOAD FLAGS(5); /* R_E */
15601 +#endif
15602 +#ifdef CONFIG_XEN
15603 + rodata PT_LOAD FLAGS(5); /* R_E */
15604 +#else
15605 + rodata PT_LOAD FLAGS(4); /* R__ */
15606 +#endif
15607 data PT_LOAD FLAGS(6); /* RW_ */
15608 #ifdef CONFIG_X86_64
15609 user PT_LOAD FLAGS(5); /* R_E */
15610 +#endif
15611 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15612 #ifdef CONFIG_SMP
15613 percpu PT_LOAD FLAGS(6); /* RW_ */
15614 #endif
15615 + text.init PT_LOAD FLAGS(5); /* R_E */
15616 + text.exit PT_LOAD FLAGS(5); /* R_E */
15617 init PT_LOAD FLAGS(7); /* RWE */
15618 -#endif
15619 note PT_NOTE FLAGS(0); /* ___ */
15620 }
15621
15622 SECTIONS
15623 {
15624 #ifdef CONFIG_X86_32
15625 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15626 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15627 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15628 #else
15629 - . = __START_KERNEL;
15630 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15631 + . = __START_KERNEL;
15632 #endif
15633
15634 /* Text and read-only data */
15635 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15636 - _text = .;
15637 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15638 /* bootstrapping code */
15639 +#ifdef CONFIG_X86_32
15640 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15641 +#else
15642 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15643 +#endif
15644 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15645 + _text = .;
15646 HEAD_TEXT
15647 #ifdef CONFIG_X86_32
15648 . = ALIGN(PAGE_SIZE);
15649 @@ -109,13 +131,47 @@ SECTIONS
15650 IRQENTRY_TEXT
15651 *(.fixup)
15652 *(.gnu.warning)
15653 - /* End of text section */
15654 - _etext = .;
15655 } :text = 0x9090
15656
15657 - NOTES :text :note
15658 + . += __KERNEL_TEXT_OFFSET;
15659 +
15660 +#ifdef CONFIG_X86_32
15661 + . = ALIGN(PAGE_SIZE);
15662 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15663 +
15664 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15665 + MODULES_EXEC_VADDR = .;
15666 + BYTE(0)
15667 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15668 + . = ALIGN(HPAGE_SIZE);
15669 + MODULES_EXEC_END = . - 1;
15670 +#endif
15671 +
15672 + } :module
15673 +#endif
15674 +
15675 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15676 + /* End of text section */
15677 + _etext = . - __KERNEL_TEXT_OFFSET;
15678 + }
15679 +
15680 +#ifdef CONFIG_X86_32
15681 + . = ALIGN(PAGE_SIZE);
15682 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15683 + *(.idt)
15684 + . = ALIGN(PAGE_SIZE);
15685 + *(.empty_zero_page)
15686 + *(.initial_pg_fixmap)
15687 + *(.initial_pg_pmd)
15688 + *(.initial_page_table)
15689 + *(.swapper_pg_dir)
15690 + } :rodata
15691 +#endif
15692 +
15693 + . = ALIGN(PAGE_SIZE);
15694 + NOTES :rodata :note
15695
15696 - EXCEPTION_TABLE(16) :text = 0x9090
15697 + EXCEPTION_TABLE(16) :rodata
15698
15699 #if defined(CONFIG_DEBUG_RODATA)
15700 /* .text should occupy whole number of pages */
15701 @@ -127,16 +183,20 @@ SECTIONS
15702
15703 /* Data */
15704 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15705 +
15706 +#ifdef CONFIG_PAX_KERNEXEC
15707 + . = ALIGN(HPAGE_SIZE);
15708 +#else
15709 + . = ALIGN(PAGE_SIZE);
15710 +#endif
15711 +
15712 /* Start of data section */
15713 _sdata = .;
15714
15715 /* init_task */
15716 INIT_TASK_DATA(THREAD_SIZE)
15717
15718 -#ifdef CONFIG_X86_32
15719 - /* 32 bit has nosave before _edata */
15720 NOSAVE_DATA
15721 -#endif
15722
15723 PAGE_ALIGNED_DATA(PAGE_SIZE)
15724
15725 @@ -208,12 +268,19 @@ SECTIONS
15726 #endif /* CONFIG_X86_64 */
15727
15728 /* Init code and data - will be freed after init */
15729 - . = ALIGN(PAGE_SIZE);
15730 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15731 + BYTE(0)
15732 +
15733 +#ifdef CONFIG_PAX_KERNEXEC
15734 + . = ALIGN(HPAGE_SIZE);
15735 +#else
15736 + . = ALIGN(PAGE_SIZE);
15737 +#endif
15738 +
15739 __init_begin = .; /* paired with __init_end */
15740 - }
15741 + } :init.begin
15742
15743 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15744 +#ifdef CONFIG_SMP
15745 /*
15746 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15747 * output PHDR, so the next output section - .init.text - should
15748 @@ -222,12 +289,27 @@ SECTIONS
15749 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15750 #endif
15751
15752 - INIT_TEXT_SECTION(PAGE_SIZE)
15753 -#ifdef CONFIG_X86_64
15754 - :init
15755 -#endif
15756 + . = ALIGN(PAGE_SIZE);
15757 + init_begin = .;
15758 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15759 + VMLINUX_SYMBOL(_sinittext) = .;
15760 + INIT_TEXT
15761 + VMLINUX_SYMBOL(_einittext) = .;
15762 + . = ALIGN(PAGE_SIZE);
15763 + } :text.init
15764
15765 - INIT_DATA_SECTION(16)
15766 + /*
15767 + * .exit.text is discard at runtime, not link time, to deal with
15768 + * references from .altinstructions and .eh_frame
15769 + */
15770 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15771 + EXIT_TEXT
15772 + . = ALIGN(16);
15773 + } :text.exit
15774 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15775 +
15776 + . = ALIGN(PAGE_SIZE);
15777 + INIT_DATA_SECTION(16) :init
15778
15779 /*
15780 * Code and data for a variety of lowlevel trampolines, to be
15781 @@ -301,19 +383,12 @@ SECTIONS
15782 }
15783
15784 . = ALIGN(8);
15785 - /*
15786 - * .exit.text is discard at runtime, not link time, to deal with
15787 - * references from .altinstructions and .eh_frame
15788 - */
15789 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15790 - EXIT_TEXT
15791 - }
15792
15793 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15794 EXIT_DATA
15795 }
15796
15797 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15798 +#ifndef CONFIG_SMP
15799 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15800 #endif
15801
15802 @@ -332,16 +407,10 @@ SECTIONS
15803 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15804 __smp_locks = .;
15805 *(.smp_locks)
15806 - . = ALIGN(PAGE_SIZE);
15807 __smp_locks_end = .;
15808 + . = ALIGN(PAGE_SIZE);
15809 }
15810
15811 -#ifdef CONFIG_X86_64
15812 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15813 - NOSAVE_DATA
15814 - }
15815 -#endif
15816 -
15817 /* BSS */
15818 . = ALIGN(PAGE_SIZE);
15819 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15820 @@ -357,6 +426,7 @@ SECTIONS
15821 __brk_base = .;
15822 . += 64 * 1024; /* 64k alignment slop space */
15823 *(.brk_reservation) /* areas brk users have reserved */
15824 + . = ALIGN(HPAGE_SIZE);
15825 __brk_limit = .;
15826 }
15827
15828 @@ -383,13 +453,12 @@ SECTIONS
15829 * for the boot processor.
15830 */
15831 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15832 -INIT_PER_CPU(gdt_page);
15833 INIT_PER_CPU(irq_stack_union);
15834
15835 /*
15836 * Build-time check on the image size:
15837 */
15838 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15839 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15840 "kernel image bigger than KERNEL_IMAGE_SIZE");
15841
15842 #ifdef CONFIG_SMP
15843 diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
15844 --- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15845 +++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15846 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15847 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15848 {
15849 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15850 - .sysctl_enabled = 1,
15851 + .sysctl_enabled = 0,
15852 };
15853
15854 void update_vsyscall_tz(void)
15855 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15856 static ctl_table kernel_table2[] = {
15857 { .procname = "vsyscall64",
15858 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15859 - .mode = 0644,
15860 + .mode = 0444,
15861 .proc_handler = proc_dointvec },
15862 {}
15863 };
15864 diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
15865 --- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15866 +++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15867 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15868 EXPORT_SYMBOL(copy_user_generic_string);
15869 EXPORT_SYMBOL(copy_user_generic_unrolled);
15870 EXPORT_SYMBOL(__copy_user_nocache);
15871 -EXPORT_SYMBOL(_copy_from_user);
15872 -EXPORT_SYMBOL(_copy_to_user);
15873
15874 EXPORT_SYMBOL(copy_page);
15875 EXPORT_SYMBOL(clear_page);
15876 diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
15877 --- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15878 +++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15879 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15880 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15881 return -EINVAL;
15882
15883 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15884 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15885 fx_sw_user->extended_size -
15886 FP_XSTATE_MAGIC2_SIZE));
15887 if (err)
15888 @@ -267,7 +267,7 @@ fx_only:
15889 * the other extended state.
15890 */
15891 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15892 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15893 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15894 }
15895
15896 /*
15897 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15898 if (use_xsave())
15899 err = restore_user_xstate(buf);
15900 else
15901 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15902 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15903 buf);
15904 if (unlikely(err)) {
15905 /*
15906 diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
15907 --- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15908 +++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15909 @@ -96,7 +96,7 @@
15910 #define Src2ImmByte (2<<29)
15911 #define Src2One (3<<29)
15912 #define Src2Imm (4<<29)
15913 -#define Src2Mask (7<<29)
15914 +#define Src2Mask (7U<<29)
15915
15916 #define X2(x...) x, x
15917 #define X3(x...) X2(x), x
15918 @@ -207,6 +207,7 @@ struct gprefix {
15919
15920 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15921 do { \
15922 + unsigned long _tmp; \
15923 __asm__ __volatile__ ( \
15924 _PRE_EFLAGS("0", "4", "2") \
15925 _op _suffix " %"_x"3,%1; " \
15926 @@ -220,8 +221,6 @@ struct gprefix {
15927 /* Raw emulation: instruction has two explicit operands. */
15928 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15929 do { \
15930 - unsigned long _tmp; \
15931 - \
15932 switch ((_dst).bytes) { \
15933 case 2: \
15934 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15935 @@ -237,7 +236,6 @@ struct gprefix {
15936
15937 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15938 do { \
15939 - unsigned long _tmp; \
15940 switch ((_dst).bytes) { \
15941 case 1: \
15942 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15943 diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
15944 --- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15945 +++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15946 @@ -53,7 +53,7 @@
15947 #define APIC_BUS_CYCLE_NS 1
15948
15949 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15950 -#define apic_debug(fmt, arg...)
15951 +#define apic_debug(fmt, arg...) do {} while (0)
15952
15953 #define APIC_LVT_NUM 6
15954 /* 14 is the version for Xeon and Pentium 8.4.8*/
15955 diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
15956 --- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15957 +++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15958 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15959
15960 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15961
15962 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15963 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15964
15965 /*
15966 * Assume that the pte write on a page table of the same type
15967 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15968 }
15969
15970 spin_lock(&vcpu->kvm->mmu_lock);
15971 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15972 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15973 gentry = 0;
15974 kvm_mmu_free_some_pages(vcpu);
15975 ++vcpu->kvm->stat.mmu_pte_write;
15976 diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
15977 --- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15978 +++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15979 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15980 unsigned long mmu_seq;
15981 bool map_writable;
15982
15983 + pax_track_stack();
15984 +
15985 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15986
15987 r = mmu_topup_memory_caches(vcpu);
15988 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15989 if (need_flush)
15990 kvm_flush_remote_tlbs(vcpu->kvm);
15991
15992 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15993 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15994
15995 spin_unlock(&vcpu->kvm->mmu_lock);
15996
15997 diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
15998 --- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
15999 +++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16000 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16001 int cpu = raw_smp_processor_id();
16002
16003 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16004 +
16005 + pax_open_kernel();
16006 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16007 + pax_close_kernel();
16008 +
16009 load_TR_desc();
16010 }
16011
16012 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16013 #endif
16014 #endif
16015
16016 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16017 + __set_fs(current_thread_info()->addr_limit);
16018 +#endif
16019 +
16020 reload_tss(vcpu);
16021
16022 local_irq_disable();
16023 diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16024 --- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16025 +++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16026 @@ -797,7 +797,11 @@ static void reload_tss(void)
16027 struct desc_struct *descs;
16028
16029 descs = (void *)gdt->address;
16030 +
16031 + pax_open_kernel();
16032 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16033 + pax_close_kernel();
16034 +
16035 load_TR_desc();
16036 }
16037
16038 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16039 if (!cpu_has_vmx_flexpriority())
16040 flexpriority_enabled = 0;
16041
16042 - if (!cpu_has_vmx_tpr_shadow())
16043 - kvm_x86_ops->update_cr8_intercept = NULL;
16044 + if (!cpu_has_vmx_tpr_shadow()) {
16045 + pax_open_kernel();
16046 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16047 + pax_close_kernel();
16048 + }
16049
16050 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16051 kvm_disable_largepages();
16052 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16053 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16054
16055 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16056 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16057 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16058 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16059 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16060 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16061 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16062 "jmp .Lkvm_vmx_return \n\t"
16063 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16064 ".Lkvm_vmx_return: "
16065 +
16066 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16067 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16068 + ".Lkvm_vmx_return2: "
16069 +#endif
16070 +
16071 /* Save guest registers, load host registers, keep flags */
16072 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16073 "pop %0 \n\t"
16074 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16075 #endif
16076 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16077 [wordsize]"i"(sizeof(ulong))
16078 +
16079 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16080 + ,[cs]"i"(__KERNEL_CS)
16081 +#endif
16082 +
16083 : "cc", "memory"
16084 , R"ax", R"bx", R"di", R"si"
16085 #ifdef CONFIG_X86_64
16086 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16087
16088 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16089
16090 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16091 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16092 +
16093 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16094 + loadsegment(fs, __KERNEL_PERCPU);
16095 +#endif
16096 +
16097 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16098 + __set_fs(current_thread_info()->addr_limit);
16099 +#endif
16100 +
16101 vmx->launched = 1;
16102
16103 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16104 diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16105 --- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16106 +++ linux-3.0.4/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16107 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16108 if (n < msr_list.nmsrs)
16109 goto out;
16110 r = -EFAULT;
16111 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16112 + goto out;
16113 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16114 num_msrs_to_save * sizeof(u32)))
16115 goto out;
16116 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16117 struct kvm_cpuid2 *cpuid,
16118 struct kvm_cpuid_entry2 __user *entries)
16119 {
16120 - int r;
16121 + int r, i;
16122
16123 r = -E2BIG;
16124 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16125 goto out;
16126 r = -EFAULT;
16127 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16128 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16129 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16130 goto out;
16131 + for (i = 0; i < cpuid->nent; ++i) {
16132 + struct kvm_cpuid_entry2 cpuid_entry;
16133 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16134 + goto out;
16135 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16136 + }
16137 vcpu->arch.cpuid_nent = cpuid->nent;
16138 kvm_apic_set_version(vcpu);
16139 kvm_x86_ops->cpuid_update(vcpu);
16140 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16141 struct kvm_cpuid2 *cpuid,
16142 struct kvm_cpuid_entry2 __user *entries)
16143 {
16144 - int r;
16145 + int r, i;
16146
16147 r = -E2BIG;
16148 if (cpuid->nent < vcpu->arch.cpuid_nent)
16149 goto out;
16150 r = -EFAULT;
16151 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16152 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16153 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16154 goto out;
16155 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16156 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16157 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16158 + goto out;
16159 + }
16160 return 0;
16161
16162 out:
16163 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16164 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16165 struct kvm_interrupt *irq)
16166 {
16167 - if (irq->irq < 0 || irq->irq >= 256)
16168 + if (irq->irq >= 256)
16169 return -EINVAL;
16170 if (irqchip_in_kernel(vcpu->kvm))
16171 return -ENXIO;
16172 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16173 }
16174 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16175
16176 -int kvm_arch_init(void *opaque)
16177 +int kvm_arch_init(const void *opaque)
16178 {
16179 int r;
16180 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16181 diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16182 --- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16183 +++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16184 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16185 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16186 * Launcher to reboot us.
16187 */
16188 -static void lguest_restart(char *reason)
16189 +static __noreturn void lguest_restart(char *reason)
16190 {
16191 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16192 + BUG();
16193 }
16194
16195 /*G:050
16196 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16197 --- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16198 +++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16199 @@ -8,18 +8,30 @@
16200
16201 long long atomic64_read_cx8(long long, const atomic64_t *v);
16202 EXPORT_SYMBOL(atomic64_read_cx8);
16203 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16204 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16205 long long atomic64_set_cx8(long long, const atomic64_t *v);
16206 EXPORT_SYMBOL(atomic64_set_cx8);
16207 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16208 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16209 long long atomic64_xchg_cx8(long long, unsigned high);
16210 EXPORT_SYMBOL(atomic64_xchg_cx8);
16211 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16212 EXPORT_SYMBOL(atomic64_add_return_cx8);
16213 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16214 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16215 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16216 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16217 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16218 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16219 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16220 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16221 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16222 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16223 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16224 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16225 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16226 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16227 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16228 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16229 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16230 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16231 #ifndef CONFIG_X86_CMPXCHG64
16232 long long atomic64_read_386(long long, const atomic64_t *v);
16233 EXPORT_SYMBOL(atomic64_read_386);
16234 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16235 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16236 long long atomic64_set_386(long long, const atomic64_t *v);
16237 EXPORT_SYMBOL(atomic64_set_386);
16238 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16239 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16240 long long atomic64_xchg_386(long long, unsigned high);
16241 EXPORT_SYMBOL(atomic64_xchg_386);
16242 long long atomic64_add_return_386(long long a, atomic64_t *v);
16243 EXPORT_SYMBOL(atomic64_add_return_386);
16244 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16245 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16246 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16247 EXPORT_SYMBOL(atomic64_sub_return_386);
16248 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16249 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16250 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16251 EXPORT_SYMBOL(atomic64_inc_return_386);
16252 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16253 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16254 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16255 EXPORT_SYMBOL(atomic64_dec_return_386);
16256 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16257 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16258 long long atomic64_add_386(long long a, atomic64_t *v);
16259 EXPORT_SYMBOL(atomic64_add_386);
16260 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16261 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16262 long long atomic64_sub_386(long long a, atomic64_t *v);
16263 EXPORT_SYMBOL(atomic64_sub_386);
16264 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16265 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16266 long long atomic64_inc_386(long long a, atomic64_t *v);
16267 EXPORT_SYMBOL(atomic64_inc_386);
16268 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16269 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16270 long long atomic64_dec_386(long long a, atomic64_t *v);
16271 EXPORT_SYMBOL(atomic64_dec_386);
16272 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16273 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16274 long long atomic64_dec_if_positive_386(atomic64_t *v);
16275 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16276 int atomic64_inc_not_zero_386(atomic64_t *v);
16277 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16278 --- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16279 +++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16280 @@ -48,6 +48,10 @@ BEGIN(read)
16281 movl (v), %eax
16282 movl 4(v), %edx
16283 RET_ENDP
16284 +BEGIN(read_unchecked)
16285 + movl (v), %eax
16286 + movl 4(v), %edx
16287 +RET_ENDP
16288 #undef v
16289
16290 #define v %esi
16291 @@ -55,6 +59,10 @@ BEGIN(set)
16292 movl %ebx, (v)
16293 movl %ecx, 4(v)
16294 RET_ENDP
16295 +BEGIN(set_unchecked)
16296 + movl %ebx, (v)
16297 + movl %ecx, 4(v)
16298 +RET_ENDP
16299 #undef v
16300
16301 #define v %esi
16302 @@ -70,6 +78,20 @@ RET_ENDP
16303 BEGIN(add)
16304 addl %eax, (v)
16305 adcl %edx, 4(v)
16306 +
16307 +#ifdef CONFIG_PAX_REFCOUNT
16308 + jno 0f
16309 + subl %eax, (v)
16310 + sbbl %edx, 4(v)
16311 + int $4
16312 +0:
16313 + _ASM_EXTABLE(0b, 0b)
16314 +#endif
16315 +
16316 +RET_ENDP
16317 +BEGIN(add_unchecked)
16318 + addl %eax, (v)
16319 + adcl %edx, 4(v)
16320 RET_ENDP
16321 #undef v
16322
16323 @@ -77,6 +99,24 @@ RET_ENDP
16324 BEGIN(add_return)
16325 addl (v), %eax
16326 adcl 4(v), %edx
16327 +
16328 +#ifdef CONFIG_PAX_REFCOUNT
16329 + into
16330 +1234:
16331 + _ASM_EXTABLE(1234b, 2f)
16332 +#endif
16333 +
16334 + movl %eax, (v)
16335 + movl %edx, 4(v)
16336 +
16337 +#ifdef CONFIG_PAX_REFCOUNT
16338 +2:
16339 +#endif
16340 +
16341 +RET_ENDP
16342 +BEGIN(add_return_unchecked)
16343 + addl (v), %eax
16344 + adcl 4(v), %edx
16345 movl %eax, (v)
16346 movl %edx, 4(v)
16347 RET_ENDP
16348 @@ -86,6 +126,20 @@ RET_ENDP
16349 BEGIN(sub)
16350 subl %eax, (v)
16351 sbbl %edx, 4(v)
16352 +
16353 +#ifdef CONFIG_PAX_REFCOUNT
16354 + jno 0f
16355 + addl %eax, (v)
16356 + adcl %edx, 4(v)
16357 + int $4
16358 +0:
16359 + _ASM_EXTABLE(0b, 0b)
16360 +#endif
16361 +
16362 +RET_ENDP
16363 +BEGIN(sub_unchecked)
16364 + subl %eax, (v)
16365 + sbbl %edx, 4(v)
16366 RET_ENDP
16367 #undef v
16368
16369 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16370 sbbl $0, %edx
16371 addl (v), %eax
16372 adcl 4(v), %edx
16373 +
16374 +#ifdef CONFIG_PAX_REFCOUNT
16375 + into
16376 +1234:
16377 + _ASM_EXTABLE(1234b, 2f)
16378 +#endif
16379 +
16380 + movl %eax, (v)
16381 + movl %edx, 4(v)
16382 +
16383 +#ifdef CONFIG_PAX_REFCOUNT
16384 +2:
16385 +#endif
16386 +
16387 +RET_ENDP
16388 +BEGIN(sub_return_unchecked)
16389 + negl %edx
16390 + negl %eax
16391 + sbbl $0, %edx
16392 + addl (v), %eax
16393 + adcl 4(v), %edx
16394 movl %eax, (v)
16395 movl %edx, 4(v)
16396 RET_ENDP
16397 @@ -105,6 +180,20 @@ RET_ENDP
16398 BEGIN(inc)
16399 addl $1, (v)
16400 adcl $0, 4(v)
16401 +
16402 +#ifdef CONFIG_PAX_REFCOUNT
16403 + jno 0f
16404 + subl $1, (v)
16405 + sbbl $0, 4(v)
16406 + int $4
16407 +0:
16408 + _ASM_EXTABLE(0b, 0b)
16409 +#endif
16410 +
16411 +RET_ENDP
16412 +BEGIN(inc_unchecked)
16413 + addl $1, (v)
16414 + adcl $0, 4(v)
16415 RET_ENDP
16416 #undef v
16417
16418 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16419 movl 4(v), %edx
16420 addl $1, %eax
16421 adcl $0, %edx
16422 +
16423 +#ifdef CONFIG_PAX_REFCOUNT
16424 + into
16425 +1234:
16426 + _ASM_EXTABLE(1234b, 2f)
16427 +#endif
16428 +
16429 + movl %eax, (v)
16430 + movl %edx, 4(v)
16431 +
16432 +#ifdef CONFIG_PAX_REFCOUNT
16433 +2:
16434 +#endif
16435 +
16436 +RET_ENDP
16437 +BEGIN(inc_return_unchecked)
16438 + movl (v), %eax
16439 + movl 4(v), %edx
16440 + addl $1, %eax
16441 + adcl $0, %edx
16442 movl %eax, (v)
16443 movl %edx, 4(v)
16444 RET_ENDP
16445 @@ -123,6 +232,20 @@ RET_ENDP
16446 BEGIN(dec)
16447 subl $1, (v)
16448 sbbl $0, 4(v)
16449 +
16450 +#ifdef CONFIG_PAX_REFCOUNT
16451 + jno 0f
16452 + addl $1, (v)
16453 + adcl $0, 4(v)
16454 + int $4
16455 +0:
16456 + _ASM_EXTABLE(0b, 0b)
16457 +#endif
16458 +
16459 +RET_ENDP
16460 +BEGIN(dec_unchecked)
16461 + subl $1, (v)
16462 + sbbl $0, 4(v)
16463 RET_ENDP
16464 #undef v
16465
16466 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16467 movl 4(v), %edx
16468 subl $1, %eax
16469 sbbl $0, %edx
16470 +
16471 +#ifdef CONFIG_PAX_REFCOUNT
16472 + into
16473 +1234:
16474 + _ASM_EXTABLE(1234b, 2f)
16475 +#endif
16476 +
16477 + movl %eax, (v)
16478 + movl %edx, 4(v)
16479 +
16480 +#ifdef CONFIG_PAX_REFCOUNT
16481 +2:
16482 +#endif
16483 +
16484 +RET_ENDP
16485 +BEGIN(dec_return_unchecked)
16486 + movl (v), %eax
16487 + movl 4(v), %edx
16488 + subl $1, %eax
16489 + sbbl $0, %edx
16490 movl %eax, (v)
16491 movl %edx, 4(v)
16492 RET_ENDP
16493 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16494 adcl %edx, %edi
16495 addl (v), %eax
16496 adcl 4(v), %edx
16497 +
16498 +#ifdef CONFIG_PAX_REFCOUNT
16499 + into
16500 +1234:
16501 + _ASM_EXTABLE(1234b, 2f)
16502 +#endif
16503 +
16504 cmpl %eax, %esi
16505 je 3f
16506 1:
16507 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16508 1:
16509 addl $1, %eax
16510 adcl $0, %edx
16511 +
16512 +#ifdef CONFIG_PAX_REFCOUNT
16513 + into
16514 +1234:
16515 + _ASM_EXTABLE(1234b, 2f)
16516 +#endif
16517 +
16518 movl %eax, (v)
16519 movl %edx, 4(v)
16520 movl $1, %eax
16521 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16522 movl 4(v), %edx
16523 subl $1, %eax
16524 sbbl $0, %edx
16525 +
16526 +#ifdef CONFIG_PAX_REFCOUNT
16527 + into
16528 +1234:
16529 + _ASM_EXTABLE(1234b, 1f)
16530 +#endif
16531 +
16532 js 1f
16533 movl %eax, (v)
16534 movl %edx, 4(v)
16535 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16536 --- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16537 +++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16538 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16539 CFI_ENDPROC
16540 ENDPROC(atomic64_read_cx8)
16541
16542 +ENTRY(atomic64_read_unchecked_cx8)
16543 + CFI_STARTPROC
16544 +
16545 + read64 %ecx
16546 + ret
16547 + CFI_ENDPROC
16548 +ENDPROC(atomic64_read_unchecked_cx8)
16549 +
16550 ENTRY(atomic64_set_cx8)
16551 CFI_STARTPROC
16552
16553 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16554 CFI_ENDPROC
16555 ENDPROC(atomic64_set_cx8)
16556
16557 +ENTRY(atomic64_set_unchecked_cx8)
16558 + CFI_STARTPROC
16559 +
16560 +1:
16561 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16562 + * are atomic on 586 and newer */
16563 + cmpxchg8b (%esi)
16564 + jne 1b
16565 +
16566 + ret
16567 + CFI_ENDPROC
16568 +ENDPROC(atomic64_set_unchecked_cx8)
16569 +
16570 ENTRY(atomic64_xchg_cx8)
16571 CFI_STARTPROC
16572
16573 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16574 CFI_ENDPROC
16575 ENDPROC(atomic64_xchg_cx8)
16576
16577 -.macro addsub_return func ins insc
16578 -ENTRY(atomic64_\func\()_return_cx8)
16579 +.macro addsub_return func ins insc unchecked=""
16580 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16581 CFI_STARTPROC
16582 SAVE ebp
16583 SAVE ebx
16584 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16585 movl %edx, %ecx
16586 \ins\()l %esi, %ebx
16587 \insc\()l %edi, %ecx
16588 +
16589 +.ifb \unchecked
16590 +#ifdef CONFIG_PAX_REFCOUNT
16591 + into
16592 +2:
16593 + _ASM_EXTABLE(2b, 3f)
16594 +#endif
16595 +.endif
16596 +
16597 LOCK_PREFIX
16598 cmpxchg8b (%ebp)
16599 jne 1b
16600 -
16601 -10:
16602 movl %ebx, %eax
16603 movl %ecx, %edx
16604 +
16605 +.ifb \unchecked
16606 +#ifdef CONFIG_PAX_REFCOUNT
16607 +3:
16608 +#endif
16609 +.endif
16610 +
16611 RESTORE edi
16612 RESTORE esi
16613 RESTORE ebx
16614 RESTORE ebp
16615 ret
16616 CFI_ENDPROC
16617 -ENDPROC(atomic64_\func\()_return_cx8)
16618 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16619 .endm
16620
16621 addsub_return add add adc
16622 addsub_return sub sub sbb
16623 +addsub_return add add adc _unchecked
16624 +addsub_return sub sub sbb _unchecked
16625
16626 -.macro incdec_return func ins insc
16627 -ENTRY(atomic64_\func\()_return_cx8)
16628 +.macro incdec_return func ins insc unchecked
16629 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16630 CFI_STARTPROC
16631 SAVE ebx
16632
16633 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16634 movl %edx, %ecx
16635 \ins\()l $1, %ebx
16636 \insc\()l $0, %ecx
16637 +
16638 +.ifb \unchecked
16639 +#ifdef CONFIG_PAX_REFCOUNT
16640 + into
16641 +2:
16642 + _ASM_EXTABLE(2b, 3f)
16643 +#endif
16644 +.endif
16645 +
16646 LOCK_PREFIX
16647 cmpxchg8b (%esi)
16648 jne 1b
16649
16650 -10:
16651 movl %ebx, %eax
16652 movl %ecx, %edx
16653 +
16654 +.ifb \unchecked
16655 +#ifdef CONFIG_PAX_REFCOUNT
16656 +3:
16657 +#endif
16658 +.endif
16659 +
16660 RESTORE ebx
16661 ret
16662 CFI_ENDPROC
16663 -ENDPROC(atomic64_\func\()_return_cx8)
16664 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16665 .endm
16666
16667 incdec_return inc add adc
16668 incdec_return dec sub sbb
16669 +incdec_return inc add adc _unchecked
16670 +incdec_return dec sub sbb _unchecked
16671
16672 ENTRY(atomic64_dec_if_positive_cx8)
16673 CFI_STARTPROC
16674 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16675 movl %edx, %ecx
16676 subl $1, %ebx
16677 sbb $0, %ecx
16678 +
16679 +#ifdef CONFIG_PAX_REFCOUNT
16680 + into
16681 +1234:
16682 + _ASM_EXTABLE(1234b, 2f)
16683 +#endif
16684 +
16685 js 2f
16686 LOCK_PREFIX
16687 cmpxchg8b (%esi)
16688 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16689 movl %edx, %ecx
16690 addl %esi, %ebx
16691 adcl %edi, %ecx
16692 +
16693 +#ifdef CONFIG_PAX_REFCOUNT
16694 + into
16695 +1234:
16696 + _ASM_EXTABLE(1234b, 3f)
16697 +#endif
16698 +
16699 LOCK_PREFIX
16700 cmpxchg8b (%ebp)
16701 jne 1b
16702 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16703 movl %edx, %ecx
16704 addl $1, %ebx
16705 adcl $0, %ecx
16706 +
16707 +#ifdef CONFIG_PAX_REFCOUNT
16708 + into
16709 +1234:
16710 + _ASM_EXTABLE(1234b, 3f)
16711 +#endif
16712 +
16713 LOCK_PREFIX
16714 cmpxchg8b (%esi)
16715 jne 1b
16716 diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
16717 --- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16718 +++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16719 @@ -28,7 +28,8 @@
16720 #include <linux/linkage.h>
16721 #include <asm/dwarf2.h>
16722 #include <asm/errno.h>
16723 -
16724 +#include <asm/segment.h>
16725 +
16726 /*
16727 * computes a partial checksum, e.g. for TCP/UDP fragments
16728 */
16729 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16730
16731 #define ARGBASE 16
16732 #define FP 12
16733 -
16734 -ENTRY(csum_partial_copy_generic)
16735 +
16736 +ENTRY(csum_partial_copy_generic_to_user)
16737 CFI_STARTPROC
16738 +
16739 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16740 + pushl_cfi %gs
16741 + popl_cfi %es
16742 + jmp csum_partial_copy_generic
16743 +#endif
16744 +
16745 +ENTRY(csum_partial_copy_generic_from_user)
16746 +
16747 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16748 + pushl_cfi %gs
16749 + popl_cfi %ds
16750 +#endif
16751 +
16752 +ENTRY(csum_partial_copy_generic)
16753 subl $4,%esp
16754 CFI_ADJUST_CFA_OFFSET 4
16755 pushl_cfi %edi
16756 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16757 jmp 4f
16758 SRC(1: movw (%esi), %bx )
16759 addl $2, %esi
16760 -DST( movw %bx, (%edi) )
16761 +DST( movw %bx, %es:(%edi) )
16762 addl $2, %edi
16763 addw %bx, %ax
16764 adcl $0, %eax
16765 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16766 SRC(1: movl (%esi), %ebx )
16767 SRC( movl 4(%esi), %edx )
16768 adcl %ebx, %eax
16769 -DST( movl %ebx, (%edi) )
16770 +DST( movl %ebx, %es:(%edi) )
16771 adcl %edx, %eax
16772 -DST( movl %edx, 4(%edi) )
16773 +DST( movl %edx, %es:4(%edi) )
16774
16775 SRC( movl 8(%esi), %ebx )
16776 SRC( movl 12(%esi), %edx )
16777 adcl %ebx, %eax
16778 -DST( movl %ebx, 8(%edi) )
16779 +DST( movl %ebx, %es:8(%edi) )
16780 adcl %edx, %eax
16781 -DST( movl %edx, 12(%edi) )
16782 +DST( movl %edx, %es:12(%edi) )
16783
16784 SRC( movl 16(%esi), %ebx )
16785 SRC( movl 20(%esi), %edx )
16786 adcl %ebx, %eax
16787 -DST( movl %ebx, 16(%edi) )
16788 +DST( movl %ebx, %es:16(%edi) )
16789 adcl %edx, %eax
16790 -DST( movl %edx, 20(%edi) )
16791 +DST( movl %edx, %es:20(%edi) )
16792
16793 SRC( movl 24(%esi), %ebx )
16794 SRC( movl 28(%esi), %edx )
16795 adcl %ebx, %eax
16796 -DST( movl %ebx, 24(%edi) )
16797 +DST( movl %ebx, %es:24(%edi) )
16798 adcl %edx, %eax
16799 -DST( movl %edx, 28(%edi) )
16800 +DST( movl %edx, %es:28(%edi) )
16801
16802 lea 32(%esi), %esi
16803 lea 32(%edi), %edi
16804 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16805 shrl $2, %edx # This clears CF
16806 SRC(3: movl (%esi), %ebx )
16807 adcl %ebx, %eax
16808 -DST( movl %ebx, (%edi) )
16809 +DST( movl %ebx, %es:(%edi) )
16810 lea 4(%esi), %esi
16811 lea 4(%edi), %edi
16812 dec %edx
16813 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16814 jb 5f
16815 SRC( movw (%esi), %cx )
16816 leal 2(%esi), %esi
16817 -DST( movw %cx, (%edi) )
16818 +DST( movw %cx, %es:(%edi) )
16819 leal 2(%edi), %edi
16820 je 6f
16821 shll $16,%ecx
16822 SRC(5: movb (%esi), %cl )
16823 -DST( movb %cl, (%edi) )
16824 +DST( movb %cl, %es:(%edi) )
16825 6: addl %ecx, %eax
16826 adcl $0, %eax
16827 7:
16828 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16829
16830 6001:
16831 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16832 - movl $-EFAULT, (%ebx)
16833 + movl $-EFAULT, %ss:(%ebx)
16834
16835 # zero the complete destination - computing the rest
16836 # is too much work
16837 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16838
16839 6002:
16840 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16841 - movl $-EFAULT,(%ebx)
16842 + movl $-EFAULT,%ss:(%ebx)
16843 jmp 5000b
16844
16845 .previous
16846
16847 + pushl_cfi %ss
16848 + popl_cfi %ds
16849 + pushl_cfi %ss
16850 + popl_cfi %es
16851 popl_cfi %ebx
16852 CFI_RESTORE ebx
16853 popl_cfi %esi
16854 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16855 popl_cfi %ecx # equivalent to addl $4,%esp
16856 ret
16857 CFI_ENDPROC
16858 -ENDPROC(csum_partial_copy_generic)
16859 +ENDPROC(csum_partial_copy_generic_to_user)
16860
16861 #else
16862
16863 /* Version for PentiumII/PPro */
16864
16865 #define ROUND1(x) \
16866 + nop; nop; nop; \
16867 SRC(movl x(%esi), %ebx ) ; \
16868 addl %ebx, %eax ; \
16869 - DST(movl %ebx, x(%edi) ) ;
16870 + DST(movl %ebx, %es:x(%edi)) ;
16871
16872 #define ROUND(x) \
16873 + nop; nop; nop; \
16874 SRC(movl x(%esi), %ebx ) ; \
16875 adcl %ebx, %eax ; \
16876 - DST(movl %ebx, x(%edi) ) ;
16877 + DST(movl %ebx, %es:x(%edi)) ;
16878
16879 #define ARGBASE 12
16880 -
16881 -ENTRY(csum_partial_copy_generic)
16882 +
16883 +ENTRY(csum_partial_copy_generic_to_user)
16884 CFI_STARTPROC
16885 +
16886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16887 + pushl_cfi %gs
16888 + popl_cfi %es
16889 + jmp csum_partial_copy_generic
16890 +#endif
16891 +
16892 +ENTRY(csum_partial_copy_generic_from_user)
16893 +
16894 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16895 + pushl_cfi %gs
16896 + popl_cfi %ds
16897 +#endif
16898 +
16899 +ENTRY(csum_partial_copy_generic)
16900 pushl_cfi %ebx
16901 CFI_REL_OFFSET ebx, 0
16902 pushl_cfi %edi
16903 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16904 subl %ebx, %edi
16905 lea -1(%esi),%edx
16906 andl $-32,%edx
16907 - lea 3f(%ebx,%ebx), %ebx
16908 + lea 3f(%ebx,%ebx,2), %ebx
16909 testl %esi, %esi
16910 jmp *%ebx
16911 1: addl $64,%esi
16912 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16913 jb 5f
16914 SRC( movw (%esi), %dx )
16915 leal 2(%esi), %esi
16916 -DST( movw %dx, (%edi) )
16917 +DST( movw %dx, %es:(%edi) )
16918 leal 2(%edi), %edi
16919 je 6f
16920 shll $16,%edx
16921 5:
16922 SRC( movb (%esi), %dl )
16923 -DST( movb %dl, (%edi) )
16924 +DST( movb %dl, %es:(%edi) )
16925 6: addl %edx, %eax
16926 adcl $0, %eax
16927 7:
16928 .section .fixup, "ax"
16929 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16930 - movl $-EFAULT, (%ebx)
16931 + movl $-EFAULT, %ss:(%ebx)
16932 # zero the complete destination (computing the rest is too much work)
16933 movl ARGBASE+8(%esp),%edi # dst
16934 movl ARGBASE+12(%esp),%ecx # len
16935 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16936 rep; stosb
16937 jmp 7b
16938 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16939 - movl $-EFAULT, (%ebx)
16940 + movl $-EFAULT, %ss:(%ebx)
16941 jmp 7b
16942 .previous
16943
16944 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16945 + pushl_cfi %ss
16946 + popl_cfi %ds
16947 + pushl_cfi %ss
16948 + popl_cfi %es
16949 +#endif
16950 +
16951 popl_cfi %esi
16952 CFI_RESTORE esi
16953 popl_cfi %edi
16954 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16955 CFI_RESTORE ebx
16956 ret
16957 CFI_ENDPROC
16958 -ENDPROC(csum_partial_copy_generic)
16959 +ENDPROC(csum_partial_copy_generic_to_user)
16960
16961 #undef ROUND
16962 #undef ROUND1
16963 diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
16964 --- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16965 +++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16966 @@ -58,7 +58,7 @@ ENDPROC(clear_page)
16967
16968 #include <asm/cpufeature.h>
16969
16970 - .section .altinstr_replacement,"ax"
16971 + .section .altinstr_replacement,"a"
16972 1: .byte 0xeb /* jmp <disp8> */
16973 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16974 2: .byte 0xeb /* jmp <disp8> */
16975 diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
16976 --- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16977 +++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16978 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
16979
16980 #include <asm/cpufeature.h>
16981
16982 - .section .altinstr_replacement,"ax"
16983 + .section .altinstr_replacement,"a"
16984 1: .byte 0xeb /* jmp <disp8> */
16985 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16986 2:
16987 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
16988 --- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16989 +++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16990 @@ -16,6 +16,7 @@
16991 #include <asm/thread_info.h>
16992 #include <asm/cpufeature.h>
16993 #include <asm/alternative-asm.h>
16994 +#include <asm/pgtable.h>
16995
16996 /*
16997 * By placing feature2 after feature1 in altinstructions section, we logically
16998 @@ -29,7 +30,7 @@
16999 .byte 0xe9 /* 32bit jump */
17000 .long \orig-1f /* by default jump to orig */
17001 1:
17002 - .section .altinstr_replacement,"ax"
17003 + .section .altinstr_replacement,"a"
17004 2: .byte 0xe9 /* near jump with 32bit immediate */
17005 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17006 3: .byte 0xe9 /* near jump with 32bit immediate */
17007 @@ -71,41 +72,13 @@
17008 #endif
17009 .endm
17010
17011 -/* Standard copy_to_user with segment limit checking */
17012 -ENTRY(_copy_to_user)
17013 - CFI_STARTPROC
17014 - GET_THREAD_INFO(%rax)
17015 - movq %rdi,%rcx
17016 - addq %rdx,%rcx
17017 - jc bad_to_user
17018 - cmpq TI_addr_limit(%rax),%rcx
17019 - ja bad_to_user
17020 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17021 - copy_user_generic_unrolled,copy_user_generic_string, \
17022 - copy_user_enhanced_fast_string
17023 - CFI_ENDPROC
17024 -ENDPROC(_copy_to_user)
17025 -
17026 -/* Standard copy_from_user with segment limit checking */
17027 -ENTRY(_copy_from_user)
17028 - CFI_STARTPROC
17029 - GET_THREAD_INFO(%rax)
17030 - movq %rsi,%rcx
17031 - addq %rdx,%rcx
17032 - jc bad_from_user
17033 - cmpq TI_addr_limit(%rax),%rcx
17034 - ja bad_from_user
17035 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17036 - copy_user_generic_unrolled,copy_user_generic_string, \
17037 - copy_user_enhanced_fast_string
17038 - CFI_ENDPROC
17039 -ENDPROC(_copy_from_user)
17040 -
17041 .section .fixup,"ax"
17042 /* must zero dest */
17043 ENTRY(bad_from_user)
17044 bad_from_user:
17045 CFI_STARTPROC
17046 + testl %edx,%edx
17047 + js bad_to_user
17048 movl %edx,%ecx
17049 xorl %eax,%eax
17050 rep
17051 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17052 --- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17053 +++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17054 @@ -14,6 +14,7 @@
17055 #include <asm/current.h>
17056 #include <asm/asm-offsets.h>
17057 #include <asm/thread_info.h>
17058 +#include <asm/pgtable.h>
17059
17060 .macro ALIGN_DESTINATION
17061 #ifdef FIX_ALIGNMENT
17062 @@ -50,6 +51,15 @@
17063 */
17064 ENTRY(__copy_user_nocache)
17065 CFI_STARTPROC
17066 +
17067 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17068 + mov $PAX_USER_SHADOW_BASE,%rcx
17069 + cmp %rcx,%rsi
17070 + jae 1f
17071 + add %rcx,%rsi
17072 +1:
17073 +#endif
17074 +
17075 cmpl $8,%edx
17076 jb 20f /* less then 8 bytes, go to byte copy loop */
17077 ALIGN_DESTINATION
17078 diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17079 --- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17080 +++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17081 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17082 len -= 2;
17083 }
17084 }
17085 +
17086 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17087 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17088 + src += PAX_USER_SHADOW_BASE;
17089 +#endif
17090 +
17091 isum = csum_partial_copy_generic((__force const void *)src,
17092 dst, len, isum, errp, NULL);
17093 if (unlikely(*errp))
17094 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17095 }
17096
17097 *errp = 0;
17098 +
17099 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17100 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17101 + dst += PAX_USER_SHADOW_BASE;
17102 +#endif
17103 +
17104 return csum_partial_copy_generic(src, (void __force *)dst,
17105 len, isum, NULL, errp);
17106 }
17107 diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17108 --- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17109 +++ linux-3.0.4/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17110 @@ -33,14 +33,35 @@
17111 #include <asm/asm-offsets.h>
17112 #include <asm/thread_info.h>
17113 #include <asm/asm.h>
17114 +#include <asm/segment.h>
17115 +#include <asm/pgtable.h>
17116 +
17117 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17118 +#define __copyuser_seg gs;
17119 +#else
17120 +#define __copyuser_seg
17121 +#endif
17122
17123 .text
17124 ENTRY(__get_user_1)
17125 CFI_STARTPROC
17126 +
17127 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17128 GET_THREAD_INFO(%_ASM_DX)
17129 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17130 jae bad_get_user
17131 -1: movzb (%_ASM_AX),%edx
17132 +
17133 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17134 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17135 + cmp %_ASM_DX,%_ASM_AX
17136 + jae 1234f
17137 + add %_ASM_DX,%_ASM_AX
17138 +1234:
17139 +#endif
17140 +
17141 +#endif
17142 +
17143 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17144 xor %eax,%eax
17145 ret
17146 CFI_ENDPROC
17147 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17148 ENTRY(__get_user_2)
17149 CFI_STARTPROC
17150 add $1,%_ASM_AX
17151 +
17152 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17153 jc bad_get_user
17154 GET_THREAD_INFO(%_ASM_DX)
17155 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17156 jae bad_get_user
17157 -2: movzwl -1(%_ASM_AX),%edx
17158 +
17159 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17160 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17161 + cmp %_ASM_DX,%_ASM_AX
17162 + jae 1234f
17163 + add %_ASM_DX,%_ASM_AX
17164 +1234:
17165 +#endif
17166 +
17167 +#endif
17168 +
17169 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17170 xor %eax,%eax
17171 ret
17172 CFI_ENDPROC
17173 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17174 ENTRY(__get_user_4)
17175 CFI_STARTPROC
17176 add $3,%_ASM_AX
17177 +
17178 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17179 jc bad_get_user
17180 GET_THREAD_INFO(%_ASM_DX)
17181 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17182 jae bad_get_user
17183 -3: mov -3(%_ASM_AX),%edx
17184 +
17185 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17186 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17187 + cmp %_ASM_DX,%_ASM_AX
17188 + jae 1234f
17189 + add %_ASM_DX,%_ASM_AX
17190 +1234:
17191 +#endif
17192 +
17193 +#endif
17194 +
17195 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17196 xor %eax,%eax
17197 ret
17198 CFI_ENDPROC
17199 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17200 GET_THREAD_INFO(%_ASM_DX)
17201 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17202 jae bad_get_user
17203 +
17204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17205 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17206 + cmp %_ASM_DX,%_ASM_AX
17207 + jae 1234f
17208 + add %_ASM_DX,%_ASM_AX
17209 +1234:
17210 +#endif
17211 +
17212 4: movq -7(%_ASM_AX),%_ASM_DX
17213 xor %eax,%eax
17214 ret
17215 diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17216 --- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17217 +++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17218 @@ -21,6 +21,11 @@
17219 #include <linux/string.h>
17220 #include <asm/inat.h>
17221 #include <asm/insn.h>
17222 +#ifdef __KERNEL__
17223 +#include <asm/pgtable_types.h>
17224 +#else
17225 +#define ktla_ktva(addr) addr
17226 +#endif
17227
17228 #define get_next(t, insn) \
17229 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17230 @@ -40,8 +45,8 @@
17231 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17232 {
17233 memset(insn, 0, sizeof(*insn));
17234 - insn->kaddr = kaddr;
17235 - insn->next_byte = kaddr;
17236 + insn->kaddr = ktla_ktva(kaddr);
17237 + insn->next_byte = ktla_ktva(kaddr);
17238 insn->x86_64 = x86_64 ? 1 : 0;
17239 insn->opnd_bytes = 4;
17240 if (x86_64)
17241 diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17242 --- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17243 +++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17244 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17245 {
17246 void *p;
17247 int i;
17248 + unsigned long cr0;
17249
17250 if (unlikely(in_interrupt()))
17251 return __memcpy(to, from, len);
17252 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17253 kernel_fpu_begin();
17254
17255 __asm__ __volatile__ (
17256 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17257 - " prefetch 64(%0)\n"
17258 - " prefetch 128(%0)\n"
17259 - " prefetch 192(%0)\n"
17260 - " prefetch 256(%0)\n"
17261 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17262 + " prefetch 64(%1)\n"
17263 + " prefetch 128(%1)\n"
17264 + " prefetch 192(%1)\n"
17265 + " prefetch 256(%1)\n"
17266 "2: \n"
17267 ".section .fixup, \"ax\"\n"
17268 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17269 + "3: \n"
17270 +
17271 +#ifdef CONFIG_PAX_KERNEXEC
17272 + " movl %%cr0, %0\n"
17273 + " movl %0, %%eax\n"
17274 + " andl $0xFFFEFFFF, %%eax\n"
17275 + " movl %%eax, %%cr0\n"
17276 +#endif
17277 +
17278 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17279 +
17280 +#ifdef CONFIG_PAX_KERNEXEC
17281 + " movl %0, %%cr0\n"
17282 +#endif
17283 +
17284 " jmp 2b\n"
17285 ".previous\n"
17286 _ASM_EXTABLE(1b, 3b)
17287 - : : "r" (from));
17288 + : "=&r" (cr0) : "r" (from) : "ax");
17289
17290 for ( ; i > 5; i--) {
17291 __asm__ __volatile__ (
17292 - "1: prefetch 320(%0)\n"
17293 - "2: movq (%0), %%mm0\n"
17294 - " movq 8(%0), %%mm1\n"
17295 - " movq 16(%0), %%mm2\n"
17296 - " movq 24(%0), %%mm3\n"
17297 - " movq %%mm0, (%1)\n"
17298 - " movq %%mm1, 8(%1)\n"
17299 - " movq %%mm2, 16(%1)\n"
17300 - " movq %%mm3, 24(%1)\n"
17301 - " movq 32(%0), %%mm0\n"
17302 - " movq 40(%0), %%mm1\n"
17303 - " movq 48(%0), %%mm2\n"
17304 - " movq 56(%0), %%mm3\n"
17305 - " movq %%mm0, 32(%1)\n"
17306 - " movq %%mm1, 40(%1)\n"
17307 - " movq %%mm2, 48(%1)\n"
17308 - " movq %%mm3, 56(%1)\n"
17309 + "1: prefetch 320(%1)\n"
17310 + "2: movq (%1), %%mm0\n"
17311 + " movq 8(%1), %%mm1\n"
17312 + " movq 16(%1), %%mm2\n"
17313 + " movq 24(%1), %%mm3\n"
17314 + " movq %%mm0, (%2)\n"
17315 + " movq %%mm1, 8(%2)\n"
17316 + " movq %%mm2, 16(%2)\n"
17317 + " movq %%mm3, 24(%2)\n"
17318 + " movq 32(%1), %%mm0\n"
17319 + " movq 40(%1), %%mm1\n"
17320 + " movq 48(%1), %%mm2\n"
17321 + " movq 56(%1), %%mm3\n"
17322 + " movq %%mm0, 32(%2)\n"
17323 + " movq %%mm1, 40(%2)\n"
17324 + " movq %%mm2, 48(%2)\n"
17325 + " movq %%mm3, 56(%2)\n"
17326 ".section .fixup, \"ax\"\n"
17327 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17328 + "3:\n"
17329 +
17330 +#ifdef CONFIG_PAX_KERNEXEC
17331 + " movl %%cr0, %0\n"
17332 + " movl %0, %%eax\n"
17333 + " andl $0xFFFEFFFF, %%eax\n"
17334 + " movl %%eax, %%cr0\n"
17335 +#endif
17336 +
17337 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17338 +
17339 +#ifdef CONFIG_PAX_KERNEXEC
17340 + " movl %0, %%cr0\n"
17341 +#endif
17342 +
17343 " jmp 2b\n"
17344 ".previous\n"
17345 _ASM_EXTABLE(1b, 3b)
17346 - : : "r" (from), "r" (to) : "memory");
17347 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17348
17349 from += 64;
17350 to += 64;
17351 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17352 static void fast_copy_page(void *to, void *from)
17353 {
17354 int i;
17355 + unsigned long cr0;
17356
17357 kernel_fpu_begin();
17358
17359 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17360 * but that is for later. -AV
17361 */
17362 __asm__ __volatile__(
17363 - "1: prefetch (%0)\n"
17364 - " prefetch 64(%0)\n"
17365 - " prefetch 128(%0)\n"
17366 - " prefetch 192(%0)\n"
17367 - " prefetch 256(%0)\n"
17368 + "1: prefetch (%1)\n"
17369 + " prefetch 64(%1)\n"
17370 + " prefetch 128(%1)\n"
17371 + " prefetch 192(%1)\n"
17372 + " prefetch 256(%1)\n"
17373 "2: \n"
17374 ".section .fixup, \"ax\"\n"
17375 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17376 + "3: \n"
17377 +
17378 +#ifdef CONFIG_PAX_KERNEXEC
17379 + " movl %%cr0, %0\n"
17380 + " movl %0, %%eax\n"
17381 + " andl $0xFFFEFFFF, %%eax\n"
17382 + " movl %%eax, %%cr0\n"
17383 +#endif
17384 +
17385 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17386 +
17387 +#ifdef CONFIG_PAX_KERNEXEC
17388 + " movl %0, %%cr0\n"
17389 +#endif
17390 +
17391 " jmp 2b\n"
17392 ".previous\n"
17393 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17394 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17395
17396 for (i = 0; i < (4096-320)/64; i++) {
17397 __asm__ __volatile__ (
17398 - "1: prefetch 320(%0)\n"
17399 - "2: movq (%0), %%mm0\n"
17400 - " movntq %%mm0, (%1)\n"
17401 - " movq 8(%0), %%mm1\n"
17402 - " movntq %%mm1, 8(%1)\n"
17403 - " movq 16(%0), %%mm2\n"
17404 - " movntq %%mm2, 16(%1)\n"
17405 - " movq 24(%0), %%mm3\n"
17406 - " movntq %%mm3, 24(%1)\n"
17407 - " movq 32(%0), %%mm4\n"
17408 - " movntq %%mm4, 32(%1)\n"
17409 - " movq 40(%0), %%mm5\n"
17410 - " movntq %%mm5, 40(%1)\n"
17411 - " movq 48(%0), %%mm6\n"
17412 - " movntq %%mm6, 48(%1)\n"
17413 - " movq 56(%0), %%mm7\n"
17414 - " movntq %%mm7, 56(%1)\n"
17415 + "1: prefetch 320(%1)\n"
17416 + "2: movq (%1), %%mm0\n"
17417 + " movntq %%mm0, (%2)\n"
17418 + " movq 8(%1), %%mm1\n"
17419 + " movntq %%mm1, 8(%2)\n"
17420 + " movq 16(%1), %%mm2\n"
17421 + " movntq %%mm2, 16(%2)\n"
17422 + " movq 24(%1), %%mm3\n"
17423 + " movntq %%mm3, 24(%2)\n"
17424 + " movq 32(%1), %%mm4\n"
17425 + " movntq %%mm4, 32(%2)\n"
17426 + " movq 40(%1), %%mm5\n"
17427 + " movntq %%mm5, 40(%2)\n"
17428 + " movq 48(%1), %%mm6\n"
17429 + " movntq %%mm6, 48(%2)\n"
17430 + " movq 56(%1), %%mm7\n"
17431 + " movntq %%mm7, 56(%2)\n"
17432 ".section .fixup, \"ax\"\n"
17433 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17434 + "3:\n"
17435 +
17436 +#ifdef CONFIG_PAX_KERNEXEC
17437 + " movl %%cr0, %0\n"
17438 + " movl %0, %%eax\n"
17439 + " andl $0xFFFEFFFF, %%eax\n"
17440 + " movl %%eax, %%cr0\n"
17441 +#endif
17442 +
17443 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17444 +
17445 +#ifdef CONFIG_PAX_KERNEXEC
17446 + " movl %0, %%cr0\n"
17447 +#endif
17448 +
17449 " jmp 2b\n"
17450 ".previous\n"
17451 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17452 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17453
17454 from += 64;
17455 to += 64;
17456 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17457 static void fast_copy_page(void *to, void *from)
17458 {
17459 int i;
17460 + unsigned long cr0;
17461
17462 kernel_fpu_begin();
17463
17464 __asm__ __volatile__ (
17465 - "1: prefetch (%0)\n"
17466 - " prefetch 64(%0)\n"
17467 - " prefetch 128(%0)\n"
17468 - " prefetch 192(%0)\n"
17469 - " prefetch 256(%0)\n"
17470 + "1: prefetch (%1)\n"
17471 + " prefetch 64(%1)\n"
17472 + " prefetch 128(%1)\n"
17473 + " prefetch 192(%1)\n"
17474 + " prefetch 256(%1)\n"
17475 "2: \n"
17476 ".section .fixup, \"ax\"\n"
17477 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17478 + "3: \n"
17479 +
17480 +#ifdef CONFIG_PAX_KERNEXEC
17481 + " movl %%cr0, %0\n"
17482 + " movl %0, %%eax\n"
17483 + " andl $0xFFFEFFFF, %%eax\n"
17484 + " movl %%eax, %%cr0\n"
17485 +#endif
17486 +
17487 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17488 +
17489 +#ifdef CONFIG_PAX_KERNEXEC
17490 + " movl %0, %%cr0\n"
17491 +#endif
17492 +
17493 " jmp 2b\n"
17494 ".previous\n"
17495 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17496 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17497
17498 for (i = 0; i < 4096/64; i++) {
17499 __asm__ __volatile__ (
17500 - "1: prefetch 320(%0)\n"
17501 - "2: movq (%0), %%mm0\n"
17502 - " movq 8(%0), %%mm1\n"
17503 - " movq 16(%0), %%mm2\n"
17504 - " movq 24(%0), %%mm3\n"
17505 - " movq %%mm0, (%1)\n"
17506 - " movq %%mm1, 8(%1)\n"
17507 - " movq %%mm2, 16(%1)\n"
17508 - " movq %%mm3, 24(%1)\n"
17509 - " movq 32(%0), %%mm0\n"
17510 - " movq 40(%0), %%mm1\n"
17511 - " movq 48(%0), %%mm2\n"
17512 - " movq 56(%0), %%mm3\n"
17513 - " movq %%mm0, 32(%1)\n"
17514 - " movq %%mm1, 40(%1)\n"
17515 - " movq %%mm2, 48(%1)\n"
17516 - " movq %%mm3, 56(%1)\n"
17517 + "1: prefetch 320(%1)\n"
17518 + "2: movq (%1), %%mm0\n"
17519 + " movq 8(%1), %%mm1\n"
17520 + " movq 16(%1), %%mm2\n"
17521 + " movq 24(%1), %%mm3\n"
17522 + " movq %%mm0, (%2)\n"
17523 + " movq %%mm1, 8(%2)\n"
17524 + " movq %%mm2, 16(%2)\n"
17525 + " movq %%mm3, 24(%2)\n"
17526 + " movq 32(%1), %%mm0\n"
17527 + " movq 40(%1), %%mm1\n"
17528 + " movq 48(%1), %%mm2\n"
17529 + " movq 56(%1), %%mm3\n"
17530 + " movq %%mm0, 32(%2)\n"
17531 + " movq %%mm1, 40(%2)\n"
17532 + " movq %%mm2, 48(%2)\n"
17533 + " movq %%mm3, 56(%2)\n"
17534 ".section .fixup, \"ax\"\n"
17535 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17536 + "3:\n"
17537 +
17538 +#ifdef CONFIG_PAX_KERNEXEC
17539 + " movl %%cr0, %0\n"
17540 + " movl %0, %%eax\n"
17541 + " andl $0xFFFEFFFF, %%eax\n"
17542 + " movl %%eax, %%cr0\n"
17543 +#endif
17544 +
17545 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17546 +
17547 +#ifdef CONFIG_PAX_KERNEXEC
17548 + " movl %0, %%cr0\n"
17549 +#endif
17550 +
17551 " jmp 2b\n"
17552 ".previous\n"
17553 _ASM_EXTABLE(1b, 3b)
17554 - : : "r" (from), "r" (to) : "memory");
17555 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17556
17557 from += 64;
17558 to += 64;
17559 diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
17560 --- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17561 +++ linux-3.0.4/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17562 @@ -15,7 +15,8 @@
17563 #include <asm/thread_info.h>
17564 #include <asm/errno.h>
17565 #include <asm/asm.h>
17566 -
17567 +#include <asm/segment.h>
17568 +#include <asm/pgtable.h>
17569
17570 /*
17571 * __put_user_X
17572 @@ -29,52 +30,119 @@
17573 * as they get called from within inline assembly.
17574 */
17575
17576 -#define ENTER CFI_STARTPROC ; \
17577 - GET_THREAD_INFO(%_ASM_BX)
17578 +#define ENTER CFI_STARTPROC
17579 #define EXIT ret ; \
17580 CFI_ENDPROC
17581
17582 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17583 +#define _DEST %_ASM_CX,%_ASM_BX
17584 +#else
17585 +#define _DEST %_ASM_CX
17586 +#endif
17587 +
17588 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17589 +#define __copyuser_seg gs;
17590 +#else
17591 +#define __copyuser_seg
17592 +#endif
17593 +
17594 .text
17595 ENTRY(__put_user_1)
17596 ENTER
17597 +
17598 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17599 + GET_THREAD_INFO(%_ASM_BX)
17600 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17601 jae bad_put_user
17602 -1: movb %al,(%_ASM_CX)
17603 +
17604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17605 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17606 + cmp %_ASM_BX,%_ASM_CX
17607 + jb 1234f
17608 + xor %ebx,%ebx
17609 +1234:
17610 +#endif
17611 +
17612 +#endif
17613 +
17614 +1: __copyuser_seg movb %al,(_DEST)
17615 xor %eax,%eax
17616 EXIT
17617 ENDPROC(__put_user_1)
17618
17619 ENTRY(__put_user_2)
17620 ENTER
17621 +
17622 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17623 + GET_THREAD_INFO(%_ASM_BX)
17624 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17625 sub $1,%_ASM_BX
17626 cmp %_ASM_BX,%_ASM_CX
17627 jae bad_put_user
17628 -2: movw %ax,(%_ASM_CX)
17629 +
17630 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17631 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17632 + cmp %_ASM_BX,%_ASM_CX
17633 + jb 1234f
17634 + xor %ebx,%ebx
17635 +1234:
17636 +#endif
17637 +
17638 +#endif
17639 +
17640 +2: __copyuser_seg movw %ax,(_DEST)
17641 xor %eax,%eax
17642 EXIT
17643 ENDPROC(__put_user_2)
17644
17645 ENTRY(__put_user_4)
17646 ENTER
17647 +
17648 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17649 + GET_THREAD_INFO(%_ASM_BX)
17650 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17651 sub $3,%_ASM_BX
17652 cmp %_ASM_BX,%_ASM_CX
17653 jae bad_put_user
17654 -3: movl %eax,(%_ASM_CX)
17655 +
17656 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17657 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17658 + cmp %_ASM_BX,%_ASM_CX
17659 + jb 1234f
17660 + xor %ebx,%ebx
17661 +1234:
17662 +#endif
17663 +
17664 +#endif
17665 +
17666 +3: __copyuser_seg movl %eax,(_DEST)
17667 xor %eax,%eax
17668 EXIT
17669 ENDPROC(__put_user_4)
17670
17671 ENTRY(__put_user_8)
17672 ENTER
17673 +
17674 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17675 + GET_THREAD_INFO(%_ASM_BX)
17676 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17677 sub $7,%_ASM_BX
17678 cmp %_ASM_BX,%_ASM_CX
17679 jae bad_put_user
17680 -4: mov %_ASM_AX,(%_ASM_CX)
17681 +
17682 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17683 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17684 + cmp %_ASM_BX,%_ASM_CX
17685 + jb 1234f
17686 + xor %ebx,%ebx
17687 +1234:
17688 +#endif
17689 +
17690 +#endif
17691 +
17692 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17693 #ifdef CONFIG_X86_32
17694 -5: movl %edx,4(%_ASM_CX)
17695 +5: __copyuser_seg movl %edx,4(_DEST)
17696 #endif
17697 xor %eax,%eax
17698 EXIT
17699 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
17700 --- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17701 +++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17702 @@ -43,7 +43,7 @@ do { \
17703 __asm__ __volatile__( \
17704 " testl %1,%1\n" \
17705 " jz 2f\n" \
17706 - "0: lodsb\n" \
17707 + "0: "__copyuser_seg"lodsb\n" \
17708 " stosb\n" \
17709 " testb %%al,%%al\n" \
17710 " jz 1f\n" \
17711 @@ -128,10 +128,12 @@ do { \
17712 int __d0; \
17713 might_fault(); \
17714 __asm__ __volatile__( \
17715 + __COPYUSER_SET_ES \
17716 "0: rep; stosl\n" \
17717 " movl %2,%0\n" \
17718 "1: rep; stosb\n" \
17719 "2:\n" \
17720 + __COPYUSER_RESTORE_ES \
17721 ".section .fixup,\"ax\"\n" \
17722 "3: lea 0(%2,%0,4),%0\n" \
17723 " jmp 2b\n" \
17724 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17725 might_fault();
17726
17727 __asm__ __volatile__(
17728 + __COPYUSER_SET_ES
17729 " testl %0, %0\n"
17730 " jz 3f\n"
17731 " andl %0,%%ecx\n"
17732 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17733 " subl %%ecx,%0\n"
17734 " addl %0,%%eax\n"
17735 "1:\n"
17736 + __COPYUSER_RESTORE_ES
17737 ".section .fixup,\"ax\"\n"
17738 "2: xorl %%eax,%%eax\n"
17739 " jmp 1b\n"
17740 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17741
17742 #ifdef CONFIG_X86_INTEL_USERCOPY
17743 static unsigned long
17744 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17745 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17746 {
17747 int d0, d1;
17748 __asm__ __volatile__(
17749 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17750 " .align 2,0x90\n"
17751 "3: movl 0(%4), %%eax\n"
17752 "4: movl 4(%4), %%edx\n"
17753 - "5: movl %%eax, 0(%3)\n"
17754 - "6: movl %%edx, 4(%3)\n"
17755 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17756 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17757 "7: movl 8(%4), %%eax\n"
17758 "8: movl 12(%4),%%edx\n"
17759 - "9: movl %%eax, 8(%3)\n"
17760 - "10: movl %%edx, 12(%3)\n"
17761 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17762 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17763 "11: movl 16(%4), %%eax\n"
17764 "12: movl 20(%4), %%edx\n"
17765 - "13: movl %%eax, 16(%3)\n"
17766 - "14: movl %%edx, 20(%3)\n"
17767 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17768 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17769 "15: movl 24(%4), %%eax\n"
17770 "16: movl 28(%4), %%edx\n"
17771 - "17: movl %%eax, 24(%3)\n"
17772 - "18: movl %%edx, 28(%3)\n"
17773 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17774 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17775 "19: movl 32(%4), %%eax\n"
17776 "20: movl 36(%4), %%edx\n"
17777 - "21: movl %%eax, 32(%3)\n"
17778 - "22: movl %%edx, 36(%3)\n"
17779 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17780 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17781 "23: movl 40(%4), %%eax\n"
17782 "24: movl 44(%4), %%edx\n"
17783 - "25: movl %%eax, 40(%3)\n"
17784 - "26: movl %%edx, 44(%3)\n"
17785 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17786 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17787 "27: movl 48(%4), %%eax\n"
17788 "28: movl 52(%4), %%edx\n"
17789 - "29: movl %%eax, 48(%3)\n"
17790 - "30: movl %%edx, 52(%3)\n"
17791 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17792 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17793 "31: movl 56(%4), %%eax\n"
17794 "32: movl 60(%4), %%edx\n"
17795 - "33: movl %%eax, 56(%3)\n"
17796 - "34: movl %%edx, 60(%3)\n"
17797 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17798 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17799 " addl $-64, %0\n"
17800 " addl $64, %4\n"
17801 " addl $64, %3\n"
17802 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17803 " shrl $2, %0\n"
17804 " andl $3, %%eax\n"
17805 " cld\n"
17806 + __COPYUSER_SET_ES
17807 "99: rep; movsl\n"
17808 "36: movl %%eax, %0\n"
17809 "37: rep; movsb\n"
17810 "100:\n"
17811 + __COPYUSER_RESTORE_ES
17812 + ".section .fixup,\"ax\"\n"
17813 + "101: lea 0(%%eax,%0,4),%0\n"
17814 + " jmp 100b\n"
17815 + ".previous\n"
17816 + ".section __ex_table,\"a\"\n"
17817 + " .align 4\n"
17818 + " .long 1b,100b\n"
17819 + " .long 2b,100b\n"
17820 + " .long 3b,100b\n"
17821 + " .long 4b,100b\n"
17822 + " .long 5b,100b\n"
17823 + " .long 6b,100b\n"
17824 + " .long 7b,100b\n"
17825 + " .long 8b,100b\n"
17826 + " .long 9b,100b\n"
17827 + " .long 10b,100b\n"
17828 + " .long 11b,100b\n"
17829 + " .long 12b,100b\n"
17830 + " .long 13b,100b\n"
17831 + " .long 14b,100b\n"
17832 + " .long 15b,100b\n"
17833 + " .long 16b,100b\n"
17834 + " .long 17b,100b\n"
17835 + " .long 18b,100b\n"
17836 + " .long 19b,100b\n"
17837 + " .long 20b,100b\n"
17838 + " .long 21b,100b\n"
17839 + " .long 22b,100b\n"
17840 + " .long 23b,100b\n"
17841 + " .long 24b,100b\n"
17842 + " .long 25b,100b\n"
17843 + " .long 26b,100b\n"
17844 + " .long 27b,100b\n"
17845 + " .long 28b,100b\n"
17846 + " .long 29b,100b\n"
17847 + " .long 30b,100b\n"
17848 + " .long 31b,100b\n"
17849 + " .long 32b,100b\n"
17850 + " .long 33b,100b\n"
17851 + " .long 34b,100b\n"
17852 + " .long 35b,100b\n"
17853 + " .long 36b,100b\n"
17854 + " .long 37b,100b\n"
17855 + " .long 99b,101b\n"
17856 + ".previous"
17857 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17858 + : "1"(to), "2"(from), "0"(size)
17859 + : "eax", "edx", "memory");
17860 + return size;
17861 +}
17862 +
17863 +static unsigned long
17864 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17865 +{
17866 + int d0, d1;
17867 + __asm__ __volatile__(
17868 + " .align 2,0x90\n"
17869 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17870 + " cmpl $67, %0\n"
17871 + " jbe 3f\n"
17872 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17873 + " .align 2,0x90\n"
17874 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17875 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17876 + "5: movl %%eax, 0(%3)\n"
17877 + "6: movl %%edx, 4(%3)\n"
17878 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17879 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17880 + "9: movl %%eax, 8(%3)\n"
17881 + "10: movl %%edx, 12(%3)\n"
17882 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17883 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17884 + "13: movl %%eax, 16(%3)\n"
17885 + "14: movl %%edx, 20(%3)\n"
17886 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17887 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17888 + "17: movl %%eax, 24(%3)\n"
17889 + "18: movl %%edx, 28(%3)\n"
17890 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17891 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17892 + "21: movl %%eax, 32(%3)\n"
17893 + "22: movl %%edx, 36(%3)\n"
17894 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17895 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17896 + "25: movl %%eax, 40(%3)\n"
17897 + "26: movl %%edx, 44(%3)\n"
17898 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17899 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17900 + "29: movl %%eax, 48(%3)\n"
17901 + "30: movl %%edx, 52(%3)\n"
17902 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17903 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17904 + "33: movl %%eax, 56(%3)\n"
17905 + "34: movl %%edx, 60(%3)\n"
17906 + " addl $-64, %0\n"
17907 + " addl $64, %4\n"
17908 + " addl $64, %3\n"
17909 + " cmpl $63, %0\n"
17910 + " ja 1b\n"
17911 + "35: movl %0, %%eax\n"
17912 + " shrl $2, %0\n"
17913 + " andl $3, %%eax\n"
17914 + " cld\n"
17915 + "99: rep; "__copyuser_seg" movsl\n"
17916 + "36: movl %%eax, %0\n"
17917 + "37: rep; "__copyuser_seg" movsb\n"
17918 + "100:\n"
17919 ".section .fixup,\"ax\"\n"
17920 "101: lea 0(%%eax,%0,4),%0\n"
17921 " jmp 100b\n"
17922 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17923 int d0, d1;
17924 __asm__ __volatile__(
17925 " .align 2,0x90\n"
17926 - "0: movl 32(%4), %%eax\n"
17927 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17928 " cmpl $67, %0\n"
17929 " jbe 2f\n"
17930 - "1: movl 64(%4), %%eax\n"
17931 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17932 " .align 2,0x90\n"
17933 - "2: movl 0(%4), %%eax\n"
17934 - "21: movl 4(%4), %%edx\n"
17935 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17936 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17937 " movl %%eax, 0(%3)\n"
17938 " movl %%edx, 4(%3)\n"
17939 - "3: movl 8(%4), %%eax\n"
17940 - "31: movl 12(%4),%%edx\n"
17941 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17942 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17943 " movl %%eax, 8(%3)\n"
17944 " movl %%edx, 12(%3)\n"
17945 - "4: movl 16(%4), %%eax\n"
17946 - "41: movl 20(%4), %%edx\n"
17947 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17948 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17949 " movl %%eax, 16(%3)\n"
17950 " movl %%edx, 20(%3)\n"
17951 - "10: movl 24(%4), %%eax\n"
17952 - "51: movl 28(%4), %%edx\n"
17953 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17954 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17955 " movl %%eax, 24(%3)\n"
17956 " movl %%edx, 28(%3)\n"
17957 - "11: movl 32(%4), %%eax\n"
17958 - "61: movl 36(%4), %%edx\n"
17959 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17960 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17961 " movl %%eax, 32(%3)\n"
17962 " movl %%edx, 36(%3)\n"
17963 - "12: movl 40(%4), %%eax\n"
17964 - "71: movl 44(%4), %%edx\n"
17965 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17966 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17967 " movl %%eax, 40(%3)\n"
17968 " movl %%edx, 44(%3)\n"
17969 - "13: movl 48(%4), %%eax\n"
17970 - "81: movl 52(%4), %%edx\n"
17971 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17972 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17973 " movl %%eax, 48(%3)\n"
17974 " movl %%edx, 52(%3)\n"
17975 - "14: movl 56(%4), %%eax\n"
17976 - "91: movl 60(%4), %%edx\n"
17977 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17978 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17979 " movl %%eax, 56(%3)\n"
17980 " movl %%edx, 60(%3)\n"
17981 " addl $-64, %0\n"
17982 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17983 " shrl $2, %0\n"
17984 " andl $3, %%eax\n"
17985 " cld\n"
17986 - "6: rep; movsl\n"
17987 + "6: rep; "__copyuser_seg" movsl\n"
17988 " movl %%eax,%0\n"
17989 - "7: rep; movsb\n"
17990 + "7: rep; "__copyuser_seg" movsb\n"
17991 "8:\n"
17992 ".section .fixup,\"ax\"\n"
17993 "9: lea 0(%%eax,%0,4),%0\n"
17994 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
17995
17996 __asm__ __volatile__(
17997 " .align 2,0x90\n"
17998 - "0: movl 32(%4), %%eax\n"
17999 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18000 " cmpl $67, %0\n"
18001 " jbe 2f\n"
18002 - "1: movl 64(%4), %%eax\n"
18003 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18004 " .align 2,0x90\n"
18005 - "2: movl 0(%4), %%eax\n"
18006 - "21: movl 4(%4), %%edx\n"
18007 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18008 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18009 " movnti %%eax, 0(%3)\n"
18010 " movnti %%edx, 4(%3)\n"
18011 - "3: movl 8(%4), %%eax\n"
18012 - "31: movl 12(%4),%%edx\n"
18013 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18014 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18015 " movnti %%eax, 8(%3)\n"
18016 " movnti %%edx, 12(%3)\n"
18017 - "4: movl 16(%4), %%eax\n"
18018 - "41: movl 20(%4), %%edx\n"
18019 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18020 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18021 " movnti %%eax, 16(%3)\n"
18022 " movnti %%edx, 20(%3)\n"
18023 - "10: movl 24(%4), %%eax\n"
18024 - "51: movl 28(%4), %%edx\n"
18025 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18026 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18027 " movnti %%eax, 24(%3)\n"
18028 " movnti %%edx, 28(%3)\n"
18029 - "11: movl 32(%4), %%eax\n"
18030 - "61: movl 36(%4), %%edx\n"
18031 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18032 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18033 " movnti %%eax, 32(%3)\n"
18034 " movnti %%edx, 36(%3)\n"
18035 - "12: movl 40(%4), %%eax\n"
18036 - "71: movl 44(%4), %%edx\n"
18037 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18038 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18039 " movnti %%eax, 40(%3)\n"
18040 " movnti %%edx, 44(%3)\n"
18041 - "13: movl 48(%4), %%eax\n"
18042 - "81: movl 52(%4), %%edx\n"
18043 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18044 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18045 " movnti %%eax, 48(%3)\n"
18046 " movnti %%edx, 52(%3)\n"
18047 - "14: movl 56(%4), %%eax\n"
18048 - "91: movl 60(%4), %%edx\n"
18049 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18050 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18051 " movnti %%eax, 56(%3)\n"
18052 " movnti %%edx, 60(%3)\n"
18053 " addl $-64, %0\n"
18054 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18055 " shrl $2, %0\n"
18056 " andl $3, %%eax\n"
18057 " cld\n"
18058 - "6: rep; movsl\n"
18059 + "6: rep; "__copyuser_seg" movsl\n"
18060 " movl %%eax,%0\n"
18061 - "7: rep; movsb\n"
18062 + "7: rep; "__copyuser_seg" movsb\n"
18063 "8:\n"
18064 ".section .fixup,\"ax\"\n"
18065 "9: lea 0(%%eax,%0,4),%0\n"
18066 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18067
18068 __asm__ __volatile__(
18069 " .align 2,0x90\n"
18070 - "0: movl 32(%4), %%eax\n"
18071 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18072 " cmpl $67, %0\n"
18073 " jbe 2f\n"
18074 - "1: movl 64(%4), %%eax\n"
18075 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18076 " .align 2,0x90\n"
18077 - "2: movl 0(%4), %%eax\n"
18078 - "21: movl 4(%4), %%edx\n"
18079 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18080 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18081 " movnti %%eax, 0(%3)\n"
18082 " movnti %%edx, 4(%3)\n"
18083 - "3: movl 8(%4), %%eax\n"
18084 - "31: movl 12(%4),%%edx\n"
18085 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18086 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18087 " movnti %%eax, 8(%3)\n"
18088 " movnti %%edx, 12(%3)\n"
18089 - "4: movl 16(%4), %%eax\n"
18090 - "41: movl 20(%4), %%edx\n"
18091 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18092 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18093 " movnti %%eax, 16(%3)\n"
18094 " movnti %%edx, 20(%3)\n"
18095 - "10: movl 24(%4), %%eax\n"
18096 - "51: movl 28(%4), %%edx\n"
18097 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18098 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18099 " movnti %%eax, 24(%3)\n"
18100 " movnti %%edx, 28(%3)\n"
18101 - "11: movl 32(%4), %%eax\n"
18102 - "61: movl 36(%4), %%edx\n"
18103 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18104 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18105 " movnti %%eax, 32(%3)\n"
18106 " movnti %%edx, 36(%3)\n"
18107 - "12: movl 40(%4), %%eax\n"
18108 - "71: movl 44(%4), %%edx\n"
18109 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18110 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18111 " movnti %%eax, 40(%3)\n"
18112 " movnti %%edx, 44(%3)\n"
18113 - "13: movl 48(%4), %%eax\n"
18114 - "81: movl 52(%4), %%edx\n"
18115 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18116 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18117 " movnti %%eax, 48(%3)\n"
18118 " movnti %%edx, 52(%3)\n"
18119 - "14: movl 56(%4), %%eax\n"
18120 - "91: movl 60(%4), %%edx\n"
18121 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18122 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18123 " movnti %%eax, 56(%3)\n"
18124 " movnti %%edx, 60(%3)\n"
18125 " addl $-64, %0\n"
18126 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18127 " shrl $2, %0\n"
18128 " andl $3, %%eax\n"
18129 " cld\n"
18130 - "6: rep; movsl\n"
18131 + "6: rep; "__copyuser_seg" movsl\n"
18132 " movl %%eax,%0\n"
18133 - "7: rep; movsb\n"
18134 + "7: rep; "__copyuser_seg" movsb\n"
18135 "8:\n"
18136 ".section .fixup,\"ax\"\n"
18137 "9: lea 0(%%eax,%0,4),%0\n"
18138 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18139 */
18140 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18141 unsigned long size);
18142 -unsigned long __copy_user_intel(void __user *to, const void *from,
18143 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18144 + unsigned long size);
18145 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18146 unsigned long size);
18147 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18148 const void __user *from, unsigned long size);
18149 #endif /* CONFIG_X86_INTEL_USERCOPY */
18150
18151 /* Generic arbitrary sized copy. */
18152 -#define __copy_user(to, from, size) \
18153 +#define __copy_user(to, from, size, prefix, set, restore) \
18154 do { \
18155 int __d0, __d1, __d2; \
18156 __asm__ __volatile__( \
18157 + set \
18158 " cmp $7,%0\n" \
18159 " jbe 1f\n" \
18160 " movl %1,%0\n" \
18161 " negl %0\n" \
18162 " andl $7,%0\n" \
18163 " subl %0,%3\n" \
18164 - "4: rep; movsb\n" \
18165 + "4: rep; "prefix"movsb\n" \
18166 " movl %3,%0\n" \
18167 " shrl $2,%0\n" \
18168 " andl $3,%3\n" \
18169 " .align 2,0x90\n" \
18170 - "0: rep; movsl\n" \
18171 + "0: rep; "prefix"movsl\n" \
18172 " movl %3,%0\n" \
18173 - "1: rep; movsb\n" \
18174 + "1: rep; "prefix"movsb\n" \
18175 "2:\n" \
18176 + restore \
18177 ".section .fixup,\"ax\"\n" \
18178 "5: addl %3,%0\n" \
18179 " jmp 2b\n" \
18180 @@ -682,14 +799,14 @@ do { \
18181 " negl %0\n" \
18182 " andl $7,%0\n" \
18183 " subl %0,%3\n" \
18184 - "4: rep; movsb\n" \
18185 + "4: rep; "__copyuser_seg"movsb\n" \
18186 " movl %3,%0\n" \
18187 " shrl $2,%0\n" \
18188 " andl $3,%3\n" \
18189 " .align 2,0x90\n" \
18190 - "0: rep; movsl\n" \
18191 + "0: rep; "__copyuser_seg"movsl\n" \
18192 " movl %3,%0\n" \
18193 - "1: rep; movsb\n" \
18194 + "1: rep; "__copyuser_seg"movsb\n" \
18195 "2:\n" \
18196 ".section .fixup,\"ax\"\n" \
18197 "5: addl %3,%0\n" \
18198 @@ -775,9 +892,9 @@ survive:
18199 }
18200 #endif
18201 if (movsl_is_ok(to, from, n))
18202 - __copy_user(to, from, n);
18203 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18204 else
18205 - n = __copy_user_intel(to, from, n);
18206 + n = __generic_copy_to_user_intel(to, from, n);
18207 return n;
18208 }
18209 EXPORT_SYMBOL(__copy_to_user_ll);
18210 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18211 unsigned long n)
18212 {
18213 if (movsl_is_ok(to, from, n))
18214 - __copy_user(to, from, n);
18215 + __copy_user(to, from, n, __copyuser_seg, "", "");
18216 else
18217 - n = __copy_user_intel((void __user *)to,
18218 - (const void *)from, n);
18219 + n = __generic_copy_from_user_intel(to, from, n);
18220 return n;
18221 }
18222 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18223 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18224 if (n > 64 && cpu_has_xmm2)
18225 n = __copy_user_intel_nocache(to, from, n);
18226 else
18227 - __copy_user(to, from, n);
18228 + __copy_user(to, from, n, __copyuser_seg, "", "");
18229 #else
18230 - __copy_user(to, from, n);
18231 + __copy_user(to, from, n, __copyuser_seg, "", "");
18232 #endif
18233 return n;
18234 }
18235 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18236
18237 -/**
18238 - * copy_to_user: - Copy a block of data into user space.
18239 - * @to: Destination address, in user space.
18240 - * @from: Source address, in kernel space.
18241 - * @n: Number of bytes to copy.
18242 - *
18243 - * Context: User context only. This function may sleep.
18244 - *
18245 - * Copy data from kernel space to user space.
18246 - *
18247 - * Returns number of bytes that could not be copied.
18248 - * On success, this will be zero.
18249 - */
18250 -unsigned long
18251 -copy_to_user(void __user *to, const void *from, unsigned long n)
18252 +void copy_from_user_overflow(void)
18253 {
18254 - if (access_ok(VERIFY_WRITE, to, n))
18255 - n = __copy_to_user(to, from, n);
18256 - return n;
18257 + WARN(1, "Buffer overflow detected!\n");
18258 }
18259 -EXPORT_SYMBOL(copy_to_user);
18260 +EXPORT_SYMBOL(copy_from_user_overflow);
18261
18262 -/**
18263 - * copy_from_user: - Copy a block of data from user space.
18264 - * @to: Destination address, in kernel space.
18265 - * @from: Source address, in user space.
18266 - * @n: Number of bytes to copy.
18267 - *
18268 - * Context: User context only. This function may sleep.
18269 - *
18270 - * Copy data from user space to kernel space.
18271 - *
18272 - * Returns number of bytes that could not be copied.
18273 - * On success, this will be zero.
18274 - *
18275 - * If some data could not be copied, this function will pad the copied
18276 - * data to the requested size using zero bytes.
18277 - */
18278 -unsigned long
18279 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18280 +void copy_to_user_overflow(void)
18281 {
18282 - if (access_ok(VERIFY_READ, from, n))
18283 - n = __copy_from_user(to, from, n);
18284 - else
18285 - memset(to, 0, n);
18286 - return n;
18287 + WARN(1, "Buffer overflow detected!\n");
18288 }
18289 -EXPORT_SYMBOL(_copy_from_user);
18290 +EXPORT_SYMBOL(copy_to_user_overflow);
18291
18292 -void copy_from_user_overflow(void)
18293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18294 +void __set_fs(mm_segment_t x)
18295 {
18296 - WARN(1, "Buffer overflow detected!\n");
18297 + switch (x.seg) {
18298 + case 0:
18299 + loadsegment(gs, 0);
18300 + break;
18301 + case TASK_SIZE_MAX:
18302 + loadsegment(gs, __USER_DS);
18303 + break;
18304 + case -1UL:
18305 + loadsegment(gs, __KERNEL_DS);
18306 + break;
18307 + default:
18308 + BUG();
18309 + }
18310 + return;
18311 }
18312 -EXPORT_SYMBOL(copy_from_user_overflow);
18313 +EXPORT_SYMBOL(__set_fs);
18314 +
18315 +void set_fs(mm_segment_t x)
18316 +{
18317 + current_thread_info()->addr_limit = x;
18318 + __set_fs(x);
18319 +}
18320 +EXPORT_SYMBOL(set_fs);
18321 +#endif
18322 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18323 --- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18324 +++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18325 @@ -42,6 +42,12 @@ long
18326 __strncpy_from_user(char *dst, const char __user *src, long count)
18327 {
18328 long res;
18329 +
18330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18331 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18332 + src += PAX_USER_SHADOW_BASE;
18333 +#endif
18334 +
18335 __do_strncpy_from_user(dst, src, count, res);
18336 return res;
18337 }
18338 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18339 {
18340 long __d0;
18341 might_fault();
18342 +
18343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18344 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18345 + addr += PAX_USER_SHADOW_BASE;
18346 +#endif
18347 +
18348 /* no memory constraint because it doesn't change any memory gcc knows
18349 about */
18350 asm volatile(
18351 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18352
18353 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18354 {
18355 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18356 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18357 +
18358 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18359 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18360 + to += PAX_USER_SHADOW_BASE;
18361 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18362 + from += PAX_USER_SHADOW_BASE;
18363 +#endif
18364 +
18365 return copy_user_generic((__force void *)to, (__force void *)from, len);
18366 - }
18367 - return len;
18368 + }
18369 + return len;
18370 }
18371 EXPORT_SYMBOL(copy_in_user);
18372
18373 diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
18374 --- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18375 +++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18376 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18377 else
18378 BITS := 64
18379 UTS_MACHINE := x86_64
18380 + biarch := $(call cc-option,-m64)
18381 CHECKFLAGS += -D__x86_64__ -m64
18382
18383 KBUILD_AFLAGS += -m64
18384 @@ -195,3 +196,12 @@ define archhelp
18385 echo ' FDARGS="..." arguments for the booted kernel'
18386 echo ' FDINITRD=file initrd for the booted kernel'
18387 endef
18388 +
18389 +define OLD_LD
18390 +
18391 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18392 +*** Please upgrade your binutils to 2.18 or newer
18393 +endef
18394 +
18395 +archprepare:
18396 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18397 diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
18398 --- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18399 +++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18400 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18401 const struct exception_table_entry *fixup;
18402
18403 #ifdef CONFIG_PNPBIOS
18404 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18405 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18406 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18407 extern u32 pnp_bios_is_utter_crap;
18408 pnp_bios_is_utter_crap = 1;
18409 diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
18410 --- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18411 +++ linux-3.0.4/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18412 @@ -13,10 +13,18 @@
18413 #include <linux/perf_event.h> /* perf_sw_event */
18414 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18415 #include <linux/prefetch.h> /* prefetchw */
18416 +#include <linux/unistd.h>
18417 +#include <linux/compiler.h>
18418
18419 #include <asm/traps.h> /* dotraplinkage, ... */
18420 #include <asm/pgalloc.h> /* pgd_*(), ... */
18421 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18422 +#include <asm/vsyscall.h>
18423 +#include <asm/tlbflush.h>
18424 +
18425 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18426 +#include <asm/stacktrace.h>
18427 +#endif
18428
18429 /*
18430 * Page fault error code bits:
18431 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18432 int ret = 0;
18433
18434 /* kprobe_running() needs smp_processor_id() */
18435 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18436 + if (kprobes_built_in() && !user_mode(regs)) {
18437 preempt_disable();
18438 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18439 ret = 1;
18440 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18441 return !instr_lo || (instr_lo>>1) == 1;
18442 case 0x00:
18443 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18444 - if (probe_kernel_address(instr, opcode))
18445 + if (user_mode(regs)) {
18446 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18447 + return 0;
18448 + } else if (probe_kernel_address(instr, opcode))
18449 return 0;
18450
18451 *prefetch = (instr_lo == 0xF) &&
18452 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18453 while (instr < max_instr) {
18454 unsigned char opcode;
18455
18456 - if (probe_kernel_address(instr, opcode))
18457 + if (user_mode(regs)) {
18458 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18459 + break;
18460 + } else if (probe_kernel_address(instr, opcode))
18461 break;
18462
18463 instr++;
18464 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18465 force_sig_info(si_signo, &info, tsk);
18466 }
18467
18468 +#ifdef CONFIG_PAX_EMUTRAMP
18469 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18470 +#endif
18471 +
18472 +#ifdef CONFIG_PAX_PAGEEXEC
18473 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18474 +{
18475 + pgd_t *pgd;
18476 + pud_t *pud;
18477 + pmd_t *pmd;
18478 +
18479 + pgd = pgd_offset(mm, address);
18480 + if (!pgd_present(*pgd))
18481 + return NULL;
18482 + pud = pud_offset(pgd, address);
18483 + if (!pud_present(*pud))
18484 + return NULL;
18485 + pmd = pmd_offset(pud, address);
18486 + if (!pmd_present(*pmd))
18487 + return NULL;
18488 + return pmd;
18489 +}
18490 +#endif
18491 +
18492 DEFINE_SPINLOCK(pgd_lock);
18493 LIST_HEAD(pgd_list);
18494
18495 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18496 for (address = VMALLOC_START & PMD_MASK;
18497 address >= TASK_SIZE && address < FIXADDR_TOP;
18498 address += PMD_SIZE) {
18499 +
18500 +#ifdef CONFIG_PAX_PER_CPU_PGD
18501 + unsigned long cpu;
18502 +#else
18503 struct page *page;
18504 +#endif
18505
18506 spin_lock(&pgd_lock);
18507 +
18508 +#ifdef CONFIG_PAX_PER_CPU_PGD
18509 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18510 + pgd_t *pgd = get_cpu_pgd(cpu);
18511 + pmd_t *ret;
18512 +#else
18513 list_for_each_entry(page, &pgd_list, lru) {
18514 + pgd_t *pgd = page_address(page);
18515 spinlock_t *pgt_lock;
18516 pmd_t *ret;
18517
18518 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18519 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18520
18521 spin_lock(pgt_lock);
18522 - ret = vmalloc_sync_one(page_address(page), address);
18523 +#endif
18524 +
18525 + ret = vmalloc_sync_one(pgd, address);
18526 +
18527 +#ifndef CONFIG_PAX_PER_CPU_PGD
18528 spin_unlock(pgt_lock);
18529 +#endif
18530
18531 if (!ret)
18532 break;
18533 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18534 * an interrupt in the middle of a task switch..
18535 */
18536 pgd_paddr = read_cr3();
18537 +
18538 +#ifdef CONFIG_PAX_PER_CPU_PGD
18539 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18540 +#endif
18541 +
18542 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18543 if (!pmd_k)
18544 return -1;
18545 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18546 * happen within a race in page table update. In the later
18547 * case just flush:
18548 */
18549 +
18550 +#ifdef CONFIG_PAX_PER_CPU_PGD
18551 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18552 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18553 +#else
18554 pgd = pgd_offset(current->active_mm, address);
18555 +#endif
18556 +
18557 pgd_ref = pgd_offset_k(address);
18558 if (pgd_none(*pgd_ref))
18559 return -1;
18560 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18561 static int is_errata100(struct pt_regs *regs, unsigned long address)
18562 {
18563 #ifdef CONFIG_X86_64
18564 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18565 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18566 return 1;
18567 #endif
18568 return 0;
18569 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18570 }
18571
18572 static const char nx_warning[] = KERN_CRIT
18573 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18574 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18575
18576 static void
18577 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18578 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18579 if (!oops_may_print())
18580 return;
18581
18582 - if (error_code & PF_INSTR) {
18583 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18584 unsigned int level;
18585
18586 pte_t *pte = lookup_address(address, &level);
18587
18588 if (pte && pte_present(*pte) && !pte_exec(*pte))
18589 - printk(nx_warning, current_uid());
18590 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18591 + }
18592 +
18593 +#ifdef CONFIG_PAX_KERNEXEC
18594 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18595 + if (current->signal->curr_ip)
18596 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18597 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18598 + else
18599 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18600 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18601 }
18602 +#endif
18603
18604 printk(KERN_ALERT "BUG: unable to handle kernel ");
18605 if (address < PAGE_SIZE)
18606 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18607 unsigned long address, int si_code)
18608 {
18609 struct task_struct *tsk = current;
18610 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18611 + struct mm_struct *mm = tsk->mm;
18612 +#endif
18613 +
18614 +#ifdef CONFIG_X86_64
18615 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18616 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18617 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18618 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18619 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18620 + return;
18621 + }
18622 + }
18623 +#endif
18624 +
18625 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18626 + if (mm && (error_code & PF_USER)) {
18627 + unsigned long ip = regs->ip;
18628 +
18629 + if (v8086_mode(regs))
18630 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18631 +
18632 + /*
18633 + * It's possible to have interrupts off here:
18634 + */
18635 + local_irq_enable();
18636 +
18637 +#ifdef CONFIG_PAX_PAGEEXEC
18638 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18639 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18640 +
18641 +#ifdef CONFIG_PAX_EMUTRAMP
18642 + switch (pax_handle_fetch_fault(regs)) {
18643 + case 2:
18644 + return;
18645 + }
18646 +#endif
18647 +
18648 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18649 + do_group_exit(SIGKILL);
18650 + }
18651 +#endif
18652 +
18653 +#ifdef CONFIG_PAX_SEGMEXEC
18654 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18655 +
18656 +#ifdef CONFIG_PAX_EMUTRAMP
18657 + switch (pax_handle_fetch_fault(regs)) {
18658 + case 2:
18659 + return;
18660 + }
18661 +#endif
18662 +
18663 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18664 + do_group_exit(SIGKILL);
18665 + }
18666 +#endif
18667 +
18668 + }
18669 +#endif
18670
18671 /* User mode accesses just cause a SIGSEGV */
18672 if (error_code & PF_USER) {
18673 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18674 return 1;
18675 }
18676
18677 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18678 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18679 +{
18680 + pte_t *pte;
18681 + pmd_t *pmd;
18682 + spinlock_t *ptl;
18683 + unsigned char pte_mask;
18684 +
18685 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18686 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18687 + return 0;
18688 +
18689 + /* PaX: it's our fault, let's handle it if we can */
18690 +
18691 + /* PaX: take a look at read faults before acquiring any locks */
18692 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18693 + /* instruction fetch attempt from a protected page in user mode */
18694 + up_read(&mm->mmap_sem);
18695 +
18696 +#ifdef CONFIG_PAX_EMUTRAMP
18697 + switch (pax_handle_fetch_fault(regs)) {
18698 + case 2:
18699 + return 1;
18700 + }
18701 +#endif
18702 +
18703 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18704 + do_group_exit(SIGKILL);
18705 + }
18706 +
18707 + pmd = pax_get_pmd(mm, address);
18708 + if (unlikely(!pmd))
18709 + return 0;
18710 +
18711 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18712 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18713 + pte_unmap_unlock(pte, ptl);
18714 + return 0;
18715 + }
18716 +
18717 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18718 + /* write attempt to a protected page in user mode */
18719 + pte_unmap_unlock(pte, ptl);
18720 + return 0;
18721 + }
18722 +
18723 +#ifdef CONFIG_SMP
18724 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18725 +#else
18726 + if (likely(address > get_limit(regs->cs)))
18727 +#endif
18728 + {
18729 + set_pte(pte, pte_mkread(*pte));
18730 + __flush_tlb_one(address);
18731 + pte_unmap_unlock(pte, ptl);
18732 + up_read(&mm->mmap_sem);
18733 + return 1;
18734 + }
18735 +
18736 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18737 +
18738 + /*
18739 + * PaX: fill DTLB with user rights and retry
18740 + */
18741 + __asm__ __volatile__ (
18742 + "orb %2,(%1)\n"
18743 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18744 +/*
18745 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18746 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18747 + * page fault when examined during a TLB load attempt. this is true not only
18748 + * for PTEs holding a non-present entry but also present entries that will
18749 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18750 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18751 + * for our target pages since their PTEs are simply not in the TLBs at all.
18752 +
18753 + * the best thing in omitting it is that we gain around 15-20% speed in the
18754 + * fast path of the page fault handler and can get rid of tracing since we
18755 + * can no longer flush unintended entries.
18756 + */
18757 + "invlpg (%0)\n"
18758 +#endif
18759 + __copyuser_seg"testb $0,(%0)\n"
18760 + "xorb %3,(%1)\n"
18761 + :
18762 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18763 + : "memory", "cc");
18764 + pte_unmap_unlock(pte, ptl);
18765 + up_read(&mm->mmap_sem);
18766 + return 1;
18767 +}
18768 +#endif
18769 +
18770 /*
18771 * Handle a spurious fault caused by a stale TLB entry.
18772 *
18773 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18774 static inline int
18775 access_error(unsigned long error_code, struct vm_area_struct *vma)
18776 {
18777 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18778 + return 1;
18779 +
18780 if (error_code & PF_WRITE) {
18781 /* write, present and write, not present: */
18782 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18783 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18784 {
18785 struct vm_area_struct *vma;
18786 struct task_struct *tsk;
18787 - unsigned long address;
18788 struct mm_struct *mm;
18789 int fault;
18790 int write = error_code & PF_WRITE;
18791 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18792 (write ? FAULT_FLAG_WRITE : 0);
18793
18794 + /* Get the faulting address: */
18795 + unsigned long address = read_cr2();
18796 +
18797 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18798 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18799 + if (!search_exception_tables(regs->ip)) {
18800 + bad_area_nosemaphore(regs, error_code, address);
18801 + return;
18802 + }
18803 + if (address < PAX_USER_SHADOW_BASE) {
18804 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18805 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18806 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18807 + } else
18808 + address -= PAX_USER_SHADOW_BASE;
18809 + }
18810 +#endif
18811 +
18812 tsk = current;
18813 mm = tsk->mm;
18814
18815 - /* Get the faulting address: */
18816 - address = read_cr2();
18817 -
18818 /*
18819 * Detect and handle instructions that would cause a page fault for
18820 * both a tracked kernel page and a userspace page.
18821 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18822 * User-mode registers count as a user access even for any
18823 * potential system fault or CPU buglet:
18824 */
18825 - if (user_mode_vm(regs)) {
18826 + if (user_mode(regs)) {
18827 local_irq_enable();
18828 error_code |= PF_USER;
18829 } else {
18830 @@ -1103,6 +1351,11 @@ retry:
18831 might_sleep();
18832 }
18833
18834 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18835 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18836 + return;
18837 +#endif
18838 +
18839 vma = find_vma(mm, address);
18840 if (unlikely(!vma)) {
18841 bad_area(regs, error_code, address);
18842 @@ -1114,18 +1367,24 @@ retry:
18843 bad_area(regs, error_code, address);
18844 return;
18845 }
18846 - if (error_code & PF_USER) {
18847 - /*
18848 - * Accessing the stack below %sp is always a bug.
18849 - * The large cushion allows instructions like enter
18850 - * and pusha to work. ("enter $65535, $31" pushes
18851 - * 32 pointers and then decrements %sp by 65535.)
18852 - */
18853 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18854 - bad_area(regs, error_code, address);
18855 - return;
18856 - }
18857 + /*
18858 + * Accessing the stack below %sp is always a bug.
18859 + * The large cushion allows instructions like enter
18860 + * and pusha to work. ("enter $65535, $31" pushes
18861 + * 32 pointers and then decrements %sp by 65535.)
18862 + */
18863 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18864 + bad_area(regs, error_code, address);
18865 + return;
18866 }
18867 +
18868 +#ifdef CONFIG_PAX_SEGMEXEC
18869 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18870 + bad_area(regs, error_code, address);
18871 + return;
18872 + }
18873 +#endif
18874 +
18875 if (unlikely(expand_stack(vma, address))) {
18876 bad_area(regs, error_code, address);
18877 return;
18878 @@ -1180,3 +1439,199 @@ good_area:
18879
18880 up_read(&mm->mmap_sem);
18881 }
18882 +
18883 +#ifdef CONFIG_PAX_EMUTRAMP
18884 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18885 +{
18886 + int err;
18887 +
18888 + do { /* PaX: gcc trampoline emulation #1 */
18889 + unsigned char mov1, mov2;
18890 + unsigned short jmp;
18891 + unsigned int addr1, addr2;
18892 +
18893 +#ifdef CONFIG_X86_64
18894 + if ((regs->ip + 11) >> 32)
18895 + break;
18896 +#endif
18897 +
18898 + err = get_user(mov1, (unsigned char __user *)regs->ip);
18899 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18900 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18901 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18902 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18903 +
18904 + if (err)
18905 + break;
18906 +
18907 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18908 + regs->cx = addr1;
18909 + regs->ax = addr2;
18910 + regs->ip = addr2;
18911 + return 2;
18912 + }
18913 + } while (0);
18914 +
18915 + do { /* PaX: gcc trampoline emulation #2 */
18916 + unsigned char mov, jmp;
18917 + unsigned int addr1, addr2;
18918 +
18919 +#ifdef CONFIG_X86_64
18920 + if ((regs->ip + 9) >> 32)
18921 + break;
18922 +#endif
18923 +
18924 + err = get_user(mov, (unsigned char __user *)regs->ip);
18925 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18926 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18927 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18928 +
18929 + if (err)
18930 + break;
18931 +
18932 + if (mov == 0xB9 && jmp == 0xE9) {
18933 + regs->cx = addr1;
18934 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18935 + return 2;
18936 + }
18937 + } while (0);
18938 +
18939 + return 1; /* PaX in action */
18940 +}
18941 +
18942 +#ifdef CONFIG_X86_64
18943 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18944 +{
18945 + int err;
18946 +
18947 + do { /* PaX: gcc trampoline emulation #1 */
18948 + unsigned short mov1, mov2, jmp1;
18949 + unsigned char jmp2;
18950 + unsigned int addr1;
18951 + unsigned long addr2;
18952 +
18953 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18954 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18955 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18956 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18957 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18958 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18959 +
18960 + if (err)
18961 + break;
18962 +
18963 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18964 + regs->r11 = addr1;
18965 + regs->r10 = addr2;
18966 + regs->ip = addr1;
18967 + return 2;
18968 + }
18969 + } while (0);
18970 +
18971 + do { /* PaX: gcc trampoline emulation #2 */
18972 + unsigned short mov1, mov2, jmp1;
18973 + unsigned char jmp2;
18974 + unsigned long addr1, addr2;
18975 +
18976 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18977 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18978 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18979 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18980 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18981 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18982 +
18983 + if (err)
18984 + break;
18985 +
18986 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18987 + regs->r11 = addr1;
18988 + regs->r10 = addr2;
18989 + regs->ip = addr1;
18990 + return 2;
18991 + }
18992 + } while (0);
18993 +
18994 + return 1; /* PaX in action */
18995 +}
18996 +#endif
18997 +
18998 +/*
18999 + * PaX: decide what to do with offenders (regs->ip = fault address)
19000 + *
19001 + * returns 1 when task should be killed
19002 + * 2 when gcc trampoline was detected
19003 + */
19004 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19005 +{
19006 + if (v8086_mode(regs))
19007 + return 1;
19008 +
19009 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19010 + return 1;
19011 +
19012 +#ifdef CONFIG_X86_32
19013 + return pax_handle_fetch_fault_32(regs);
19014 +#else
19015 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19016 + return pax_handle_fetch_fault_32(regs);
19017 + else
19018 + return pax_handle_fetch_fault_64(regs);
19019 +#endif
19020 +}
19021 +#endif
19022 +
19023 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19024 +void pax_report_insns(void *pc, void *sp)
19025 +{
19026 + long i;
19027 +
19028 + printk(KERN_ERR "PAX: bytes at PC: ");
19029 + for (i = 0; i < 20; i++) {
19030 + unsigned char c;
19031 + if (get_user(c, (__force unsigned char __user *)pc+i))
19032 + printk(KERN_CONT "?? ");
19033 + else
19034 + printk(KERN_CONT "%02x ", c);
19035 + }
19036 + printk("\n");
19037 +
19038 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19039 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19040 + unsigned long c;
19041 + if (get_user(c, (__force unsigned long __user *)sp+i))
19042 +#ifdef CONFIG_X86_32
19043 + printk(KERN_CONT "???????? ");
19044 +#else
19045 + printk(KERN_CONT "???????????????? ");
19046 +#endif
19047 + else
19048 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19049 + }
19050 + printk("\n");
19051 +}
19052 +#endif
19053 +
19054 +/**
19055 + * probe_kernel_write(): safely attempt to write to a location
19056 + * @dst: address to write to
19057 + * @src: pointer to the data that shall be written
19058 + * @size: size of the data chunk
19059 + *
19060 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19061 + * happens, handle that and return -EFAULT.
19062 + */
19063 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19064 +{
19065 + long ret;
19066 + mm_segment_t old_fs = get_fs();
19067 +
19068 + set_fs(KERNEL_DS);
19069 + pagefault_disable();
19070 + pax_open_kernel();
19071 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19072 + pax_close_kernel();
19073 + pagefault_enable();
19074 + set_fs(old_fs);
19075 +
19076 + return ret ? -EFAULT : 0;
19077 +}
19078 diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19079 --- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19080 +++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19081 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19082 addr = start;
19083 len = (unsigned long) nr_pages << PAGE_SHIFT;
19084 end = start + len;
19085 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19086 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19087 (void __user *)start, len)))
19088 return 0;
19089
19090 diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19091 --- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19092 +++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19093 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19094 idx = type + KM_TYPE_NR*smp_processor_id();
19095 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19096 BUG_ON(!pte_none(*(kmap_pte-idx)));
19097 +
19098 + pax_open_kernel();
19099 set_pte(kmap_pte-idx, mk_pte(page, prot));
19100 + pax_close_kernel();
19101
19102 return (void *)vaddr;
19103 }
19104 diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19105 --- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19106 +++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19107 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19108 struct hstate *h = hstate_file(file);
19109 struct mm_struct *mm = current->mm;
19110 struct vm_area_struct *vma;
19111 - unsigned long start_addr;
19112 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19113 +
19114 +#ifdef CONFIG_PAX_SEGMEXEC
19115 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19116 + pax_task_size = SEGMEXEC_TASK_SIZE;
19117 +#endif
19118 +
19119 + pax_task_size -= PAGE_SIZE;
19120
19121 if (len > mm->cached_hole_size) {
19122 - start_addr = mm->free_area_cache;
19123 + start_addr = mm->free_area_cache;
19124 } else {
19125 - start_addr = TASK_UNMAPPED_BASE;
19126 - mm->cached_hole_size = 0;
19127 + start_addr = mm->mmap_base;
19128 + mm->cached_hole_size = 0;
19129 }
19130
19131 full_search:
19132 @@ -280,26 +287,27 @@ full_search:
19133
19134 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19135 /* At this point: (!vma || addr < vma->vm_end). */
19136 - if (TASK_SIZE - len < addr) {
19137 + if (pax_task_size - len < addr) {
19138 /*
19139 * Start a new search - just in case we missed
19140 * some holes.
19141 */
19142 - if (start_addr != TASK_UNMAPPED_BASE) {
19143 - start_addr = TASK_UNMAPPED_BASE;
19144 + if (start_addr != mm->mmap_base) {
19145 + start_addr = mm->mmap_base;
19146 mm->cached_hole_size = 0;
19147 goto full_search;
19148 }
19149 return -ENOMEM;
19150 }
19151 - if (!vma || addr + len <= vma->vm_start) {
19152 - mm->free_area_cache = addr + len;
19153 - return addr;
19154 - }
19155 + if (check_heap_stack_gap(vma, addr, len))
19156 + break;
19157 if (addr + mm->cached_hole_size < vma->vm_start)
19158 mm->cached_hole_size = vma->vm_start - addr;
19159 addr = ALIGN(vma->vm_end, huge_page_size(h));
19160 }
19161 +
19162 + mm->free_area_cache = addr + len;
19163 + return addr;
19164 }
19165
19166 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19167 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19168 {
19169 struct hstate *h = hstate_file(file);
19170 struct mm_struct *mm = current->mm;
19171 - struct vm_area_struct *vma, *prev_vma;
19172 - unsigned long base = mm->mmap_base, addr = addr0;
19173 + struct vm_area_struct *vma;
19174 + unsigned long base = mm->mmap_base, addr;
19175 unsigned long largest_hole = mm->cached_hole_size;
19176 - int first_time = 1;
19177
19178 /* don't allow allocations above current base */
19179 if (mm->free_area_cache > base)
19180 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19181 largest_hole = 0;
19182 mm->free_area_cache = base;
19183 }
19184 -try_again:
19185 +
19186 /* make sure it can fit in the remaining address space */
19187 if (mm->free_area_cache < len)
19188 goto fail;
19189
19190 /* either no address requested or can't fit in requested address hole */
19191 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19192 + addr = (mm->free_area_cache - len);
19193 do {
19194 + addr &= huge_page_mask(h);
19195 + vma = find_vma(mm, addr);
19196 /*
19197 * Lookup failure means no vma is above this address,
19198 * i.e. return with success:
19199 - */
19200 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19201 - return addr;
19202 -
19203 - /*
19204 * new region fits between prev_vma->vm_end and
19205 * vma->vm_start, use it:
19206 */
19207 - if (addr + len <= vma->vm_start &&
19208 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19209 + if (check_heap_stack_gap(vma, addr, len)) {
19210 /* remember the address as a hint for next time */
19211 - mm->cached_hole_size = largest_hole;
19212 - return (mm->free_area_cache = addr);
19213 - } else {
19214 - /* pull free_area_cache down to the first hole */
19215 - if (mm->free_area_cache == vma->vm_end) {
19216 - mm->free_area_cache = vma->vm_start;
19217 - mm->cached_hole_size = largest_hole;
19218 - }
19219 + mm->cached_hole_size = largest_hole;
19220 + return (mm->free_area_cache = addr);
19221 + }
19222 + /* pull free_area_cache down to the first hole */
19223 + if (mm->free_area_cache == vma->vm_end) {
19224 + mm->free_area_cache = vma->vm_start;
19225 + mm->cached_hole_size = largest_hole;
19226 }
19227
19228 /* remember the largest hole we saw so far */
19229 if (addr + largest_hole < vma->vm_start)
19230 - largest_hole = vma->vm_start - addr;
19231 + largest_hole = vma->vm_start - addr;
19232
19233 /* try just below the current vma->vm_start */
19234 - addr = (vma->vm_start - len) & huge_page_mask(h);
19235 - } while (len <= vma->vm_start);
19236 + addr = skip_heap_stack_gap(vma, len);
19237 + } while (!IS_ERR_VALUE(addr));
19238
19239 fail:
19240 /*
19241 - * if hint left us with no space for the requested
19242 - * mapping then try again:
19243 - */
19244 - if (first_time) {
19245 - mm->free_area_cache = base;
19246 - largest_hole = 0;
19247 - first_time = 0;
19248 - goto try_again;
19249 - }
19250 - /*
19251 * A failed mmap() very likely causes application failure,
19252 * so fall back to the bottom-up function here. This scenario
19253 * can happen with large stack limits and large mmap()
19254 * allocations.
19255 */
19256 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19257 +
19258 +#ifdef CONFIG_PAX_SEGMEXEC
19259 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19260 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19261 + else
19262 +#endif
19263 +
19264 + mm->mmap_base = TASK_UNMAPPED_BASE;
19265 +
19266 +#ifdef CONFIG_PAX_RANDMMAP
19267 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19268 + mm->mmap_base += mm->delta_mmap;
19269 +#endif
19270 +
19271 + mm->free_area_cache = mm->mmap_base;
19272 mm->cached_hole_size = ~0UL;
19273 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19274 len, pgoff, flags);
19275 @@ -386,6 +392,7 @@ fail:
19276 /*
19277 * Restore the topdown base:
19278 */
19279 + mm->mmap_base = base;
19280 mm->free_area_cache = base;
19281 mm->cached_hole_size = ~0UL;
19282
19283 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19284 struct hstate *h = hstate_file(file);
19285 struct mm_struct *mm = current->mm;
19286 struct vm_area_struct *vma;
19287 + unsigned long pax_task_size = TASK_SIZE;
19288
19289 if (len & ~huge_page_mask(h))
19290 return -EINVAL;
19291 - if (len > TASK_SIZE)
19292 +
19293 +#ifdef CONFIG_PAX_SEGMEXEC
19294 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19295 + pax_task_size = SEGMEXEC_TASK_SIZE;
19296 +#endif
19297 +
19298 + pax_task_size -= PAGE_SIZE;
19299 +
19300 + if (len > pax_task_size)
19301 return -ENOMEM;
19302
19303 if (flags & MAP_FIXED) {
19304 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19305 if (addr) {
19306 addr = ALIGN(addr, huge_page_size(h));
19307 vma = find_vma(mm, addr);
19308 - if (TASK_SIZE - len >= addr &&
19309 - (!vma || addr + len <= vma->vm_start))
19310 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19311 return addr;
19312 }
19313 if (mm->get_unmapped_area == arch_get_unmapped_area)
19314 diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19315 --- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19316 +++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19317 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19318 }
19319
19320 /*
19321 - * Creates a middle page table and puts a pointer to it in the
19322 - * given global directory entry. This only returns the gd entry
19323 - * in non-PAE compilation mode, since the middle layer is folded.
19324 - */
19325 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19326 -{
19327 - pud_t *pud;
19328 - pmd_t *pmd_table;
19329 -
19330 -#ifdef CONFIG_X86_PAE
19331 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19332 - if (after_bootmem)
19333 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19334 - else
19335 - pmd_table = (pmd_t *)alloc_low_page();
19336 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19337 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19338 - pud = pud_offset(pgd, 0);
19339 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19340 -
19341 - return pmd_table;
19342 - }
19343 -#endif
19344 - pud = pud_offset(pgd, 0);
19345 - pmd_table = pmd_offset(pud, 0);
19346 -
19347 - return pmd_table;
19348 -}
19349 -
19350 -/*
19351 * Create a page table and place a pointer to it in a middle page
19352 * directory entry:
19353 */
19354 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19355 page_table = (pte_t *)alloc_low_page();
19356
19357 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19358 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19359 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19360 +#else
19361 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19362 +#endif
19363 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19364 }
19365
19366 return pte_offset_kernel(pmd, 0);
19367 }
19368
19369 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19370 +{
19371 + pud_t *pud;
19372 + pmd_t *pmd_table;
19373 +
19374 + pud = pud_offset(pgd, 0);
19375 + pmd_table = pmd_offset(pud, 0);
19376 +
19377 + return pmd_table;
19378 +}
19379 +
19380 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19381 {
19382 int pgd_idx = pgd_index(vaddr);
19383 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19384 int pgd_idx, pmd_idx;
19385 unsigned long vaddr;
19386 pgd_t *pgd;
19387 + pud_t *pud;
19388 pmd_t *pmd;
19389 pte_t *pte = NULL;
19390
19391 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19392 pgd = pgd_base + pgd_idx;
19393
19394 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19395 - pmd = one_md_table_init(pgd);
19396 - pmd = pmd + pmd_index(vaddr);
19397 + pud = pud_offset(pgd, vaddr);
19398 + pmd = pmd_offset(pud, vaddr);
19399 +
19400 +#ifdef CONFIG_X86_PAE
19401 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19402 +#endif
19403 +
19404 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19405 pmd++, pmd_idx++) {
19406 pte = page_table_kmap_check(one_page_table_init(pmd),
19407 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19408 }
19409 }
19410
19411 -static inline int is_kernel_text(unsigned long addr)
19412 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19413 {
19414 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19415 - return 1;
19416 - return 0;
19417 + if ((start > ktla_ktva((unsigned long)_etext) ||
19418 + end <= ktla_ktva((unsigned long)_stext)) &&
19419 + (start > ktla_ktva((unsigned long)_einittext) ||
19420 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19421 +
19422 +#ifdef CONFIG_ACPI_SLEEP
19423 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19424 +#endif
19425 +
19426 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19427 + return 0;
19428 + return 1;
19429 }
19430
19431 /*
19432 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19433 unsigned long last_map_addr = end;
19434 unsigned long start_pfn, end_pfn;
19435 pgd_t *pgd_base = swapper_pg_dir;
19436 - int pgd_idx, pmd_idx, pte_ofs;
19437 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19438 unsigned long pfn;
19439 pgd_t *pgd;
19440 + pud_t *pud;
19441 pmd_t *pmd;
19442 pte_t *pte;
19443 unsigned pages_2m, pages_4k;
19444 @@ -281,8 +282,13 @@ repeat:
19445 pfn = start_pfn;
19446 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19447 pgd = pgd_base + pgd_idx;
19448 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19449 - pmd = one_md_table_init(pgd);
19450 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19451 + pud = pud_offset(pgd, 0);
19452 + pmd = pmd_offset(pud, 0);
19453 +
19454 +#ifdef CONFIG_X86_PAE
19455 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19456 +#endif
19457
19458 if (pfn >= end_pfn)
19459 continue;
19460 @@ -294,14 +300,13 @@ repeat:
19461 #endif
19462 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19463 pmd++, pmd_idx++) {
19464 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19465 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19466
19467 /*
19468 * Map with big pages if possible, otherwise
19469 * create normal page tables:
19470 */
19471 if (use_pse) {
19472 - unsigned int addr2;
19473 pgprot_t prot = PAGE_KERNEL_LARGE;
19474 /*
19475 * first pass will use the same initial
19476 @@ -311,11 +316,7 @@ repeat:
19477 __pgprot(PTE_IDENT_ATTR |
19478 _PAGE_PSE);
19479
19480 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19481 - PAGE_OFFSET + PAGE_SIZE-1;
19482 -
19483 - if (is_kernel_text(addr) ||
19484 - is_kernel_text(addr2))
19485 + if (is_kernel_text(address, address + PMD_SIZE))
19486 prot = PAGE_KERNEL_LARGE_EXEC;
19487
19488 pages_2m++;
19489 @@ -332,7 +333,7 @@ repeat:
19490 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19491 pte += pte_ofs;
19492 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19493 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19494 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19495 pgprot_t prot = PAGE_KERNEL;
19496 /*
19497 * first pass will use the same initial
19498 @@ -340,7 +341,7 @@ repeat:
19499 */
19500 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19501
19502 - if (is_kernel_text(addr))
19503 + if (is_kernel_text(address, address + PAGE_SIZE))
19504 prot = PAGE_KERNEL_EXEC;
19505
19506 pages_4k++;
19507 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19508
19509 pud = pud_offset(pgd, va);
19510 pmd = pmd_offset(pud, va);
19511 - if (!pmd_present(*pmd))
19512 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19513 break;
19514
19515 pte = pte_offset_kernel(pmd, va);
19516 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19517
19518 static void __init pagetable_init(void)
19519 {
19520 - pgd_t *pgd_base = swapper_pg_dir;
19521 -
19522 - permanent_kmaps_init(pgd_base);
19523 + permanent_kmaps_init(swapper_pg_dir);
19524 }
19525
19526 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19527 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19528 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19529
19530 /* user-defined highmem size */
19531 @@ -757,6 +756,12 @@ void __init mem_init(void)
19532
19533 pci_iommu_alloc();
19534
19535 +#ifdef CONFIG_PAX_PER_CPU_PGD
19536 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19537 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19538 + KERNEL_PGD_PTRS);
19539 +#endif
19540 +
19541 #ifdef CONFIG_FLATMEM
19542 BUG_ON(!mem_map);
19543 #endif
19544 @@ -774,7 +779,7 @@ void __init mem_init(void)
19545 set_highmem_pages_init();
19546
19547 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19548 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19549 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19550 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19551
19552 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19553 @@ -815,10 +820,10 @@ void __init mem_init(void)
19554 ((unsigned long)&__init_end -
19555 (unsigned long)&__init_begin) >> 10,
19556
19557 - (unsigned long)&_etext, (unsigned long)&_edata,
19558 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19559 + (unsigned long)&_sdata, (unsigned long)&_edata,
19560 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19561
19562 - (unsigned long)&_text, (unsigned long)&_etext,
19563 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19564 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19565
19566 /*
19567 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19568 if (!kernel_set_to_readonly)
19569 return;
19570
19571 + start = ktla_ktva(start);
19572 pr_debug("Set kernel text: %lx - %lx for read write\n",
19573 start, start+size);
19574
19575 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19576 if (!kernel_set_to_readonly)
19577 return;
19578
19579 + start = ktla_ktva(start);
19580 pr_debug("Set kernel text: %lx - %lx for read only\n",
19581 start, start+size);
19582
19583 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19584 unsigned long start = PFN_ALIGN(_text);
19585 unsigned long size = PFN_ALIGN(_etext) - start;
19586
19587 + start = ktla_ktva(start);
19588 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19589 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19590 size >> 10);
19591 diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
19592 --- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19593 +++ linux-3.0.4/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19594 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19595 * around without checking the pgd every time.
19596 */
19597
19598 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19599 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19600 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19601
19602 int force_personality32;
19603 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19604
19605 for (address = start; address <= end; address += PGDIR_SIZE) {
19606 const pgd_t *pgd_ref = pgd_offset_k(address);
19607 +
19608 +#ifdef CONFIG_PAX_PER_CPU_PGD
19609 + unsigned long cpu;
19610 +#else
19611 struct page *page;
19612 +#endif
19613
19614 if (pgd_none(*pgd_ref))
19615 continue;
19616
19617 spin_lock(&pgd_lock);
19618 +
19619 +#ifdef CONFIG_PAX_PER_CPU_PGD
19620 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19621 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19622 +#else
19623 list_for_each_entry(page, &pgd_list, lru) {
19624 pgd_t *pgd;
19625 spinlock_t *pgt_lock;
19626 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19627 /* the pgt_lock only for Xen */
19628 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19629 spin_lock(pgt_lock);
19630 +#endif
19631
19632 if (pgd_none(*pgd))
19633 set_pgd(pgd, *pgd_ref);
19634 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19635 BUG_ON(pgd_page_vaddr(*pgd)
19636 != pgd_page_vaddr(*pgd_ref));
19637
19638 +#ifndef CONFIG_PAX_PER_CPU_PGD
19639 spin_unlock(pgt_lock);
19640 +#endif
19641 +
19642 }
19643 spin_unlock(&pgd_lock);
19644 }
19645 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19646 pmd = fill_pmd(pud, vaddr);
19647 pte = fill_pte(pmd, vaddr);
19648
19649 + pax_open_kernel();
19650 set_pte(pte, new_pte);
19651 + pax_close_kernel();
19652
19653 /*
19654 * It's enough to flush this one mapping.
19655 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19656 pgd = pgd_offset_k((unsigned long)__va(phys));
19657 if (pgd_none(*pgd)) {
19658 pud = (pud_t *) spp_getpage();
19659 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19660 - _PAGE_USER));
19661 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19662 }
19663 pud = pud_offset(pgd, (unsigned long)__va(phys));
19664 if (pud_none(*pud)) {
19665 pmd = (pmd_t *) spp_getpage();
19666 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19667 - _PAGE_USER));
19668 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19669 }
19670 pmd = pmd_offset(pud, phys);
19671 BUG_ON(!pmd_none(*pmd));
19672 @@ -693,6 +707,12 @@ void __init mem_init(void)
19673
19674 pci_iommu_alloc();
19675
19676 +#ifdef CONFIG_PAX_PER_CPU_PGD
19677 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19678 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19679 + KERNEL_PGD_PTRS);
19680 +#endif
19681 +
19682 /* clear_bss() already clear the empty_zero_page */
19683
19684 reservedpages = 0;
19685 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19686 static struct vm_area_struct gate_vma = {
19687 .vm_start = VSYSCALL_START,
19688 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19689 - .vm_page_prot = PAGE_READONLY_EXEC,
19690 - .vm_flags = VM_READ | VM_EXEC
19691 + .vm_page_prot = PAGE_READONLY,
19692 + .vm_flags = VM_READ
19693 };
19694
19695 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19696 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19697
19698 const char *arch_vma_name(struct vm_area_struct *vma)
19699 {
19700 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19701 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19702 return "[vdso]";
19703 if (vma == &gate_vma)
19704 return "[vsyscall]";
19705 diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
19706 --- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19707 +++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19708 @@ -31,7 +31,7 @@ int direct_gbpages
19709 static void __init find_early_table_space(unsigned long end, int use_pse,
19710 int use_gbpages)
19711 {
19712 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19713 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19714 phys_addr_t base;
19715
19716 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19717 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19718 */
19719 int devmem_is_allowed(unsigned long pagenr)
19720 {
19721 - if (pagenr <= 256)
19722 +#ifdef CONFIG_GRKERNSEC_KMEM
19723 + /* allow BDA */
19724 + if (!pagenr)
19725 + return 1;
19726 + /* allow EBDA */
19727 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19728 + return 1;
19729 +#else
19730 + if (!pagenr)
19731 + return 1;
19732 +#ifdef CONFIG_VM86
19733 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19734 + return 1;
19735 +#endif
19736 +#endif
19737 +
19738 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19739 return 1;
19740 +#ifdef CONFIG_GRKERNSEC_KMEM
19741 + /* throw out everything else below 1MB */
19742 + if (pagenr <= 256)
19743 + return 0;
19744 +#endif
19745 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19746 return 0;
19747 if (!page_is_ram(pagenr))
19748 return 1;
19749 +
19750 return 0;
19751 }
19752
19753 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19754
19755 void free_initmem(void)
19756 {
19757 +
19758 +#ifdef CONFIG_PAX_KERNEXEC
19759 +#ifdef CONFIG_X86_32
19760 + /* PaX: limit KERNEL_CS to actual size */
19761 + unsigned long addr, limit;
19762 + struct desc_struct d;
19763 + int cpu;
19764 +
19765 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19766 + limit = (limit - 1UL) >> PAGE_SHIFT;
19767 +
19768 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19769 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19770 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19771 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19772 + }
19773 +
19774 + /* PaX: make KERNEL_CS read-only */
19775 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19776 + if (!paravirt_enabled())
19777 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19778 +/*
19779 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19780 + pgd = pgd_offset_k(addr);
19781 + pud = pud_offset(pgd, addr);
19782 + pmd = pmd_offset(pud, addr);
19783 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19784 + }
19785 +*/
19786 +#ifdef CONFIG_X86_PAE
19787 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19788 +/*
19789 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19790 + pgd = pgd_offset_k(addr);
19791 + pud = pud_offset(pgd, addr);
19792 + pmd = pmd_offset(pud, addr);
19793 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19794 + }
19795 +*/
19796 +#endif
19797 +
19798 +#ifdef CONFIG_MODULES
19799 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19800 +#endif
19801 +
19802 +#else
19803 + pgd_t *pgd;
19804 + pud_t *pud;
19805 + pmd_t *pmd;
19806 + unsigned long addr, end;
19807 +
19808 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19809 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19810 + pgd = pgd_offset_k(addr);
19811 + pud = pud_offset(pgd, addr);
19812 + pmd = pmd_offset(pud, addr);
19813 + if (!pmd_present(*pmd))
19814 + continue;
19815 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19816 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19817 + else
19818 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19819 + }
19820 +
19821 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19822 + end = addr + KERNEL_IMAGE_SIZE;
19823 + for (; addr < end; addr += PMD_SIZE) {
19824 + pgd = pgd_offset_k(addr);
19825 + pud = pud_offset(pgd, addr);
19826 + pmd = pmd_offset(pud, addr);
19827 + if (!pmd_present(*pmd))
19828 + continue;
19829 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19830 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19831 + }
19832 +#endif
19833 +
19834 + flush_tlb_all();
19835 +#endif
19836 +
19837 free_init_pages("unused kernel memory",
19838 (unsigned long)(&__init_begin),
19839 (unsigned long)(&__init_end));
19840 diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
19841 --- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19842 +++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19843 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19844 type = kmap_atomic_idx_push();
19845 idx = type + KM_TYPE_NR * smp_processor_id();
19846 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19847 +
19848 + pax_open_kernel();
19849 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19850 + pax_close_kernel();
19851 +
19852 arch_flush_lazy_mmu_mode();
19853
19854 return (void *)vaddr;
19855 diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
19856 --- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19857 +++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19858 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19859 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19860 int is_ram = page_is_ram(pfn);
19861
19862 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19863 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19864 return NULL;
19865 WARN_ON_ONCE(is_ram);
19866 }
19867 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19868 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19869
19870 static __initdata int after_paging_init;
19871 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19872 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19873
19874 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19875 {
19876 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19877 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19878
19879 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19880 - memset(bm_pte, 0, sizeof(bm_pte));
19881 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
19882 + pmd_populate_user(&init_mm, pmd, bm_pte);
19883
19884 /*
19885 * The boot-ioremap range spans multiple pmds, for which
19886 diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
19887 --- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19888 +++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19889 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19890 * memory (e.g. tracked pages)? For now, we need this to avoid
19891 * invoking kmemcheck for PnP BIOS calls.
19892 */
19893 - if (regs->flags & X86_VM_MASK)
19894 + if (v8086_mode(regs))
19895 return false;
19896 - if (regs->cs != __KERNEL_CS)
19897 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19898 return false;
19899
19900 pte = kmemcheck_pte_lookup(address);
19901 diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
19902 --- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19903 +++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19904 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19905 * Leave an at least ~128 MB hole with possible stack randomization.
19906 */
19907 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19908 -#define MAX_GAP (TASK_SIZE/6*5)
19909 +#define MAX_GAP (pax_task_size/6*5)
19910
19911 /*
19912 * True on X86_32 or when emulating IA32 on X86_64
19913 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19914 return rnd << PAGE_SHIFT;
19915 }
19916
19917 -static unsigned long mmap_base(void)
19918 +static unsigned long mmap_base(struct mm_struct *mm)
19919 {
19920 unsigned long gap = rlimit(RLIMIT_STACK);
19921 + unsigned long pax_task_size = TASK_SIZE;
19922 +
19923 +#ifdef CONFIG_PAX_SEGMEXEC
19924 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19925 + pax_task_size = SEGMEXEC_TASK_SIZE;
19926 +#endif
19927
19928 if (gap < MIN_GAP)
19929 gap = MIN_GAP;
19930 else if (gap > MAX_GAP)
19931 gap = MAX_GAP;
19932
19933 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19934 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19935 }
19936
19937 /*
19938 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19939 * does, but not when emulating X86_32
19940 */
19941 -static unsigned long mmap_legacy_base(void)
19942 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
19943 {
19944 - if (mmap_is_ia32())
19945 + if (mmap_is_ia32()) {
19946 +
19947 +#ifdef CONFIG_PAX_SEGMEXEC
19948 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19949 + return SEGMEXEC_TASK_UNMAPPED_BASE;
19950 + else
19951 +#endif
19952 +
19953 return TASK_UNMAPPED_BASE;
19954 - else
19955 + } else
19956 return TASK_UNMAPPED_BASE + mmap_rnd();
19957 }
19958
19959 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19960 void arch_pick_mmap_layout(struct mm_struct *mm)
19961 {
19962 if (mmap_is_legacy()) {
19963 - mm->mmap_base = mmap_legacy_base();
19964 + mm->mmap_base = mmap_legacy_base(mm);
19965 +
19966 +#ifdef CONFIG_PAX_RANDMMAP
19967 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19968 + mm->mmap_base += mm->delta_mmap;
19969 +#endif
19970 +
19971 mm->get_unmapped_area = arch_get_unmapped_area;
19972 mm->unmap_area = arch_unmap_area;
19973 } else {
19974 - mm->mmap_base = mmap_base();
19975 + mm->mmap_base = mmap_base(mm);
19976 +
19977 +#ifdef CONFIG_PAX_RANDMMAP
19978 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19979 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19980 +#endif
19981 +
19982 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19983 mm->unmap_area = arch_unmap_area_topdown;
19984 }
19985 diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
19986 --- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19987 +++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19988 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19989 break;
19990 default:
19991 {
19992 - unsigned char *ip = (unsigned char *)instptr;
19993 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19994 my_trace->opcode = MMIO_UNKNOWN_OP;
19995 my_trace->width = 0;
19996 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
19997 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
19998 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
19999 void __iomem *addr)
20000 {
20001 - static atomic_t next_id;
20002 + static atomic_unchecked_t next_id;
20003 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20004 /* These are page-unaligned. */
20005 struct mmiotrace_map map = {
20006 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20007 .private = trace
20008 },
20009 .phys = offset,
20010 - .id = atomic_inc_return(&next_id)
20011 + .id = atomic_inc_return_unchecked(&next_id)
20012 };
20013 map.map_id = trace->id;
20014
20015 diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20016 --- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20017 +++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20018 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20019 */
20020 #ifdef CONFIG_PCI_BIOS
20021 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20022 - pgprot_val(forbidden) |= _PAGE_NX;
20023 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20024 #endif
20025
20026 /*
20027 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20028 * Does not cover __inittext since that is gone later on. On
20029 * 64bit we do not enforce !NX on the low mapping
20030 */
20031 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20032 - pgprot_val(forbidden) |= _PAGE_NX;
20033 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20034 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20035
20036 +#ifdef CONFIG_DEBUG_RODATA
20037 /*
20038 * The .rodata section needs to be read-only. Using the pfn
20039 * catches all aliases.
20040 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20041 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20042 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20043 pgprot_val(forbidden) |= _PAGE_RW;
20044 +#endif
20045
20046 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20047 /*
20048 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20049 }
20050 #endif
20051
20052 +#ifdef CONFIG_PAX_KERNEXEC
20053 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20054 + pgprot_val(forbidden) |= _PAGE_RW;
20055 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20056 + }
20057 +#endif
20058 +
20059 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20060
20061 return prot;
20062 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20063 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20064 {
20065 /* change init_mm */
20066 + pax_open_kernel();
20067 set_pte_atomic(kpte, pte);
20068 +
20069 #ifdef CONFIG_X86_32
20070 if (!SHARED_KERNEL_PMD) {
20071 +
20072 +#ifdef CONFIG_PAX_PER_CPU_PGD
20073 + unsigned long cpu;
20074 +#else
20075 struct page *page;
20076 +#endif
20077
20078 +#ifdef CONFIG_PAX_PER_CPU_PGD
20079 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20080 + pgd_t *pgd = get_cpu_pgd(cpu);
20081 +#else
20082 list_for_each_entry(page, &pgd_list, lru) {
20083 - pgd_t *pgd;
20084 + pgd_t *pgd = (pgd_t *)page_address(page);
20085 +#endif
20086 +
20087 pud_t *pud;
20088 pmd_t *pmd;
20089
20090 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20091 + pgd += pgd_index(address);
20092 pud = pud_offset(pgd, address);
20093 pmd = pmd_offset(pud, address);
20094 set_pte_atomic((pte_t *)pmd, pte);
20095 }
20096 }
20097 #endif
20098 + pax_close_kernel();
20099 }
20100
20101 static int
20102 diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20103 --- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20104 +++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20105 @@ -36,7 +36,7 @@ enum {
20106
20107 static int pte_testbit(pte_t pte)
20108 {
20109 - return pte_flags(pte) & _PAGE_UNUSED1;
20110 + return pte_flags(pte) & _PAGE_CPA_TEST;
20111 }
20112
20113 struct split_state {
20114 diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20115 --- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20116 +++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20117 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20118
20119 if (!entry) {
20120 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20121 - current->comm, current->pid, start, end);
20122 + current->comm, task_pid_nr(current), start, end);
20123 return -EINVAL;
20124 }
20125
20126 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20127 while (cursor < to) {
20128 if (!devmem_is_allowed(pfn)) {
20129 printk(KERN_INFO
20130 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20131 - current->comm, from, to);
20132 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20133 + current->comm, from, to, cursor);
20134 return 0;
20135 }
20136 cursor += PAGE_SIZE;
20137 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20138 printk(KERN_INFO
20139 "%s:%d ioremap_change_attr failed %s "
20140 "for %Lx-%Lx\n",
20141 - current->comm, current->pid,
20142 + current->comm, task_pid_nr(current),
20143 cattr_name(flags),
20144 base, (unsigned long long)(base + size));
20145 return -EINVAL;
20146 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20147 if (want_flags != flags) {
20148 printk(KERN_WARNING
20149 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20150 - current->comm, current->pid,
20151 + current->comm, task_pid_nr(current),
20152 cattr_name(want_flags),
20153 (unsigned long long)paddr,
20154 (unsigned long long)(paddr + size),
20155 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20156 free_memtype(paddr, paddr + size);
20157 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20158 " for %Lx-%Lx, got %s\n",
20159 - current->comm, current->pid,
20160 + current->comm, task_pid_nr(current),
20161 cattr_name(want_flags),
20162 (unsigned long long)paddr,
20163 (unsigned long long)(paddr + size),
20164 diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20165 --- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20166 +++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20167 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20168 int i;
20169 enum reason_type rv = OTHERS;
20170
20171 - p = (unsigned char *)ins_addr;
20172 + p = (unsigned char *)ktla_ktva(ins_addr);
20173 p += skip_prefix(p, &prf);
20174 p += get_opcode(p, &opcode);
20175
20176 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20177 struct prefix_bits prf;
20178 int i;
20179
20180 - p = (unsigned char *)ins_addr;
20181 + p = (unsigned char *)ktla_ktva(ins_addr);
20182 p += skip_prefix(p, &prf);
20183 p += get_opcode(p, &opcode);
20184
20185 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20186 struct prefix_bits prf;
20187 int i;
20188
20189 - p = (unsigned char *)ins_addr;
20190 + p = (unsigned char *)ktla_ktva(ins_addr);
20191 p += skip_prefix(p, &prf);
20192 p += get_opcode(p, &opcode);
20193
20194 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20195 struct prefix_bits prf;
20196 int i;
20197
20198 - p = (unsigned char *)ins_addr;
20199 + p = (unsigned char *)ktla_ktva(ins_addr);
20200 p += skip_prefix(p, &prf);
20201 p += get_opcode(p, &opcode);
20202 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20203 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20204 struct prefix_bits prf;
20205 int i;
20206
20207 - p = (unsigned char *)ins_addr;
20208 + p = (unsigned char *)ktla_ktva(ins_addr);
20209 p += skip_prefix(p, &prf);
20210 p += get_opcode(p, &opcode);
20211 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20212 diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20213 --- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20214 +++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20215 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20216 return;
20217 }
20218 pte = pte_offset_kernel(pmd, vaddr);
20219 +
20220 + pax_open_kernel();
20221 if (pte_val(pteval))
20222 set_pte_at(&init_mm, vaddr, pte, pteval);
20223 else
20224 pte_clear(&init_mm, vaddr, pte);
20225 + pax_close_kernel();
20226
20227 /*
20228 * It's enough to flush this one mapping.
20229 diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20230 --- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20231 +++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20232 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20233 list_del(&page->lru);
20234 }
20235
20236 -#define UNSHARED_PTRS_PER_PGD \
20237 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20238 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20239 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20240
20241 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20242 +{
20243 + while (count--)
20244 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20245 +}
20246 +#endif
20247 +
20248 +#ifdef CONFIG_PAX_PER_CPU_PGD
20249 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20250 +{
20251 + while (count--)
20252 +
20253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20254 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20255 +#else
20256 + *dst++ = *src++;
20257 +#endif
20258
20259 +}
20260 +#endif
20261 +
20262 +#ifdef CONFIG_X86_64
20263 +#define pxd_t pud_t
20264 +#define pyd_t pgd_t
20265 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20266 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20267 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20268 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20269 +#define PYD_SIZE PGDIR_SIZE
20270 +#else
20271 +#define pxd_t pmd_t
20272 +#define pyd_t pud_t
20273 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20274 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20275 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20276 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20277 +#define PYD_SIZE PUD_SIZE
20278 +#endif
20279 +
20280 +#ifdef CONFIG_PAX_PER_CPU_PGD
20281 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20282 +static inline void pgd_dtor(pgd_t *pgd) {}
20283 +#else
20284 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20285 {
20286 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20287 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20288 pgd_list_del(pgd);
20289 spin_unlock(&pgd_lock);
20290 }
20291 +#endif
20292
20293 /*
20294 * List of all pgd's needed for non-PAE so it can invalidate entries
20295 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20296 * -- wli
20297 */
20298
20299 -#ifdef CONFIG_X86_PAE
20300 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20301 /*
20302 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20303 * updating the top-level pagetable entries to guarantee the
20304 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20305 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20306 * and initialize the kernel pmds here.
20307 */
20308 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20309 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20310
20311 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20312 {
20313 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20314 */
20315 flush_tlb_mm(mm);
20316 }
20317 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20318 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20319 #else /* !CONFIG_X86_PAE */
20320
20321 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20322 -#define PREALLOCATED_PMDS 0
20323 +#define PREALLOCATED_PXDS 0
20324
20325 #endif /* CONFIG_X86_PAE */
20326
20327 -static void free_pmds(pmd_t *pmds[])
20328 +static void free_pxds(pxd_t *pxds[])
20329 {
20330 int i;
20331
20332 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20333 - if (pmds[i])
20334 - free_page((unsigned long)pmds[i]);
20335 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20336 + if (pxds[i])
20337 + free_page((unsigned long)pxds[i]);
20338 }
20339
20340 -static int preallocate_pmds(pmd_t *pmds[])
20341 +static int preallocate_pxds(pxd_t *pxds[])
20342 {
20343 int i;
20344 bool failed = false;
20345
20346 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20347 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20348 - if (pmd == NULL)
20349 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20350 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20351 + if (pxd == NULL)
20352 failed = true;
20353 - pmds[i] = pmd;
20354 + pxds[i] = pxd;
20355 }
20356
20357 if (failed) {
20358 - free_pmds(pmds);
20359 + free_pxds(pxds);
20360 return -ENOMEM;
20361 }
20362
20363 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20364 * preallocate which never got a corresponding vma will need to be
20365 * freed manually.
20366 */
20367 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20368 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20369 {
20370 int i;
20371
20372 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20373 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20374 pgd_t pgd = pgdp[i];
20375
20376 if (pgd_val(pgd) != 0) {
20377 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20378 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20379
20380 - pgdp[i] = native_make_pgd(0);
20381 + set_pgd(pgdp + i, native_make_pgd(0));
20382
20383 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20384 - pmd_free(mm, pmd);
20385 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20386 + pxd_free(mm, pxd);
20387 }
20388 }
20389 }
20390
20391 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20392 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20393 {
20394 - pud_t *pud;
20395 + pyd_t *pyd;
20396 unsigned long addr;
20397 int i;
20398
20399 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20400 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20401 return;
20402
20403 - pud = pud_offset(pgd, 0);
20404 +#ifdef CONFIG_X86_64
20405 + pyd = pyd_offset(mm, 0L);
20406 +#else
20407 + pyd = pyd_offset(pgd, 0L);
20408 +#endif
20409
20410 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20411 - i++, pud++, addr += PUD_SIZE) {
20412 - pmd_t *pmd = pmds[i];
20413 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20414 + i++, pyd++, addr += PYD_SIZE) {
20415 + pxd_t *pxd = pxds[i];
20416
20417 if (i >= KERNEL_PGD_BOUNDARY)
20418 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20419 - sizeof(pmd_t) * PTRS_PER_PMD);
20420 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20421 + sizeof(pxd_t) * PTRS_PER_PMD);
20422
20423 - pud_populate(mm, pud, pmd);
20424 + pyd_populate(mm, pyd, pxd);
20425 }
20426 }
20427
20428 pgd_t *pgd_alloc(struct mm_struct *mm)
20429 {
20430 pgd_t *pgd;
20431 - pmd_t *pmds[PREALLOCATED_PMDS];
20432 + pxd_t *pxds[PREALLOCATED_PXDS];
20433
20434 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20435
20436 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20437
20438 mm->pgd = pgd;
20439
20440 - if (preallocate_pmds(pmds) != 0)
20441 + if (preallocate_pxds(pxds) != 0)
20442 goto out_free_pgd;
20443
20444 if (paravirt_pgd_alloc(mm) != 0)
20445 - goto out_free_pmds;
20446 + goto out_free_pxds;
20447
20448 /*
20449 * Make sure that pre-populating the pmds is atomic with
20450 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20451 spin_lock(&pgd_lock);
20452
20453 pgd_ctor(mm, pgd);
20454 - pgd_prepopulate_pmd(mm, pgd, pmds);
20455 + pgd_prepopulate_pxd(mm, pgd, pxds);
20456
20457 spin_unlock(&pgd_lock);
20458
20459 return pgd;
20460
20461 -out_free_pmds:
20462 - free_pmds(pmds);
20463 +out_free_pxds:
20464 + free_pxds(pxds);
20465 out_free_pgd:
20466 free_page((unsigned long)pgd);
20467 out:
20468 @@ -295,7 +344,7 @@ out:
20469
20470 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20471 {
20472 - pgd_mop_up_pmds(mm, pgd);
20473 + pgd_mop_up_pxds(mm, pgd);
20474 pgd_dtor(pgd);
20475 paravirt_pgd_free(mm, pgd);
20476 free_page((unsigned long)pgd);
20477 diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
20478 --- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20479 +++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20480 @@ -5,8 +5,10 @@
20481 #include <asm/pgtable.h>
20482 #include <asm/proto.h>
20483
20484 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20485 static int disable_nx __cpuinitdata;
20486
20487 +#ifndef CONFIG_PAX_PAGEEXEC
20488 /*
20489 * noexec = on|off
20490 *
20491 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20492 return 0;
20493 }
20494 early_param("noexec", noexec_setup);
20495 +#endif
20496 +
20497 +#endif
20498
20499 void __cpuinit x86_configure_nx(void)
20500 {
20501 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20502 if (cpu_has_nx && !disable_nx)
20503 __supported_pte_mask |= _PAGE_NX;
20504 else
20505 +#endif
20506 __supported_pte_mask &= ~_PAGE_NX;
20507 }
20508
20509 diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
20510 --- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20511 +++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20512 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20513 BUG();
20514 cpumask_clear_cpu(cpu,
20515 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20516 +
20517 +#ifndef CONFIG_PAX_PER_CPU_PGD
20518 load_cr3(swapper_pg_dir);
20519 +#endif
20520 +
20521 }
20522 EXPORT_SYMBOL_GPL(leave_mm);
20523
20524 diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
20525 --- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20526 +++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20527 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20528 module_free(NULL, image);
20529 return;
20530 }
20531 + pax_open_kernel();
20532 memcpy(image + proglen, temp, ilen);
20533 + pax_close_kernel();
20534 }
20535 proglen += ilen;
20536 addrs[i] = proglen;
20537 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20538 break;
20539 }
20540 if (proglen == oldproglen) {
20541 - image = module_alloc(max_t(unsigned int,
20542 + image = module_alloc_exec(max_t(unsigned int,
20543 proglen,
20544 sizeof(struct work_struct)));
20545 if (!image)
20546 diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
20547 --- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20548 +++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20549 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20550 {
20551 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20552
20553 - if (!user_mode_vm(regs)) {
20554 + if (!user_mode(regs)) {
20555 unsigned long stack = kernel_stack_pointer(regs);
20556 if (depth)
20557 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20558 diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
20559 --- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20560 +++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20561 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20562 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20563 pci_mmcfg_late_init();
20564 pcibios_enable_irq = mrst_pci_irq_enable;
20565 - pci_root_ops = pci_mrst_ops;
20566 + pax_open_kernel();
20567 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20568 + pax_close_kernel();
20569 /* Continue with standard init */
20570 return 1;
20571 }
20572 diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
20573 --- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20574 +++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20575 @@ -79,50 +79,93 @@ union bios32 {
20576 static struct {
20577 unsigned long address;
20578 unsigned short segment;
20579 -} bios32_indirect = { 0, __KERNEL_CS };
20580 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20581
20582 /*
20583 * Returns the entry point for the given service, NULL on error
20584 */
20585
20586 -static unsigned long bios32_service(unsigned long service)
20587 +static unsigned long __devinit bios32_service(unsigned long service)
20588 {
20589 unsigned char return_code; /* %al */
20590 unsigned long address; /* %ebx */
20591 unsigned long length; /* %ecx */
20592 unsigned long entry; /* %edx */
20593 unsigned long flags;
20594 + struct desc_struct d, *gdt;
20595
20596 local_irq_save(flags);
20597 - __asm__("lcall *(%%edi); cld"
20598 +
20599 + gdt = get_cpu_gdt_table(smp_processor_id());
20600 +
20601 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20602 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20603 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20604 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20605 +
20606 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20607 : "=a" (return_code),
20608 "=b" (address),
20609 "=c" (length),
20610 "=d" (entry)
20611 : "0" (service),
20612 "1" (0),
20613 - "D" (&bios32_indirect));
20614 + "D" (&bios32_indirect),
20615 + "r"(__PCIBIOS_DS)
20616 + : "memory");
20617 +
20618 + pax_open_kernel();
20619 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20620 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20621 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20622 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20623 + pax_close_kernel();
20624 +
20625 local_irq_restore(flags);
20626
20627 switch (return_code) {
20628 - case 0:
20629 - return address + entry;
20630 - case 0x80: /* Not present */
20631 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20632 - return 0;
20633 - default: /* Shouldn't happen */
20634 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20635 - service, return_code);
20636 + case 0: {
20637 + int cpu;
20638 + unsigned char flags;
20639 +
20640 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20641 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20642 + printk(KERN_WARNING "bios32_service: not valid\n");
20643 return 0;
20644 + }
20645 + address = address + PAGE_OFFSET;
20646 + length += 16UL; /* some BIOSs underreport this... */
20647 + flags = 4;
20648 + if (length >= 64*1024*1024) {
20649 + length >>= PAGE_SHIFT;
20650 + flags |= 8;
20651 + }
20652 +
20653 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20654 + gdt = get_cpu_gdt_table(cpu);
20655 + pack_descriptor(&d, address, length, 0x9b, flags);
20656 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20657 + pack_descriptor(&d, address, length, 0x93, flags);
20658 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20659 + }
20660 + return entry;
20661 + }
20662 + case 0x80: /* Not present */
20663 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20664 + return 0;
20665 + default: /* Shouldn't happen */
20666 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20667 + service, return_code);
20668 + return 0;
20669 }
20670 }
20671
20672 static struct {
20673 unsigned long address;
20674 unsigned short segment;
20675 -} pci_indirect = { 0, __KERNEL_CS };
20676 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20677
20678 -static int pci_bios_present;
20679 +static int pci_bios_present __read_only;
20680
20681 static int __devinit check_pcibios(void)
20682 {
20683 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20684 unsigned long flags, pcibios_entry;
20685
20686 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20687 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20688 + pci_indirect.address = pcibios_entry;
20689
20690 local_irq_save(flags);
20691 - __asm__(
20692 - "lcall *(%%edi); cld\n\t"
20693 + __asm__("movw %w6, %%ds\n\t"
20694 + "lcall *%%ss:(%%edi); cld\n\t"
20695 + "push %%ss\n\t"
20696 + "pop %%ds\n\t"
20697 "jc 1f\n\t"
20698 "xor %%ah, %%ah\n"
20699 "1:"
20700 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20701 "=b" (ebx),
20702 "=c" (ecx)
20703 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20704 - "D" (&pci_indirect)
20705 + "D" (&pci_indirect),
20706 + "r" (__PCIBIOS_DS)
20707 : "memory");
20708 local_irq_restore(flags);
20709
20710 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20711
20712 switch (len) {
20713 case 1:
20714 - __asm__("lcall *(%%esi); cld\n\t"
20715 + __asm__("movw %w6, %%ds\n\t"
20716 + "lcall *%%ss:(%%esi); cld\n\t"
20717 + "push %%ss\n\t"
20718 + "pop %%ds\n\t"
20719 "jc 1f\n\t"
20720 "xor %%ah, %%ah\n"
20721 "1:"
20722 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20723 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20724 "b" (bx),
20725 "D" ((long)reg),
20726 - "S" (&pci_indirect));
20727 + "S" (&pci_indirect),
20728 + "r" (__PCIBIOS_DS));
20729 /*
20730 * Zero-extend the result beyond 8 bits, do not trust the
20731 * BIOS having done it:
20732 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20733 *value &= 0xff;
20734 break;
20735 case 2:
20736 - __asm__("lcall *(%%esi); cld\n\t"
20737 + __asm__("movw %w6, %%ds\n\t"
20738 + "lcall *%%ss:(%%esi); cld\n\t"
20739 + "push %%ss\n\t"
20740 + "pop %%ds\n\t"
20741 "jc 1f\n\t"
20742 "xor %%ah, %%ah\n"
20743 "1:"
20744 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20745 : "1" (PCIBIOS_READ_CONFIG_WORD),
20746 "b" (bx),
20747 "D" ((long)reg),
20748 - "S" (&pci_indirect));
20749 + "S" (&pci_indirect),
20750 + "r" (__PCIBIOS_DS));
20751 /*
20752 * Zero-extend the result beyond 16 bits, do not trust the
20753 * BIOS having done it:
20754 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20755 *value &= 0xffff;
20756 break;
20757 case 4:
20758 - __asm__("lcall *(%%esi); cld\n\t"
20759 + __asm__("movw %w6, %%ds\n\t"
20760 + "lcall *%%ss:(%%esi); cld\n\t"
20761 + "push %%ss\n\t"
20762 + "pop %%ds\n\t"
20763 "jc 1f\n\t"
20764 "xor %%ah, %%ah\n"
20765 "1:"
20766 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20767 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20768 "b" (bx),
20769 "D" ((long)reg),
20770 - "S" (&pci_indirect));
20771 + "S" (&pci_indirect),
20772 + "r" (__PCIBIOS_DS));
20773 break;
20774 }
20775
20776 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20777
20778 switch (len) {
20779 case 1:
20780 - __asm__("lcall *(%%esi); cld\n\t"
20781 + __asm__("movw %w6, %%ds\n\t"
20782 + "lcall *%%ss:(%%esi); cld\n\t"
20783 + "push %%ss\n\t"
20784 + "pop %%ds\n\t"
20785 "jc 1f\n\t"
20786 "xor %%ah, %%ah\n"
20787 "1:"
20788 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20789 "c" (value),
20790 "b" (bx),
20791 "D" ((long)reg),
20792 - "S" (&pci_indirect));
20793 + "S" (&pci_indirect),
20794 + "r" (__PCIBIOS_DS));
20795 break;
20796 case 2:
20797 - __asm__("lcall *(%%esi); cld\n\t"
20798 + __asm__("movw %w6, %%ds\n\t"
20799 + "lcall *%%ss:(%%esi); cld\n\t"
20800 + "push %%ss\n\t"
20801 + "pop %%ds\n\t"
20802 "jc 1f\n\t"
20803 "xor %%ah, %%ah\n"
20804 "1:"
20805 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20806 "c" (value),
20807 "b" (bx),
20808 "D" ((long)reg),
20809 - "S" (&pci_indirect));
20810 + "S" (&pci_indirect),
20811 + "r" (__PCIBIOS_DS));
20812 break;
20813 case 4:
20814 - __asm__("lcall *(%%esi); cld\n\t"
20815 + __asm__("movw %w6, %%ds\n\t"
20816 + "lcall *%%ss:(%%esi); cld\n\t"
20817 + "push %%ss\n\t"
20818 + "pop %%ds\n\t"
20819 "jc 1f\n\t"
20820 "xor %%ah, %%ah\n"
20821 "1:"
20822 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20823 "c" (value),
20824 "b" (bx),
20825 "D" ((long)reg),
20826 - "S" (&pci_indirect));
20827 + "S" (&pci_indirect),
20828 + "r" (__PCIBIOS_DS));
20829 break;
20830 }
20831
20832 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20833
20834 DBG("PCI: Fetching IRQ routing table... ");
20835 __asm__("push %%es\n\t"
20836 + "movw %w8, %%ds\n\t"
20837 "push %%ds\n\t"
20838 "pop %%es\n\t"
20839 - "lcall *(%%esi); cld\n\t"
20840 + "lcall *%%ss:(%%esi); cld\n\t"
20841 "pop %%es\n\t"
20842 + "push %%ss\n\t"
20843 + "pop %%ds\n"
20844 "jc 1f\n\t"
20845 "xor %%ah, %%ah\n"
20846 "1:"
20847 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20848 "1" (0),
20849 "D" ((long) &opt),
20850 "S" (&pci_indirect),
20851 - "m" (opt)
20852 + "m" (opt),
20853 + "r" (__PCIBIOS_DS)
20854 : "memory");
20855 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20856 if (ret & 0xff00)
20857 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20858 {
20859 int ret;
20860
20861 - __asm__("lcall *(%%esi); cld\n\t"
20862 + __asm__("movw %w5, %%ds\n\t"
20863 + "lcall *%%ss:(%%esi); cld\n\t"
20864 + "push %%ss\n\t"
20865 + "pop %%ds\n"
20866 "jc 1f\n\t"
20867 "xor %%ah, %%ah\n"
20868 "1:"
20869 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20870 : "0" (PCIBIOS_SET_PCI_HW_INT),
20871 "b" ((dev->bus->number << 8) | dev->devfn),
20872 "c" ((irq << 8) | (pin + 10)),
20873 - "S" (&pci_indirect));
20874 + "S" (&pci_indirect),
20875 + "r" (__PCIBIOS_DS));
20876 return !(ret & 0xff00);
20877 }
20878 EXPORT_SYMBOL(pcibios_set_irq_routing);
20879 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
20880 --- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20881 +++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20882 @@ -38,70 +38,37 @@
20883 */
20884
20885 static unsigned long efi_rt_eflags;
20886 -static pgd_t efi_bak_pg_dir_pointer[2];
20887 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20888
20889 -void efi_call_phys_prelog(void)
20890 +void __init efi_call_phys_prelog(void)
20891 {
20892 - unsigned long cr4;
20893 - unsigned long temp;
20894 struct desc_ptr gdt_descr;
20895
20896 local_irq_save(efi_rt_eflags);
20897
20898 - /*
20899 - * If I don't have PAE, I should just duplicate two entries in page
20900 - * directory. If I have PAE, I just need to duplicate one entry in
20901 - * page directory.
20902 - */
20903 - cr4 = read_cr4_safe();
20904 -
20905 - if (cr4 & X86_CR4_PAE) {
20906 - efi_bak_pg_dir_pointer[0].pgd =
20907 - swapper_pg_dir[pgd_index(0)].pgd;
20908 - swapper_pg_dir[0].pgd =
20909 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20910 - } else {
20911 - efi_bak_pg_dir_pointer[0].pgd =
20912 - swapper_pg_dir[pgd_index(0)].pgd;
20913 - efi_bak_pg_dir_pointer[1].pgd =
20914 - swapper_pg_dir[pgd_index(0x400000)].pgd;
20915 - swapper_pg_dir[pgd_index(0)].pgd =
20916 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20917 - temp = PAGE_OFFSET + 0x400000;
20918 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20919 - swapper_pg_dir[pgd_index(temp)].pgd;
20920 - }
20921 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20922 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20923 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20924
20925 /*
20926 * After the lock is released, the original page table is restored.
20927 */
20928 __flush_tlb_all();
20929
20930 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
20931 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20932 gdt_descr.size = GDT_SIZE - 1;
20933 load_gdt(&gdt_descr);
20934 }
20935
20936 -void efi_call_phys_epilog(void)
20937 +void __init efi_call_phys_epilog(void)
20938 {
20939 - unsigned long cr4;
20940 struct desc_ptr gdt_descr;
20941
20942 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20943 + gdt_descr.address = get_cpu_gdt_table(0);
20944 gdt_descr.size = GDT_SIZE - 1;
20945 load_gdt(&gdt_descr);
20946
20947 - cr4 = read_cr4_safe();
20948 -
20949 - if (cr4 & X86_CR4_PAE) {
20950 - swapper_pg_dir[pgd_index(0)].pgd =
20951 - efi_bak_pg_dir_pointer[0].pgd;
20952 - } else {
20953 - swapper_pg_dir[pgd_index(0)].pgd =
20954 - efi_bak_pg_dir_pointer[0].pgd;
20955 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20956 - efi_bak_pg_dir_pointer[1].pgd;
20957 - }
20958 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20959
20960 /*
20961 * After the lock is released, the original page table is restored.
20962 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
20963 --- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20964 +++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20965 @@ -6,6 +6,7 @@
20966 */
20967
20968 #include <linux/linkage.h>
20969 +#include <linux/init.h>
20970 #include <asm/page_types.h>
20971
20972 /*
20973 @@ -20,7 +21,7 @@
20974 * service functions will comply with gcc calling convention, too.
20975 */
20976
20977 -.text
20978 +__INIT
20979 ENTRY(efi_call_phys)
20980 /*
20981 * 0. The function can only be called in Linux kernel. So CS has been
20982 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20983 * The mapping of lower virtual memory has been created in prelog and
20984 * epilog.
20985 */
20986 - movl $1f, %edx
20987 - subl $__PAGE_OFFSET, %edx
20988 - jmp *%edx
20989 + jmp 1f-__PAGE_OFFSET
20990 1:
20991
20992 /*
20993 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
20994 * parameter 2, ..., param n. To make things easy, we save the return
20995 * address of efi_call_phys in a global variable.
20996 */
20997 - popl %edx
20998 - movl %edx, saved_return_addr
20999 - /* get the function pointer into ECX*/
21000 - popl %ecx
21001 - movl %ecx, efi_rt_function_ptr
21002 - movl $2f, %edx
21003 - subl $__PAGE_OFFSET, %edx
21004 - pushl %edx
21005 + popl (saved_return_addr)
21006 + popl (efi_rt_function_ptr)
21007
21008 /*
21009 * 3. Clear PG bit in %CR0.
21010 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21011 /*
21012 * 5. Call the physical function.
21013 */
21014 - jmp *%ecx
21015 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21016
21017 -2:
21018 /*
21019 * 6. After EFI runtime service returns, control will return to
21020 * following instruction. We'd better readjust stack pointer first.
21021 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21022 movl %cr0, %edx
21023 orl $0x80000000, %edx
21024 movl %edx, %cr0
21025 - jmp 1f
21026 -1:
21027 +
21028 /*
21029 * 8. Now restore the virtual mode from flat mode by
21030 * adding EIP with PAGE_OFFSET.
21031 */
21032 - movl $1f, %edx
21033 - jmp *%edx
21034 + jmp 1f+__PAGE_OFFSET
21035 1:
21036
21037 /*
21038 * 9. Balance the stack. And because EAX contain the return value,
21039 * we'd better not clobber it.
21040 */
21041 - leal efi_rt_function_ptr, %edx
21042 - movl (%edx), %ecx
21043 - pushl %ecx
21044 + pushl (efi_rt_function_ptr)
21045
21046 /*
21047 - * 10. Push the saved return address onto the stack and return.
21048 + * 10. Return to the saved return address.
21049 */
21050 - leal saved_return_addr, %edx
21051 - movl (%edx), %ecx
21052 - pushl %ecx
21053 - ret
21054 + jmpl *(saved_return_addr)
21055 ENDPROC(efi_call_phys)
21056 .previous
21057
21058 -.data
21059 +__INITDATA
21060 saved_return_addr:
21061 .long 0
21062 efi_rt_function_ptr:
21063 diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21064 --- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21065 +++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21066 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21067 }
21068
21069 /* Reboot and power off are handled by the SCU on a MID device */
21070 -static void mrst_power_off(void)
21071 +static __noreturn void mrst_power_off(void)
21072 {
21073 intel_scu_ipc_simple_command(0xf1, 1);
21074 + BUG();
21075 }
21076
21077 -static void mrst_reboot(void)
21078 +static __noreturn void mrst_reboot(void)
21079 {
21080 intel_scu_ipc_simple_command(0xf1, 0);
21081 + BUG();
21082 }
21083
21084 /*
21085 diff -urNp linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c
21086 --- linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c 2011-07-21 22:17:23.000000000 -0400
21087 +++ linux-3.0.4/arch/x86/platform/olpc/olpc_dt.c 2011-08-29 22:31:19.000000000 -0400
21088 @@ -163,7 +163,7 @@ static struct of_pdt_ops prom_olpc_ops _
21089 .getchild = olpc_dt_getchild,
21090 .getsibling = olpc_dt_getsibling,
21091 .pkg2path = olpc_dt_pkg2path,
21092 -};
21093 +} __no_const;
21094
21095 void __init olpc_dt_build_devicetree(void)
21096 {
21097 diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21098 --- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21099 +++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21100 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21101 cpumask_t mask;
21102 struct reset_args reset_args;
21103
21104 + pax_track_stack();
21105 +
21106 reset_args.sender = sender;
21107 cpus_clear(mask);
21108 /* find a single cpu for each uvhub in this distribution mask */
21109 diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21110 --- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21111 +++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21112 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21113 static void fix_processor_context(void)
21114 {
21115 int cpu = smp_processor_id();
21116 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21117 + struct tss_struct *t = init_tss + cpu;
21118
21119 set_tss_desc(cpu, t); /*
21120 * This just modifies memory; should not be
21121 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21122 */
21123
21124 #ifdef CONFIG_X86_64
21125 + pax_open_kernel();
21126 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21127 + pax_close_kernel();
21128
21129 syscall_init(); /* This sets MSR_*STAR and related */
21130 #endif
21131 diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21132 --- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21133 +++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21134 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21135 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21136 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21137
21138 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21139 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21140 GCOV_PROFILE := n
21141
21142 #
21143 diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21144 --- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21145 +++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21146 @@ -25,6 +25,7 @@
21147 #include <asm/tlbflush.h>
21148 #include <asm/vdso.h>
21149 #include <asm/proto.h>
21150 +#include <asm/mman.h>
21151
21152 enum {
21153 VDSO_DISABLED = 0,
21154 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21155 void enable_sep_cpu(void)
21156 {
21157 int cpu = get_cpu();
21158 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21159 + struct tss_struct *tss = init_tss + cpu;
21160
21161 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21162 put_cpu();
21163 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21164 gate_vma.vm_start = FIXADDR_USER_START;
21165 gate_vma.vm_end = FIXADDR_USER_END;
21166 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21167 - gate_vma.vm_page_prot = __P101;
21168 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21169 /*
21170 * Make sure the vDSO gets into every core dump.
21171 * Dumping its contents makes post-mortem fully interpretable later
21172 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21173 if (compat)
21174 addr = VDSO_HIGH_BASE;
21175 else {
21176 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21177 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21178 if (IS_ERR_VALUE(addr)) {
21179 ret = addr;
21180 goto up_fail;
21181 }
21182 }
21183
21184 - current->mm->context.vdso = (void *)addr;
21185 + current->mm->context.vdso = addr;
21186
21187 if (compat_uses_vma || !compat) {
21188 /*
21189 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21190 }
21191
21192 current_thread_info()->sysenter_return =
21193 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21194 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21195
21196 up_fail:
21197 if (ret)
21198 - current->mm->context.vdso = NULL;
21199 + current->mm->context.vdso = 0;
21200
21201 up_write(&mm->mmap_sem);
21202
21203 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21204
21205 const char *arch_vma_name(struct vm_area_struct *vma)
21206 {
21207 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21208 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21209 return "[vdso]";
21210 +
21211 +#ifdef CONFIG_PAX_SEGMEXEC
21212 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21213 + return "[vdso]";
21214 +#endif
21215 +
21216 return NULL;
21217 }
21218
21219 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21220 * Check to see if the corresponding task was created in compat vdso
21221 * mode.
21222 */
21223 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21224 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21225 return &gate_vma;
21226 return NULL;
21227 }
21228 diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21229 --- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21230 +++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21231 @@ -15,18 +15,19 @@
21232 #include <asm/proto.h>
21233 #include <asm/vdso.h>
21234
21235 -unsigned int __read_mostly vdso_enabled = 1;
21236 -
21237 extern char vdso_start[], vdso_end[];
21238 extern unsigned short vdso_sync_cpuid;
21239 +extern char __vsyscall_0;
21240
21241 static struct page **vdso_pages;
21242 +static struct page *vsyscall_page;
21243 static unsigned vdso_size;
21244
21245 static int __init init_vdso_vars(void)
21246 {
21247 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21248 - int i;
21249 + size_t nbytes = vdso_end - vdso_start;
21250 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21251 + size_t i;
21252
21253 vdso_size = npages << PAGE_SHIFT;
21254 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21255 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21256 goto oom;
21257 for (i = 0; i < npages; i++) {
21258 struct page *p;
21259 - p = alloc_page(GFP_KERNEL);
21260 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21261 if (!p)
21262 goto oom;
21263 vdso_pages[i] = p;
21264 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21265 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21266 + nbytes -= PAGE_SIZE;
21267 }
21268 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21269
21270 return 0;
21271
21272 oom:
21273 - printk("Cannot allocate vdso\n");
21274 - vdso_enabled = 0;
21275 - return -ENOMEM;
21276 + panic("Cannot allocate vdso\n");
21277 }
21278 subsys_initcall(init_vdso_vars);
21279
21280 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21281 unsigned long addr;
21282 int ret;
21283
21284 - if (!vdso_enabled)
21285 - return 0;
21286 -
21287 down_write(&mm->mmap_sem);
21288 - addr = vdso_addr(mm->start_stack, vdso_size);
21289 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21290 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21291 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21292 if (IS_ERR_VALUE(addr)) {
21293 ret = addr;
21294 goto up_fail;
21295 }
21296
21297 - current->mm->context.vdso = (void *)addr;
21298 + mm->context.vdso = addr + PAGE_SIZE;
21299
21300 - ret = install_special_mapping(mm, addr, vdso_size,
21301 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
21302 VM_READ|VM_EXEC|
21303 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21304 + VM_MAYREAD|VM_MAYEXEC|
21305 VM_ALWAYSDUMP,
21306 - vdso_pages);
21307 + &vsyscall_page);
21308 if (ret) {
21309 - current->mm->context.vdso = NULL;
21310 + mm->context.vdso = 0;
21311 goto up_fail;
21312 }
21313
21314 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21315 + VM_READ|VM_EXEC|
21316 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21317 + VM_ALWAYSDUMP,
21318 + vdso_pages);
21319 + if (ret)
21320 + mm->context.vdso = 0;
21321 +
21322 up_fail:
21323 up_write(&mm->mmap_sem);
21324 return ret;
21325 }
21326 -
21327 -static __init int vdso_setup(char *s)
21328 -{
21329 - vdso_enabled = simple_strtoul(s, NULL, 0);
21330 - return 0;
21331 -}
21332 -__setup("vdso=", vdso_setup);
21333 diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
21334 --- linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:13.000000000 -0400
21335 +++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
21336 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21337
21338 struct shared_info xen_dummy_shared_info;
21339
21340 -void *xen_initial_gdt;
21341 -
21342 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21343 __read_mostly int xen_have_vector_callback;
21344 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21345 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21346 #endif
21347 };
21348
21349 -static void xen_reboot(int reason)
21350 +static __noreturn void xen_reboot(int reason)
21351 {
21352 struct sched_shutdown r = { .reason = reason };
21353
21354 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21355 BUG();
21356 }
21357
21358 -static void xen_restart(char *msg)
21359 +static __noreturn void xen_restart(char *msg)
21360 {
21361 xen_reboot(SHUTDOWN_reboot);
21362 }
21363
21364 -static void xen_emergency_restart(void)
21365 +static __noreturn void xen_emergency_restart(void)
21366 {
21367 xen_reboot(SHUTDOWN_reboot);
21368 }
21369
21370 -static void xen_machine_halt(void)
21371 +static __noreturn void xen_machine_halt(void)
21372 {
21373 xen_reboot(SHUTDOWN_poweroff);
21374 }
21375 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21376 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21377
21378 /* Work out if we support NX */
21379 - x86_configure_nx();
21380 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21381 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21382 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21383 + unsigned l, h;
21384 +
21385 + __supported_pte_mask |= _PAGE_NX;
21386 + rdmsr(MSR_EFER, l, h);
21387 + l |= EFER_NX;
21388 + wrmsr(MSR_EFER, l, h);
21389 + }
21390 +#endif
21391
21392 xen_setup_features();
21393
21394 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21395
21396 machine_ops = xen_machine_ops;
21397
21398 - /*
21399 - * The only reliable way to retain the initial address of the
21400 - * percpu gdt_page is to remember it here, so we can go and
21401 - * mark it RW later, when the initial percpu area is freed.
21402 - */
21403 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21404 -
21405 xen_smp_init();
21406
21407 #ifdef CONFIG_ACPI_NUMA
21408 diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
21409 --- linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:13.000000000 -0400
21410 +++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
21411 @@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21412 convert_pfn_mfn(init_level4_pgt);
21413 convert_pfn_mfn(level3_ident_pgt);
21414 convert_pfn_mfn(level3_kernel_pgt);
21415 + convert_pfn_mfn(level3_vmalloc_pgt);
21416 + convert_pfn_mfn(level3_vmemmap_pgt);
21417
21418 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21419 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21420 @@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21421 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21422 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21423 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21424 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21425 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21426 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21427 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21428 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21429 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21430
21431 @@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
21432 pv_mmu_ops.set_pud = xen_set_pud;
21433 #if PAGETABLE_LEVELS == 4
21434 pv_mmu_ops.set_pgd = xen_set_pgd;
21435 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21436 #endif
21437
21438 /* This will work as long as patching hasn't happened yet
21439 @@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
21440 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21441 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21442 .set_pgd = xen_set_pgd_hyper,
21443 + .set_pgd_batched = xen_set_pgd_hyper,
21444
21445 .alloc_pud = xen_alloc_pmd_init,
21446 .release_pud = xen_release_pmd_init,
21447 diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
21448 --- linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:13.000000000 -0400
21449 +++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
21450 @@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21451 {
21452 BUG_ON(smp_processor_id() != 0);
21453 native_smp_prepare_boot_cpu();
21454 -
21455 - /* We've switched to the "real" per-cpu gdt, so make sure the
21456 - old memory can be recycled */
21457 - make_lowmem_page_readwrite(xen_initial_gdt);
21458 -
21459 xen_filter_cpu_maps();
21460 xen_setup_vcpu_info_placement();
21461 }
21462 @@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21463 gdt = get_cpu_gdt_table(cpu);
21464
21465 ctxt->flags = VGCF_IN_KERNEL;
21466 - ctxt->user_regs.ds = __USER_DS;
21467 - ctxt->user_regs.es = __USER_DS;
21468 + ctxt->user_regs.ds = __KERNEL_DS;
21469 + ctxt->user_regs.es = __KERNEL_DS;
21470 ctxt->user_regs.ss = __KERNEL_DS;
21471 #ifdef CONFIG_X86_32
21472 ctxt->user_regs.fs = __KERNEL_PERCPU;
21473 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21474 + savesegment(gs, ctxt->user_regs.gs);
21475 #else
21476 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21477 #endif
21478 @@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21479 int rc;
21480
21481 per_cpu(current_task, cpu) = idle;
21482 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21483 #ifdef CONFIG_X86_32
21484 irq_ctx_init(cpu);
21485 #else
21486 clear_tsk_thread_flag(idle, TIF_FORK);
21487 - per_cpu(kernel_stack, cpu) =
21488 - (unsigned long)task_stack_page(idle) -
21489 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21490 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21491 #endif
21492 xen_setup_runstate_info(cpu);
21493 xen_setup_timer(cpu);
21494 diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
21495 --- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21496 +++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21497 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21498 ESP_OFFSET=4 # bytes pushed onto stack
21499
21500 /*
21501 - * Store vcpu_info pointer for easy access. Do it this way to
21502 - * avoid having to reload %fs
21503 + * Store vcpu_info pointer for easy access.
21504 */
21505 #ifdef CONFIG_SMP
21506 - GET_THREAD_INFO(%eax)
21507 - movl TI_cpu(%eax), %eax
21508 - movl __per_cpu_offset(,%eax,4), %eax
21509 - mov xen_vcpu(%eax), %eax
21510 + push %fs
21511 + mov $(__KERNEL_PERCPU), %eax
21512 + mov %eax, %fs
21513 + mov PER_CPU_VAR(xen_vcpu), %eax
21514 + pop %fs
21515 #else
21516 movl xen_vcpu, %eax
21517 #endif
21518 diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
21519 --- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21520 +++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21521 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21522 #ifdef CONFIG_X86_32
21523 mov %esi,xen_start_info
21524 mov $init_thread_union+THREAD_SIZE,%esp
21525 +#ifdef CONFIG_SMP
21526 + movl $cpu_gdt_table,%edi
21527 + movl $__per_cpu_load,%eax
21528 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21529 + rorl $16,%eax
21530 + movb %al,__KERNEL_PERCPU + 4(%edi)
21531 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21532 + movl $__per_cpu_end - 1,%eax
21533 + subl $__per_cpu_start,%eax
21534 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21535 +#endif
21536 #else
21537 mov %rsi,xen_start_info
21538 mov $init_thread_union+THREAD_SIZE,%rsp
21539 diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
21540 --- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21541 +++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21542 @@ -10,8 +10,6 @@
21543 extern const char xen_hypervisor_callback[];
21544 extern const char xen_failsafe_callback[];
21545
21546 -extern void *xen_initial_gdt;
21547 -
21548 struct trap_info;
21549 void xen_copy_trap_info(struct trap_info *traps);
21550
21551 diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
21552 --- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21553 +++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21554 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21555 }
21556 EXPORT_SYMBOL(blk_iopoll_complete);
21557
21558 -static void blk_iopoll_softirq(struct softirq_action *h)
21559 +static void blk_iopoll_softirq(void)
21560 {
21561 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21562 int rearm = 0, budget = blk_iopoll_budget;
21563 diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
21564 --- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21565 +++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21566 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21567 if (!len || !kbuf)
21568 return -EINVAL;
21569
21570 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21571 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21572 if (do_copy)
21573 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21574 else
21575 diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
21576 --- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21577 +++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21578 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21579 * Softirq action handler - move entries to local list and loop over them
21580 * while passing them to the queue registered handler.
21581 */
21582 -static void blk_done_softirq(struct softirq_action *h)
21583 +static void blk_done_softirq(void)
21584 {
21585 struct list_head *cpu_list, local_list;
21586
21587 diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
21588 --- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21589 +++ linux-3.0.4/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21590 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21591 struct sg_io_v4 *hdr, struct bsg_device *bd,
21592 fmode_t has_write_perm)
21593 {
21594 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21595 + unsigned char *cmdptr;
21596 +
21597 if (hdr->request_len > BLK_MAX_CDB) {
21598 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21599 if (!rq->cmd)
21600 return -ENOMEM;
21601 - }
21602 + cmdptr = rq->cmd;
21603 + } else
21604 + cmdptr = tmpcmd;
21605
21606 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21607 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21608 hdr->request_len))
21609 return -EFAULT;
21610
21611 + if (cmdptr != rq->cmd)
21612 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21613 +
21614 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21615 if (blk_verify_command(rq->cmd, has_write_perm))
21616 return -EPERM;
21617 diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
21618 --- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21619 +++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21620 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21621 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21622 struct sg_io_hdr *hdr, fmode_t mode)
21623 {
21624 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21625 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21626 + unsigned char *cmdptr;
21627 +
21628 + if (rq->cmd != rq->__cmd)
21629 + cmdptr = rq->cmd;
21630 + else
21631 + cmdptr = tmpcmd;
21632 +
21633 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21634 return -EFAULT;
21635 +
21636 + if (cmdptr != rq->cmd)
21637 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21638 +
21639 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21640 return -EPERM;
21641
21642 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21643 int err;
21644 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21645 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21646 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21647 + unsigned char *cmdptr;
21648
21649 if (!sic)
21650 return -EINVAL;
21651 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21652 */
21653 err = -EFAULT;
21654 rq->cmd_len = cmdlen;
21655 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21656 +
21657 + if (rq->cmd != rq->__cmd)
21658 + cmdptr = rq->cmd;
21659 + else
21660 + cmdptr = tmpcmd;
21661 +
21662 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21663 goto error;
21664
21665 + if (rq->cmd != cmdptr)
21666 + memcpy(rq->cmd, cmdptr, cmdlen);
21667 +
21668 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21669 goto error;
21670
21671 diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
21672 --- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21673 +++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21674 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21675
21676 struct cryptd_blkcipher_request_ctx {
21677 crypto_completion_t complete;
21678 -};
21679 +} __no_const;
21680
21681 struct cryptd_hash_ctx {
21682 struct crypto_shash *child;
21683 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21684
21685 struct cryptd_aead_request_ctx {
21686 crypto_completion_t complete;
21687 -};
21688 +} __no_const;
21689
21690 static void cryptd_queue_worker(struct work_struct *work);
21691
21692 diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
21693 --- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21694 +++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21695 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21696 for (i = 0; i < 7; ++i)
21697 gf128mul_x_lle(&p[i + 1], &p[i]);
21698
21699 - memset(r, 0, sizeof(r));
21700 + memset(r, 0, sizeof(*r));
21701 for (i = 0;;) {
21702 u8 ch = ((u8 *)b)[15 - i];
21703
21704 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21705 for (i = 0; i < 7; ++i)
21706 gf128mul_x_bbe(&p[i + 1], &p[i]);
21707
21708 - memset(r, 0, sizeof(r));
21709 + memset(r, 0, sizeof(*r));
21710 for (i = 0;;) {
21711 u8 ch = ((u8 *)b)[i];
21712
21713 diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
21714 --- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21715 +++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21716 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21717 u32 r0,r1,r2,r3,r4;
21718 int i;
21719
21720 + pax_track_stack();
21721 +
21722 /* Copy key, add padding */
21723
21724 for (i = 0; i < keylen; ++i)
21725 diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
21726 --- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21727 +++ linux-3.0.4/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21728 @@ -5,6 +5,7 @@
21729 *.cis
21730 *.cpio
21731 *.csp
21732 +*.dbg
21733 *.dsp
21734 *.dvi
21735 *.elf
21736 @@ -48,9 +49,11 @@
21737 *.tab.h
21738 *.tex
21739 *.ver
21740 +*.vim
21741 *.xml
21742 *.xz
21743 *_MODULES
21744 +*_reg_safe.h
21745 *_vga16.c
21746 *~
21747 \#*#
21748 @@ -70,6 +73,7 @@ Kerntypes
21749 Module.markers
21750 Module.symvers
21751 PENDING
21752 +PERF*
21753 SCCS
21754 System.map*
21755 TAGS
21756 @@ -98,6 +102,8 @@ bzImage*
21757 capability_names.h
21758 capflags.c
21759 classlist.h*
21760 +clut_vga16.c
21761 +common-cmds.h
21762 comp*.log
21763 compile.h*
21764 conf
21765 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21766 gconf
21767 gconf.glade.h
21768 gen-devlist
21769 +gen-kdb_cmds.c
21770 gen_crc32table
21771 gen_init_cpio
21772 generated
21773 genheaders
21774 genksyms
21775 *_gray256.c
21776 +hash
21777 hpet_example
21778 hugepage-mmap
21779 hugepage-shm
21780 @@ -146,7 +154,6 @@ int32.c
21781 int4.c
21782 int8.c
21783 kallsyms
21784 -kconfig
21785 keywords.c
21786 ksym.c*
21787 ksym.h*
21788 @@ -154,7 +161,6 @@ kxgettext
21789 lkc_defs.h
21790 lex.c
21791 lex.*.c
21792 -linux
21793 logo_*.c
21794 logo_*_clut224.c
21795 logo_*_mono.c
21796 @@ -174,6 +180,7 @@ mkboot
21797 mkbugboot
21798 mkcpustr
21799 mkdep
21800 +mkpiggy
21801 mkprep
21802 mkregtable
21803 mktables
21804 @@ -209,6 +216,7 @@ r300_reg_safe.h
21805 r420_reg_safe.h
21806 r600_reg_safe.h
21807 recordmcount
21808 +regdb.c
21809 relocs
21810 rlim_names.h
21811 rn50_reg_safe.h
21812 @@ -219,6 +227,7 @@ setup
21813 setup.bin
21814 setup.elf
21815 sImage
21816 +slabinfo
21817 sm_tbl*
21818 split-include
21819 syscalltab.h
21820 @@ -246,7 +255,9 @@ vmlinux
21821 vmlinux-*
21822 vmlinux.aout
21823 vmlinux.bin.all
21824 +vmlinux.bin.bz2
21825 vmlinux.lds
21826 +vmlinux.relocs
21827 vmlinuz
21828 voffset.h
21829 vsyscall.lds
21830 @@ -254,6 +265,7 @@ vsyscall_32.lds
21831 wanxlfw.inc
21832 uImage
21833 unifdef
21834 +utsrelease.h
21835 wakeup.bin
21836 wakeup.elf
21837 wakeup.lds
21838 diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
21839 --- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21840 +++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21841 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21842 the specified number of seconds. This is to be used if
21843 your oopses keep scrolling off the screen.
21844
21845 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21846 + virtualization environments that don't cope well with the
21847 + expand down segment used by UDEREF on X86-32 or the frequent
21848 + page table updates on X86-64.
21849 +
21850 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21851 +
21852 pcbit= [HW,ISDN]
21853
21854 pcd. [PARIDE]
21855 diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
21856 --- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21857 +++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21858 @@ -38,12 +38,12 @@
21859 */
21860 u64 cper_next_record_id(void)
21861 {
21862 - static atomic64_t seq;
21863 + static atomic64_unchecked_t seq;
21864
21865 - if (!atomic64_read(&seq))
21866 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
21867 + if (!atomic64_read_unchecked(&seq))
21868 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21869
21870 - return atomic64_inc_return(&seq);
21871 + return atomic64_inc_return_unchecked(&seq);
21872 }
21873 EXPORT_SYMBOL_GPL(cper_next_record_id);
21874
21875 diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
21876 --- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21877 +++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21878 @@ -11,6 +11,7 @@
21879 #include <linux/kernel.h>
21880 #include <linux/acpi.h>
21881 #include <linux/debugfs.h>
21882 +#include <asm/uaccess.h>
21883 #include "internal.h"
21884
21885 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21886 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21887 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21888 */
21889 unsigned int size = EC_SPACE_SIZE;
21890 - u8 *data = (u8 *) buf;
21891 + u8 data;
21892 loff_t init_off = *off;
21893 int err = 0;
21894
21895 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21896 size = count;
21897
21898 while (size) {
21899 - err = ec_read(*off, &data[*off - init_off]);
21900 + err = ec_read(*off, &data);
21901 if (err)
21902 return err;
21903 + if (put_user(data, &buf[*off - init_off]))
21904 + return -EFAULT;
21905 *off += 1;
21906 size--;
21907 }
21908 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21909
21910 unsigned int size = count;
21911 loff_t init_off = *off;
21912 - u8 *data = (u8 *) buf;
21913 int err = 0;
21914
21915 if (*off >= EC_SPACE_SIZE)
21916 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21917 }
21918
21919 while (size) {
21920 - u8 byte_write = data[*off - init_off];
21921 + u8 byte_write;
21922 + if (get_user(byte_write, &buf[*off - init_off]))
21923 + return -EFAULT;
21924 err = ec_write(*off, byte_write);
21925 if (err)
21926 return err;
21927 diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
21928 --- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21929 +++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21930 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21931 size_t count, loff_t * ppos)
21932 {
21933 struct list_head *node, *next;
21934 - char strbuf[5];
21935 - char str[5] = "";
21936 - unsigned int len = count;
21937 -
21938 - if (len > 4)
21939 - len = 4;
21940 - if (len < 0)
21941 - return -EFAULT;
21942 + char strbuf[5] = {0};
21943
21944 - if (copy_from_user(strbuf, buffer, len))
21945 + if (count > 4)
21946 + count = 4;
21947 + if (copy_from_user(strbuf, buffer, count))
21948 return -EFAULT;
21949 - strbuf[len] = '\0';
21950 - sscanf(strbuf, "%s", str);
21951 + strbuf[count] = '\0';
21952
21953 mutex_lock(&acpi_device_lock);
21954 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21955 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21956 if (!dev->wakeup.flags.valid)
21957 continue;
21958
21959 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
21960 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21961 if (device_can_wakeup(&dev->dev)) {
21962 bool enable = !device_may_wakeup(&dev->dev);
21963 device_set_wakeup_enable(&dev->dev, enable);
21964 diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
21965 --- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21966 +++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21967 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21968 return 0;
21969 #endif
21970
21971 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21972 + BUG_ON(pr->id >= nr_cpu_ids);
21973
21974 /*
21975 * Buggy BIOS check
21976 diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
21977 --- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21978 +++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21979 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21980 struct ata_port *ap;
21981 unsigned int tag;
21982
21983 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21984 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21985 ap = qc->ap;
21986
21987 qc->flags = 0;
21988 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21989 struct ata_port *ap;
21990 struct ata_link *link;
21991
21992 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21993 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21994 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21995 ap = qc->ap;
21996 link = qc->dev->link;
21997 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21998 return;
21999
22000 spin_lock(&lock);
22001 + pax_open_kernel();
22002
22003 for (cur = ops->inherits; cur; cur = cur->inherits) {
22004 void **inherit = (void **)cur;
22005 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22006 if (IS_ERR(*pp))
22007 *pp = NULL;
22008
22009 - ops->inherits = NULL;
22010 + *(struct ata_port_operations **)&ops->inherits = NULL;
22011
22012 + pax_close_kernel();
22013 spin_unlock(&lock);
22014 }
22015
22016 diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22017 --- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22018 +++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22019 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22020 {
22021 struct ata_link *link;
22022
22023 + pax_track_stack();
22024 +
22025 ata_for_each_link(link, ap, HOST_FIRST)
22026 ata_eh_link_report(link);
22027 }
22028 diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22029 --- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22030 +++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22031 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22032 /* Handle platform specific quirks */
22033 if (pdata->quirk) {
22034 if (pdata->quirk & CF_BROKEN_PIO) {
22035 - ap->ops->set_piomode = NULL;
22036 + pax_open_kernel();
22037 + *(void **)&ap->ops->set_piomode = NULL;
22038 + pax_close_kernel();
22039 ap->pio_mask = 0;
22040 }
22041 if (pdata->quirk & CF_BROKEN_MWDMA)
22042 diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22043 --- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22044 +++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22045 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22046 vcc->pop(vcc, skb);
22047 else
22048 dev_kfree_skb_any(skb);
22049 - atomic_inc(&vcc->stats->tx);
22050 + atomic_inc_unchecked(&vcc->stats->tx);
22051
22052 return 0;
22053 }
22054 diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22055 --- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22056 +++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22057 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22058 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22059
22060 // VC layer stats
22061 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22062 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22063
22064 // free the descriptor
22065 kfree (tx_descr);
22066 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22067 dump_skb ("<<<", vc, skb);
22068
22069 // VC layer stats
22070 - atomic_inc(&atm_vcc->stats->rx);
22071 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22072 __net_timestamp(skb);
22073 // end of our responsibility
22074 atm_vcc->push (atm_vcc, skb);
22075 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22076 } else {
22077 PRINTK (KERN_INFO, "dropped over-size frame");
22078 // should we count this?
22079 - atomic_inc(&atm_vcc->stats->rx_drop);
22080 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22081 }
22082
22083 } else {
22084 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22085 }
22086
22087 if (check_area (skb->data, skb->len)) {
22088 - atomic_inc(&atm_vcc->stats->tx_err);
22089 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22090 return -ENOMEM; // ?
22091 }
22092
22093 diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22094 --- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22095 +++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22096 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22097 if (vcc->pop) vcc->pop(vcc,skb);
22098 else dev_kfree_skb(skb);
22099 if (dev_data) return 0;
22100 - atomic_inc(&vcc->stats->tx_err);
22101 + atomic_inc_unchecked(&vcc->stats->tx_err);
22102 return -ENOLINK;
22103 }
22104 size = skb->len+sizeof(struct atmtcp_hdr);
22105 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22106 if (!new_skb) {
22107 if (vcc->pop) vcc->pop(vcc,skb);
22108 else dev_kfree_skb(skb);
22109 - atomic_inc(&vcc->stats->tx_err);
22110 + atomic_inc_unchecked(&vcc->stats->tx_err);
22111 return -ENOBUFS;
22112 }
22113 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22114 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22115 if (vcc->pop) vcc->pop(vcc,skb);
22116 else dev_kfree_skb(skb);
22117 out_vcc->push(out_vcc,new_skb);
22118 - atomic_inc(&vcc->stats->tx);
22119 - atomic_inc(&out_vcc->stats->rx);
22120 + atomic_inc_unchecked(&vcc->stats->tx);
22121 + atomic_inc_unchecked(&out_vcc->stats->rx);
22122 return 0;
22123 }
22124
22125 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22126 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22127 read_unlock(&vcc_sklist_lock);
22128 if (!out_vcc) {
22129 - atomic_inc(&vcc->stats->tx_err);
22130 + atomic_inc_unchecked(&vcc->stats->tx_err);
22131 goto done;
22132 }
22133 skb_pull(skb,sizeof(struct atmtcp_hdr));
22134 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22135 __net_timestamp(new_skb);
22136 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22137 out_vcc->push(out_vcc,new_skb);
22138 - atomic_inc(&vcc->stats->tx);
22139 - atomic_inc(&out_vcc->stats->rx);
22140 + atomic_inc_unchecked(&vcc->stats->tx);
22141 + atomic_inc_unchecked(&out_vcc->stats->rx);
22142 done:
22143 if (vcc->pop) vcc->pop(vcc,skb);
22144 else dev_kfree_skb(skb);
22145 diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22146 --- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22147 +++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22148 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22149 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22150 vcc->dev->number);
22151 length = 0;
22152 - atomic_inc(&vcc->stats->rx_err);
22153 + atomic_inc_unchecked(&vcc->stats->rx_err);
22154 }
22155 else {
22156 length = ATM_CELL_SIZE-1; /* no HEC */
22157 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22158 size);
22159 }
22160 eff = length = 0;
22161 - atomic_inc(&vcc->stats->rx_err);
22162 + atomic_inc_unchecked(&vcc->stats->rx_err);
22163 }
22164 else {
22165 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22166 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22167 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22168 vcc->dev->number,vcc->vci,length,size << 2,descr);
22169 length = eff = 0;
22170 - atomic_inc(&vcc->stats->rx_err);
22171 + atomic_inc_unchecked(&vcc->stats->rx_err);
22172 }
22173 }
22174 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22175 @@ -771,7 +771,7 @@ rx_dequeued++;
22176 vcc->push(vcc,skb);
22177 pushed++;
22178 }
22179 - atomic_inc(&vcc->stats->rx);
22180 + atomic_inc_unchecked(&vcc->stats->rx);
22181 }
22182 wake_up(&eni_dev->rx_wait);
22183 }
22184 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22185 PCI_DMA_TODEVICE);
22186 if (vcc->pop) vcc->pop(vcc,skb);
22187 else dev_kfree_skb_irq(skb);
22188 - atomic_inc(&vcc->stats->tx);
22189 + atomic_inc_unchecked(&vcc->stats->tx);
22190 wake_up(&eni_dev->tx_wait);
22191 dma_complete++;
22192 }
22193 diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
22194 --- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22195 +++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22196 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22197 }
22198 }
22199
22200 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22201 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22202
22203 fs_dprintk (FS_DEBUG_TXMEM, "i");
22204 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22205 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22206 #endif
22207 skb_put (skb, qe->p1 & 0xffff);
22208 ATM_SKB(skb)->vcc = atm_vcc;
22209 - atomic_inc(&atm_vcc->stats->rx);
22210 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22211 __net_timestamp(skb);
22212 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22213 atm_vcc->push (atm_vcc, skb);
22214 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22215 kfree (pe);
22216 }
22217 if (atm_vcc)
22218 - atomic_inc(&atm_vcc->stats->rx_drop);
22219 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22220 break;
22221 case 0x1f: /* Reassembly abort: no buffers. */
22222 /* Silently increment error counter. */
22223 if (atm_vcc)
22224 - atomic_inc(&atm_vcc->stats->rx_drop);
22225 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22226 break;
22227 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22228 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22229 diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
22230 --- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22231 +++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22232 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22233 #endif
22234 /* check error condition */
22235 if (*entry->status & STATUS_ERROR)
22236 - atomic_inc(&vcc->stats->tx_err);
22237 + atomic_inc_unchecked(&vcc->stats->tx_err);
22238 else
22239 - atomic_inc(&vcc->stats->tx);
22240 + atomic_inc_unchecked(&vcc->stats->tx);
22241 }
22242 }
22243
22244 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22245 if (skb == NULL) {
22246 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22247
22248 - atomic_inc(&vcc->stats->rx_drop);
22249 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22250 return -ENOMEM;
22251 }
22252
22253 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22254
22255 dev_kfree_skb_any(skb);
22256
22257 - atomic_inc(&vcc->stats->rx_drop);
22258 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22259 return -ENOMEM;
22260 }
22261
22262 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22263
22264 vcc->push(vcc, skb);
22265 - atomic_inc(&vcc->stats->rx);
22266 + atomic_inc_unchecked(&vcc->stats->rx);
22267
22268 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22269
22270 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22271 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22272 fore200e->atm_dev->number,
22273 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22274 - atomic_inc(&vcc->stats->rx_err);
22275 + atomic_inc_unchecked(&vcc->stats->rx_err);
22276 }
22277 }
22278
22279 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22280 goto retry_here;
22281 }
22282
22283 - atomic_inc(&vcc->stats->tx_err);
22284 + atomic_inc_unchecked(&vcc->stats->tx_err);
22285
22286 fore200e->tx_sat++;
22287 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22288 diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
22289 --- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22290 +++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22291 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22292
22293 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22294 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22295 - atomic_inc(&vcc->stats->rx_drop);
22296 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22297 goto return_host_buffers;
22298 }
22299
22300 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22301 RBRQ_LEN_ERR(he_dev->rbrq_head)
22302 ? "LEN_ERR" : "",
22303 vcc->vpi, vcc->vci);
22304 - atomic_inc(&vcc->stats->rx_err);
22305 + atomic_inc_unchecked(&vcc->stats->rx_err);
22306 goto return_host_buffers;
22307 }
22308
22309 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22310 vcc->push(vcc, skb);
22311 spin_lock(&he_dev->global_lock);
22312
22313 - atomic_inc(&vcc->stats->rx);
22314 + atomic_inc_unchecked(&vcc->stats->rx);
22315
22316 return_host_buffers:
22317 ++pdus_assembled;
22318 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22319 tpd->vcc->pop(tpd->vcc, tpd->skb);
22320 else
22321 dev_kfree_skb_any(tpd->skb);
22322 - atomic_inc(&tpd->vcc->stats->tx_err);
22323 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22324 }
22325 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22326 return;
22327 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22328 vcc->pop(vcc, skb);
22329 else
22330 dev_kfree_skb_any(skb);
22331 - atomic_inc(&vcc->stats->tx_err);
22332 + atomic_inc_unchecked(&vcc->stats->tx_err);
22333 return -EINVAL;
22334 }
22335
22336 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22337 vcc->pop(vcc, skb);
22338 else
22339 dev_kfree_skb_any(skb);
22340 - atomic_inc(&vcc->stats->tx_err);
22341 + atomic_inc_unchecked(&vcc->stats->tx_err);
22342 return -EINVAL;
22343 }
22344 #endif
22345 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22346 vcc->pop(vcc, skb);
22347 else
22348 dev_kfree_skb_any(skb);
22349 - atomic_inc(&vcc->stats->tx_err);
22350 + atomic_inc_unchecked(&vcc->stats->tx_err);
22351 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22352 return -ENOMEM;
22353 }
22354 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22355 vcc->pop(vcc, skb);
22356 else
22357 dev_kfree_skb_any(skb);
22358 - atomic_inc(&vcc->stats->tx_err);
22359 + atomic_inc_unchecked(&vcc->stats->tx_err);
22360 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22361 return -ENOMEM;
22362 }
22363 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22364 __enqueue_tpd(he_dev, tpd, cid);
22365 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22366
22367 - atomic_inc(&vcc->stats->tx);
22368 + atomic_inc_unchecked(&vcc->stats->tx);
22369
22370 return 0;
22371 }
22372 diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
22373 --- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22374 +++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22375 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22376 {
22377 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22378 // VC layer stats
22379 - atomic_inc(&vcc->stats->rx);
22380 + atomic_inc_unchecked(&vcc->stats->rx);
22381 __net_timestamp(skb);
22382 // end of our responsibility
22383 vcc->push (vcc, skb);
22384 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22385 dev->tx_iovec = NULL;
22386
22387 // VC layer stats
22388 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22389 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22390
22391 // free the skb
22392 hrz_kfree_skb (skb);
22393 diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
22394 --- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22395 +++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22396 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22397 else
22398 dev_kfree_skb(skb);
22399
22400 - atomic_inc(&vcc->stats->tx);
22401 + atomic_inc_unchecked(&vcc->stats->tx);
22402 }
22403
22404 atomic_dec(&scq->used);
22405 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22406 if ((sb = dev_alloc_skb(64)) == NULL) {
22407 printk("%s: Can't allocate buffers for aal0.\n",
22408 card->name);
22409 - atomic_add(i, &vcc->stats->rx_drop);
22410 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22411 break;
22412 }
22413 if (!atm_charge(vcc, sb->truesize)) {
22414 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22415 card->name);
22416 - atomic_add(i - 1, &vcc->stats->rx_drop);
22417 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22418 dev_kfree_skb(sb);
22419 break;
22420 }
22421 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22422 ATM_SKB(sb)->vcc = vcc;
22423 __net_timestamp(sb);
22424 vcc->push(vcc, sb);
22425 - atomic_inc(&vcc->stats->rx);
22426 + atomic_inc_unchecked(&vcc->stats->rx);
22427
22428 cell += ATM_CELL_PAYLOAD;
22429 }
22430 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22431 "(CDC: %08x)\n",
22432 card->name, len, rpp->len, readl(SAR_REG_CDC));
22433 recycle_rx_pool_skb(card, rpp);
22434 - atomic_inc(&vcc->stats->rx_err);
22435 + atomic_inc_unchecked(&vcc->stats->rx_err);
22436 return;
22437 }
22438 if (stat & SAR_RSQE_CRC) {
22439 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22440 recycle_rx_pool_skb(card, rpp);
22441 - atomic_inc(&vcc->stats->rx_err);
22442 + atomic_inc_unchecked(&vcc->stats->rx_err);
22443 return;
22444 }
22445 if (skb_queue_len(&rpp->queue) > 1) {
22446 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22447 RXPRINTK("%s: Can't alloc RX skb.\n",
22448 card->name);
22449 recycle_rx_pool_skb(card, rpp);
22450 - atomic_inc(&vcc->stats->rx_err);
22451 + atomic_inc_unchecked(&vcc->stats->rx_err);
22452 return;
22453 }
22454 if (!atm_charge(vcc, skb->truesize)) {
22455 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22456 __net_timestamp(skb);
22457
22458 vcc->push(vcc, skb);
22459 - atomic_inc(&vcc->stats->rx);
22460 + atomic_inc_unchecked(&vcc->stats->rx);
22461
22462 return;
22463 }
22464 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22465 __net_timestamp(skb);
22466
22467 vcc->push(vcc, skb);
22468 - atomic_inc(&vcc->stats->rx);
22469 + atomic_inc_unchecked(&vcc->stats->rx);
22470
22471 if (skb->truesize > SAR_FB_SIZE_3)
22472 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22473 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22474 if (vcc->qos.aal != ATM_AAL0) {
22475 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22476 card->name, vpi, vci);
22477 - atomic_inc(&vcc->stats->rx_drop);
22478 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22479 goto drop;
22480 }
22481
22482 if ((sb = dev_alloc_skb(64)) == NULL) {
22483 printk("%s: Can't allocate buffers for AAL0.\n",
22484 card->name);
22485 - atomic_inc(&vcc->stats->rx_err);
22486 + atomic_inc_unchecked(&vcc->stats->rx_err);
22487 goto drop;
22488 }
22489
22490 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22491 ATM_SKB(sb)->vcc = vcc;
22492 __net_timestamp(sb);
22493 vcc->push(vcc, sb);
22494 - atomic_inc(&vcc->stats->rx);
22495 + atomic_inc_unchecked(&vcc->stats->rx);
22496
22497 drop:
22498 skb_pull(queue, 64);
22499 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22500
22501 if (vc == NULL) {
22502 printk("%s: NULL connection in send().\n", card->name);
22503 - atomic_inc(&vcc->stats->tx_err);
22504 + atomic_inc_unchecked(&vcc->stats->tx_err);
22505 dev_kfree_skb(skb);
22506 return -EINVAL;
22507 }
22508 if (!test_bit(VCF_TX, &vc->flags)) {
22509 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22510 - atomic_inc(&vcc->stats->tx_err);
22511 + atomic_inc_unchecked(&vcc->stats->tx_err);
22512 dev_kfree_skb(skb);
22513 return -EINVAL;
22514 }
22515 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22516 break;
22517 default:
22518 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22519 - atomic_inc(&vcc->stats->tx_err);
22520 + atomic_inc_unchecked(&vcc->stats->tx_err);
22521 dev_kfree_skb(skb);
22522 return -EINVAL;
22523 }
22524
22525 if (skb_shinfo(skb)->nr_frags != 0) {
22526 printk("%s: No scatter-gather yet.\n", card->name);
22527 - atomic_inc(&vcc->stats->tx_err);
22528 + atomic_inc_unchecked(&vcc->stats->tx_err);
22529 dev_kfree_skb(skb);
22530 return -EINVAL;
22531 }
22532 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22533
22534 err = queue_skb(card, vc, skb, oam);
22535 if (err) {
22536 - atomic_inc(&vcc->stats->tx_err);
22537 + atomic_inc_unchecked(&vcc->stats->tx_err);
22538 dev_kfree_skb(skb);
22539 return err;
22540 }
22541 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22542 skb = dev_alloc_skb(64);
22543 if (!skb) {
22544 printk("%s: Out of memory in send_oam().\n", card->name);
22545 - atomic_inc(&vcc->stats->tx_err);
22546 + atomic_inc_unchecked(&vcc->stats->tx_err);
22547 return -ENOMEM;
22548 }
22549 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22550 diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
22551 --- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22552 +++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22553 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22554 status = (u_short) (buf_desc_ptr->desc_mode);
22555 if (status & (RX_CER | RX_PTE | RX_OFL))
22556 {
22557 - atomic_inc(&vcc->stats->rx_err);
22558 + atomic_inc_unchecked(&vcc->stats->rx_err);
22559 IF_ERR(printk("IA: bad packet, dropping it");)
22560 if (status & RX_CER) {
22561 IF_ERR(printk(" cause: packet CRC error\n");)
22562 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22563 len = dma_addr - buf_addr;
22564 if (len > iadev->rx_buf_sz) {
22565 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22566 - atomic_inc(&vcc->stats->rx_err);
22567 + atomic_inc_unchecked(&vcc->stats->rx_err);
22568 goto out_free_desc;
22569 }
22570
22571 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22572 ia_vcc = INPH_IA_VCC(vcc);
22573 if (ia_vcc == NULL)
22574 {
22575 - atomic_inc(&vcc->stats->rx_err);
22576 + atomic_inc_unchecked(&vcc->stats->rx_err);
22577 dev_kfree_skb_any(skb);
22578 atm_return(vcc, atm_guess_pdu2truesize(len));
22579 goto INCR_DLE;
22580 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22581 if ((length > iadev->rx_buf_sz) || (length >
22582 (skb->len - sizeof(struct cpcs_trailer))))
22583 {
22584 - atomic_inc(&vcc->stats->rx_err);
22585 + atomic_inc_unchecked(&vcc->stats->rx_err);
22586 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22587 length, skb->len);)
22588 dev_kfree_skb_any(skb);
22589 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22590
22591 IF_RX(printk("rx_dle_intr: skb push");)
22592 vcc->push(vcc,skb);
22593 - atomic_inc(&vcc->stats->rx);
22594 + atomic_inc_unchecked(&vcc->stats->rx);
22595 iadev->rx_pkt_cnt++;
22596 }
22597 INCR_DLE:
22598 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22599 {
22600 struct k_sonet_stats *stats;
22601 stats = &PRIV(_ia_dev[board])->sonet_stats;
22602 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22603 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22604 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22605 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22606 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22607 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22608 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22609 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22610 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22611 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22612 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22613 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22614 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22615 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22616 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22617 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22618 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22619 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22620 }
22621 ia_cmds.status = 0;
22622 break;
22623 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22624 if ((desc == 0) || (desc > iadev->num_tx_desc))
22625 {
22626 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22627 - atomic_inc(&vcc->stats->tx);
22628 + atomic_inc_unchecked(&vcc->stats->tx);
22629 if (vcc->pop)
22630 vcc->pop(vcc, skb);
22631 else
22632 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22633 ATM_DESC(skb) = vcc->vci;
22634 skb_queue_tail(&iadev->tx_dma_q, skb);
22635
22636 - atomic_inc(&vcc->stats->tx);
22637 + atomic_inc_unchecked(&vcc->stats->tx);
22638 iadev->tx_pkt_cnt++;
22639 /* Increment transaction counter */
22640 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22641
22642 #if 0
22643 /* add flow control logic */
22644 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22645 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22646 if (iavcc->vc_desc_cnt > 10) {
22647 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22648 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22649 diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
22650 --- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22651 +++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22652 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22653 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22654 lanai_endtx(lanai, lvcc);
22655 lanai_free_skb(lvcc->tx.atmvcc, skb);
22656 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22657 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22658 }
22659
22660 /* Try to fill the buffer - don't call unless there is backlog */
22661 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22662 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22663 __net_timestamp(skb);
22664 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22665 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22666 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22667 out:
22668 lvcc->rx.buf.ptr = end;
22669 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22670 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22671 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22672 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22673 lanai->stats.service_rxnotaal5++;
22674 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22675 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22676 return 0;
22677 }
22678 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22679 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22680 int bytes;
22681 read_unlock(&vcc_sklist_lock);
22682 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22683 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22684 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22685 lvcc->stats.x.aal5.service_trash++;
22686 bytes = (SERVICE_GET_END(s) * 16) -
22687 (((unsigned long) lvcc->rx.buf.ptr) -
22688 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22689 }
22690 if (s & SERVICE_STREAM) {
22691 read_unlock(&vcc_sklist_lock);
22692 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22693 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22694 lvcc->stats.x.aal5.service_stream++;
22695 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22696 "PDU on VCI %d!\n", lanai->number, vci);
22697 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22698 return 0;
22699 }
22700 DPRINTK("got rx crc error on vci %d\n", vci);
22701 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22702 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22703 lvcc->stats.x.aal5.service_rxcrc++;
22704 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22705 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22706 diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
22707 --- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22708 +++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22709 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22710 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22711 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22712 card->index);
22713 - atomic_inc(&vcc->stats->tx_err);
22714 + atomic_inc_unchecked(&vcc->stats->tx_err);
22715 dev_kfree_skb_any(skb);
22716 return -EINVAL;
22717 }
22718 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22719 if (!vc->tx) {
22720 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22721 card->index);
22722 - atomic_inc(&vcc->stats->tx_err);
22723 + atomic_inc_unchecked(&vcc->stats->tx_err);
22724 dev_kfree_skb_any(skb);
22725 return -EINVAL;
22726 }
22727 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22728 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22729 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22730 card->index);
22731 - atomic_inc(&vcc->stats->tx_err);
22732 + atomic_inc_unchecked(&vcc->stats->tx_err);
22733 dev_kfree_skb_any(skb);
22734 return -EINVAL;
22735 }
22736
22737 if (skb_shinfo(skb)->nr_frags != 0) {
22738 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22739 - atomic_inc(&vcc->stats->tx_err);
22740 + atomic_inc_unchecked(&vcc->stats->tx_err);
22741 dev_kfree_skb_any(skb);
22742 return -EINVAL;
22743 }
22744 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22745 }
22746
22747 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22748 - atomic_inc(&vcc->stats->tx_err);
22749 + atomic_inc_unchecked(&vcc->stats->tx_err);
22750 dev_kfree_skb_any(skb);
22751 return -EIO;
22752 }
22753 - atomic_inc(&vcc->stats->tx);
22754 + atomic_inc_unchecked(&vcc->stats->tx);
22755
22756 return 0;
22757 }
22758 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22759 printk
22760 ("nicstar%d: Can't allocate buffers for aal0.\n",
22761 card->index);
22762 - atomic_add(i, &vcc->stats->rx_drop);
22763 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22764 break;
22765 }
22766 if (!atm_charge(vcc, sb->truesize)) {
22767 RXPRINTK
22768 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22769 card->index);
22770 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22771 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22772 dev_kfree_skb_any(sb);
22773 break;
22774 }
22775 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22776 ATM_SKB(sb)->vcc = vcc;
22777 __net_timestamp(sb);
22778 vcc->push(vcc, sb);
22779 - atomic_inc(&vcc->stats->rx);
22780 + atomic_inc_unchecked(&vcc->stats->rx);
22781 cell += ATM_CELL_PAYLOAD;
22782 }
22783
22784 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22785 if (iovb == NULL) {
22786 printk("nicstar%d: Out of iovec buffers.\n",
22787 card->index);
22788 - atomic_inc(&vcc->stats->rx_drop);
22789 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22790 recycle_rx_buf(card, skb);
22791 return;
22792 }
22793 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22794 small or large buffer itself. */
22795 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22796 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22797 - atomic_inc(&vcc->stats->rx_err);
22798 + atomic_inc_unchecked(&vcc->stats->rx_err);
22799 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22800 NS_MAX_IOVECS);
22801 NS_PRV_IOVCNT(iovb) = 0;
22802 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22803 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22804 card->index);
22805 which_list(card, skb);
22806 - atomic_inc(&vcc->stats->rx_err);
22807 + atomic_inc_unchecked(&vcc->stats->rx_err);
22808 recycle_rx_buf(card, skb);
22809 vc->rx_iov = NULL;
22810 recycle_iov_buf(card, iovb);
22811 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22812 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22813 card->index);
22814 which_list(card, skb);
22815 - atomic_inc(&vcc->stats->rx_err);
22816 + atomic_inc_unchecked(&vcc->stats->rx_err);
22817 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22818 NS_PRV_IOVCNT(iovb));
22819 vc->rx_iov = NULL;
22820 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22821 printk(" - PDU size mismatch.\n");
22822 else
22823 printk(".\n");
22824 - atomic_inc(&vcc->stats->rx_err);
22825 + atomic_inc_unchecked(&vcc->stats->rx_err);
22826 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22827 NS_PRV_IOVCNT(iovb));
22828 vc->rx_iov = NULL;
22829 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22830 /* skb points to a small buffer */
22831 if (!atm_charge(vcc, skb->truesize)) {
22832 push_rxbufs(card, skb);
22833 - atomic_inc(&vcc->stats->rx_drop);
22834 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22835 } else {
22836 skb_put(skb, len);
22837 dequeue_sm_buf(card, skb);
22838 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22839 ATM_SKB(skb)->vcc = vcc;
22840 __net_timestamp(skb);
22841 vcc->push(vcc, skb);
22842 - atomic_inc(&vcc->stats->rx);
22843 + atomic_inc_unchecked(&vcc->stats->rx);
22844 }
22845 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22846 struct sk_buff *sb;
22847 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22848 if (len <= NS_SMBUFSIZE) {
22849 if (!atm_charge(vcc, sb->truesize)) {
22850 push_rxbufs(card, sb);
22851 - atomic_inc(&vcc->stats->rx_drop);
22852 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22853 } else {
22854 skb_put(sb, len);
22855 dequeue_sm_buf(card, sb);
22856 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22857 ATM_SKB(sb)->vcc = vcc;
22858 __net_timestamp(sb);
22859 vcc->push(vcc, sb);
22860 - atomic_inc(&vcc->stats->rx);
22861 + atomic_inc_unchecked(&vcc->stats->rx);
22862 }
22863
22864 push_rxbufs(card, skb);
22865 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22866
22867 if (!atm_charge(vcc, skb->truesize)) {
22868 push_rxbufs(card, skb);
22869 - atomic_inc(&vcc->stats->rx_drop);
22870 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22871 } else {
22872 dequeue_lg_buf(card, skb);
22873 #ifdef NS_USE_DESTRUCTORS
22874 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22875 ATM_SKB(skb)->vcc = vcc;
22876 __net_timestamp(skb);
22877 vcc->push(vcc, skb);
22878 - atomic_inc(&vcc->stats->rx);
22879 + atomic_inc_unchecked(&vcc->stats->rx);
22880 }
22881
22882 push_rxbufs(card, sb);
22883 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22884 printk
22885 ("nicstar%d: Out of huge buffers.\n",
22886 card->index);
22887 - atomic_inc(&vcc->stats->rx_drop);
22888 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22889 recycle_iovec_rx_bufs(card,
22890 (struct iovec *)
22891 iovb->data,
22892 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22893 card->hbpool.count++;
22894 } else
22895 dev_kfree_skb_any(hb);
22896 - atomic_inc(&vcc->stats->rx_drop);
22897 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22898 } else {
22899 /* Copy the small buffer to the huge buffer */
22900 sb = (struct sk_buff *)iov->iov_base;
22901 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22902 #endif /* NS_USE_DESTRUCTORS */
22903 __net_timestamp(hb);
22904 vcc->push(vcc, hb);
22905 - atomic_inc(&vcc->stats->rx);
22906 + atomic_inc_unchecked(&vcc->stats->rx);
22907 }
22908 }
22909
22910 diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
22911 --- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22912 +++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22913 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22914 }
22915 atm_charge(vcc, skb->truesize);
22916 vcc->push(vcc, skb);
22917 - atomic_inc(&vcc->stats->rx);
22918 + atomic_inc_unchecked(&vcc->stats->rx);
22919 break;
22920
22921 case PKT_STATUS:
22922 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22923 char msg[500];
22924 char item[10];
22925
22926 + pax_track_stack();
22927 +
22928 len = buf->len;
22929 for (i = 0; i < len; i++){
22930 if(i % 8 == 0)
22931 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22932 vcc = SKB_CB(oldskb)->vcc;
22933
22934 if (vcc) {
22935 - atomic_inc(&vcc->stats->tx);
22936 + atomic_inc_unchecked(&vcc->stats->tx);
22937 solos_pop(vcc, oldskb);
22938 } else
22939 dev_kfree_skb_irq(oldskb);
22940 diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
22941 --- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22942 +++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22943 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22944
22945
22946 #define ADD_LIMITED(s,v) \
22947 - atomic_add((v),&stats->s); \
22948 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22949 + atomic_add_unchecked((v),&stats->s); \
22950 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22951
22952
22953 static void suni_hz(unsigned long from_timer)
22954 diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
22955 --- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22956 +++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22957 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22958 struct sonet_stats tmp;
22959 int error = 0;
22960
22961 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22962 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22963 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22964 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22965 if (zero && !error) {
22966 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22967
22968
22969 #define ADD_LIMITED(s,v) \
22970 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22971 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22972 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22973 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22974 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22975 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22976
22977
22978 static void stat_event(struct atm_dev *dev)
22979 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22980 if (reason & uPD98402_INT_PFM) stat_event(dev);
22981 if (reason & uPD98402_INT_PCO) {
22982 (void) GET(PCOCR); /* clear interrupt cause */
22983 - atomic_add(GET(HECCT),
22984 + atomic_add_unchecked(GET(HECCT),
22985 &PRIV(dev)->sonet_stats.uncorr_hcs);
22986 }
22987 if ((reason & uPD98402_INT_RFO) &&
22988 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22989 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22990 uPD98402_INT_LOS),PIMR); /* enable them */
22991 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22992 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22993 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22994 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22995 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22996 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22997 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22998 return 0;
22999 }
23000
23001 diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
23002 --- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23003 +++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23004 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23005 }
23006 if (!size) {
23007 dev_kfree_skb_irq(skb);
23008 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23009 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23010 continue;
23011 }
23012 if (!atm_charge(vcc,skb->truesize)) {
23013 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23014 skb->len = size;
23015 ATM_SKB(skb)->vcc = vcc;
23016 vcc->push(vcc,skb);
23017 - atomic_inc(&vcc->stats->rx);
23018 + atomic_inc_unchecked(&vcc->stats->rx);
23019 }
23020 zout(pos & 0xffff,MTA(mbx));
23021 #if 0 /* probably a stupid idea */
23022 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23023 skb_queue_head(&zatm_vcc->backlog,skb);
23024 break;
23025 }
23026 - atomic_inc(&vcc->stats->tx);
23027 + atomic_inc_unchecked(&vcc->stats->tx);
23028 wake_up(&zatm_vcc->tx_wait);
23029 }
23030
23031 diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23032 --- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23033 +++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23034 @@ -29,14 +29,14 @@ bool events_check_enabled;
23035 * They need to be modified together atomically, so it's better to use one
23036 * atomic variable to hold them both.
23037 */
23038 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23039 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23040
23041 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23042 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23043
23044 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23045 {
23046 - unsigned int comb = atomic_read(&combined_event_count);
23047 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23048
23049 *cnt = (comb >> IN_PROGRESS_BITS);
23050 *inpr = comb & MAX_IN_PROGRESS;
23051 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23052 ws->last_time = ktime_get();
23053
23054 /* Increment the counter of events in progress. */
23055 - atomic_inc(&combined_event_count);
23056 + atomic_inc_unchecked(&combined_event_count);
23057 }
23058
23059 /**
23060 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23061 * Increment the counter of registered wakeup events and decrement the
23062 * couter of wakeup events in progress simultaneously.
23063 */
23064 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23065 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23066 }
23067
23068 /**
23069 diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23070 --- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23071 +++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23072 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23073 int err;
23074 u32 cp;
23075
23076 + memset(&arg64, 0, sizeof(arg64));
23077 +
23078 err = 0;
23079 err |=
23080 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23081 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23082 while (!list_empty(&h->reqQ)) {
23083 c = list_entry(h->reqQ.next, CommandList_struct, list);
23084 /* can't do anything if fifo is full */
23085 - if ((h->access.fifo_full(h))) {
23086 + if ((h->access->fifo_full(h))) {
23087 dev_warn(&h->pdev->dev, "fifo full\n");
23088 break;
23089 }
23090 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23091 h->Qdepth--;
23092
23093 /* Tell the controller execute command */
23094 - h->access.submit_command(h, c);
23095 + h->access->submit_command(h, c);
23096
23097 /* Put job onto the completed Q */
23098 addQ(&h->cmpQ, c);
23099 @@ -3422,17 +3424,17 @@ startio:
23100
23101 static inline unsigned long get_next_completion(ctlr_info_t *h)
23102 {
23103 - return h->access.command_completed(h);
23104 + return h->access->command_completed(h);
23105 }
23106
23107 static inline int interrupt_pending(ctlr_info_t *h)
23108 {
23109 - return h->access.intr_pending(h);
23110 + return h->access->intr_pending(h);
23111 }
23112
23113 static inline long interrupt_not_for_us(ctlr_info_t *h)
23114 {
23115 - return ((h->access.intr_pending(h) == 0) ||
23116 + return ((h->access->intr_pending(h) == 0) ||
23117 (h->interrupts_enabled == 0));
23118 }
23119
23120 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23121 u32 a;
23122
23123 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23124 - return h->access.command_completed(h);
23125 + return h->access->command_completed(h);
23126
23127 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23128 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23129 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23130 trans_support & CFGTBL_Trans_use_short_tags);
23131
23132 /* Change the access methods to the performant access methods */
23133 - h->access = SA5_performant_access;
23134 + h->access = &SA5_performant_access;
23135 h->transMethod = CFGTBL_Trans_Performant;
23136
23137 return;
23138 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23139 if (prod_index < 0)
23140 return -ENODEV;
23141 h->product_name = products[prod_index].product_name;
23142 - h->access = *(products[prod_index].access);
23143 + h->access = products[prod_index].access;
23144
23145 if (cciss_board_disabled(h)) {
23146 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23147 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23148 }
23149
23150 /* make sure the board interrupts are off */
23151 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23152 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23153 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23154 if (rc)
23155 goto clean2;
23156 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23157 * fake ones to scoop up any residual completions.
23158 */
23159 spin_lock_irqsave(&h->lock, flags);
23160 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23161 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23162 spin_unlock_irqrestore(&h->lock, flags);
23163 free_irq(h->intr[PERF_MODE_INT], h);
23164 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23165 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23166 dev_info(&h->pdev->dev, "Board READY.\n");
23167 dev_info(&h->pdev->dev,
23168 "Waiting for stale completions to drain.\n");
23169 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23170 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23171 msleep(10000);
23172 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23173 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23174
23175 rc = controller_reset_failed(h->cfgtable);
23176 if (rc)
23177 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23178 cciss_scsi_setup(h);
23179
23180 /* Turn the interrupts on so we can service requests */
23181 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23182 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23183
23184 /* Get the firmware version */
23185 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23186 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23187 kfree(flush_buf);
23188 if (return_code != IO_OK)
23189 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23190 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23191 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23192 free_irq(h->intr[PERF_MODE_INT], h);
23193 }
23194
23195 diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
23196 --- linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23197 +++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23198 @@ -100,7 +100,7 @@ struct ctlr_info
23199 /* information about each logical volume */
23200 drive_info_struct *drv[CISS_MAX_LUN];
23201
23202 - struct access_method access;
23203 + struct access_method *access;
23204
23205 /* queue and queue Info */
23206 struct list_head reqQ;
23207 diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
23208 --- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23209 +++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23210 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23211 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23212 goto Enomem4;
23213 }
23214 - hba[i]->access.set_intr_mask(hba[i], 0);
23215 + hba[i]->access->set_intr_mask(hba[i], 0);
23216 if (request_irq(hba[i]->intr, do_ida_intr,
23217 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23218 {
23219 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23220 add_timer(&hba[i]->timer);
23221
23222 /* Enable IRQ now that spinlock and rate limit timer are set up */
23223 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23224 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23225
23226 for(j=0; j<NWD; j++) {
23227 struct gendisk *disk = ida_gendisk[i][j];
23228 @@ -694,7 +694,7 @@ DBGINFO(
23229 for(i=0; i<NR_PRODUCTS; i++) {
23230 if (board_id == products[i].board_id) {
23231 c->product_name = products[i].product_name;
23232 - c->access = *(products[i].access);
23233 + c->access = products[i].access;
23234 break;
23235 }
23236 }
23237 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23238 hba[ctlr]->intr = intr;
23239 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23240 hba[ctlr]->product_name = products[j].product_name;
23241 - hba[ctlr]->access = *(products[j].access);
23242 + hba[ctlr]->access = products[j].access;
23243 hba[ctlr]->ctlr = ctlr;
23244 hba[ctlr]->board_id = board_id;
23245 hba[ctlr]->pci_dev = NULL; /* not PCI */
23246 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23247 struct scatterlist tmp_sg[SG_MAX];
23248 int i, dir, seg;
23249
23250 + pax_track_stack();
23251 +
23252 queue_next:
23253 creq = blk_peek_request(q);
23254 if (!creq)
23255 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23256
23257 while((c = h->reqQ) != NULL) {
23258 /* Can't do anything if we're busy */
23259 - if (h->access.fifo_full(h) == 0)
23260 + if (h->access->fifo_full(h) == 0)
23261 return;
23262
23263 /* Get the first entry from the request Q */
23264 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23265 h->Qdepth--;
23266
23267 /* Tell the controller to do our bidding */
23268 - h->access.submit_command(h, c);
23269 + h->access->submit_command(h, c);
23270
23271 /* Get onto the completion Q */
23272 addQ(&h->cmpQ, c);
23273 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23274 unsigned long flags;
23275 __u32 a,a1;
23276
23277 - istat = h->access.intr_pending(h);
23278 + istat = h->access->intr_pending(h);
23279 /* Is this interrupt for us? */
23280 if (istat == 0)
23281 return IRQ_NONE;
23282 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23283 */
23284 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23285 if (istat & FIFO_NOT_EMPTY) {
23286 - while((a = h->access.command_completed(h))) {
23287 + while((a = h->access->command_completed(h))) {
23288 a1 = a; a &= ~3;
23289 if ((c = h->cmpQ) == NULL)
23290 {
23291 @@ -1449,11 +1451,11 @@ static int sendcmd(
23292 /*
23293 * Disable interrupt
23294 */
23295 - info_p->access.set_intr_mask(info_p, 0);
23296 + info_p->access->set_intr_mask(info_p, 0);
23297 /* Make sure there is room in the command FIFO */
23298 /* Actually it should be completely empty at this time. */
23299 for (i = 200000; i > 0; i--) {
23300 - temp = info_p->access.fifo_full(info_p);
23301 + temp = info_p->access->fifo_full(info_p);
23302 if (temp != 0) {
23303 break;
23304 }
23305 @@ -1466,7 +1468,7 @@ DBG(
23306 /*
23307 * Send the cmd
23308 */
23309 - info_p->access.submit_command(info_p, c);
23310 + info_p->access->submit_command(info_p, c);
23311 complete = pollcomplete(ctlr);
23312
23313 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23314 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23315 * we check the new geometry. Then turn interrupts back on when
23316 * we're done.
23317 */
23318 - host->access.set_intr_mask(host, 0);
23319 + host->access->set_intr_mask(host, 0);
23320 getgeometry(ctlr);
23321 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23322 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23323
23324 for(i=0; i<NWD; i++) {
23325 struct gendisk *disk = ida_gendisk[ctlr][i];
23326 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23327 /* Wait (up to 2 seconds) for a command to complete */
23328
23329 for (i = 200000; i > 0; i--) {
23330 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23331 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23332 if (done == 0) {
23333 udelay(10); /* a short fixed delay */
23334 } else
23335 diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
23336 --- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23337 +++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23338 @@ -99,7 +99,7 @@ struct ctlr_info {
23339 drv_info_t drv[NWD];
23340 struct proc_dir_entry *proc;
23341
23342 - struct access_method access;
23343 + struct access_method *access;
23344
23345 cmdlist_t *reqQ;
23346 cmdlist_t *cmpQ;
23347 diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
23348 --- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23349 +++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23350 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23351 unsigned long flags;
23352 int Channel, TargetID;
23353
23354 + pax_track_stack();
23355 +
23356 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23357 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23358 sizeof(DAC960_SCSI_Inquiry_T) +
23359 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
23360 --- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23361 +++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23362 @@ -737,7 +737,7 @@ struct drbd_request;
23363 struct drbd_epoch {
23364 struct list_head list;
23365 unsigned int barrier_nr;
23366 - atomic_t epoch_size; /* increased on every request added. */
23367 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23368 atomic_t active; /* increased on every req. added, and dec on every finished. */
23369 unsigned long flags;
23370 };
23371 @@ -1109,7 +1109,7 @@ struct drbd_conf {
23372 void *int_dig_in;
23373 void *int_dig_vv;
23374 wait_queue_head_t seq_wait;
23375 - atomic_t packet_seq;
23376 + atomic_unchecked_t packet_seq;
23377 unsigned int peer_seq;
23378 spinlock_t peer_seq_lock;
23379 unsigned int minor;
23380 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
23381 --- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23382 +++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23383 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23384 p.sector = sector;
23385 p.block_id = block_id;
23386 p.blksize = blksize;
23387 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23388 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23389
23390 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23391 return false;
23392 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23393 p.sector = cpu_to_be64(req->sector);
23394 p.block_id = (unsigned long)req;
23395 p.seq_num = cpu_to_be32(req->seq_num =
23396 - atomic_add_return(1, &mdev->packet_seq));
23397 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23398
23399 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23400
23401 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23402 atomic_set(&mdev->unacked_cnt, 0);
23403 atomic_set(&mdev->local_cnt, 0);
23404 atomic_set(&mdev->net_cnt, 0);
23405 - atomic_set(&mdev->packet_seq, 0);
23406 + atomic_set_unchecked(&mdev->packet_seq, 0);
23407 atomic_set(&mdev->pp_in_use, 0);
23408 atomic_set(&mdev->pp_in_use_by_net, 0);
23409 atomic_set(&mdev->rs_sect_in, 0);
23410 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23411 mdev->receiver.t_state);
23412
23413 /* no need to lock it, I'm the only thread alive */
23414 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23415 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23416 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23417 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23418 mdev->al_writ_cnt =
23419 mdev->bm_writ_cnt =
23420 mdev->read_cnt =
23421 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
23422 --- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23423 +++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23424 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23425 module_put(THIS_MODULE);
23426 }
23427
23428 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23429 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23430
23431 static unsigned short *
23432 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23433 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23434 cn_reply->id.idx = CN_IDX_DRBD;
23435 cn_reply->id.val = CN_VAL_DRBD;
23436
23437 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23438 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23439 cn_reply->ack = 0; /* not used here. */
23440 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23441 (int)((char *)tl - (char *)reply->tag_list);
23442 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23443 cn_reply->id.idx = CN_IDX_DRBD;
23444 cn_reply->id.val = CN_VAL_DRBD;
23445
23446 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23447 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23448 cn_reply->ack = 0; /* not used here. */
23449 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23450 (int)((char *)tl - (char *)reply->tag_list);
23451 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23452 cn_reply->id.idx = CN_IDX_DRBD;
23453 cn_reply->id.val = CN_VAL_DRBD;
23454
23455 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23456 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23457 cn_reply->ack = 0; // not used here.
23458 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23459 (int)((char*)tl - (char*)reply->tag_list);
23460 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23461 cn_reply->id.idx = CN_IDX_DRBD;
23462 cn_reply->id.val = CN_VAL_DRBD;
23463
23464 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23465 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23466 cn_reply->ack = 0; /* not used here. */
23467 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23468 (int)((char *)tl - (char *)reply->tag_list);
23469 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
23470 --- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23471 +++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23472 @@ -894,7 +894,7 @@ retry:
23473 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23474 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23475
23476 - atomic_set(&mdev->packet_seq, 0);
23477 + atomic_set_unchecked(&mdev->packet_seq, 0);
23478 mdev->peer_seq = 0;
23479
23480 drbd_thread_start(&mdev->asender);
23481 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23482 do {
23483 next_epoch = NULL;
23484
23485 - epoch_size = atomic_read(&epoch->epoch_size);
23486 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23487
23488 switch (ev & ~EV_CLEANUP) {
23489 case EV_PUT:
23490 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23491 rv = FE_DESTROYED;
23492 } else {
23493 epoch->flags = 0;
23494 - atomic_set(&epoch->epoch_size, 0);
23495 + atomic_set_unchecked(&epoch->epoch_size, 0);
23496 /* atomic_set(&epoch->active, 0); is already zero */
23497 if (rv == FE_STILL_LIVE)
23498 rv = FE_RECYCLED;
23499 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23500 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23501 drbd_flush(mdev);
23502
23503 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23504 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23505 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23506 if (epoch)
23507 break;
23508 }
23509
23510 epoch = mdev->current_epoch;
23511 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23512 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23513
23514 D_ASSERT(atomic_read(&epoch->active) == 0);
23515 D_ASSERT(epoch->flags == 0);
23516 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23517 }
23518
23519 epoch->flags = 0;
23520 - atomic_set(&epoch->epoch_size, 0);
23521 + atomic_set_unchecked(&epoch->epoch_size, 0);
23522 atomic_set(&epoch->active, 0);
23523
23524 spin_lock(&mdev->epoch_lock);
23525 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23526 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23527 list_add(&epoch->list, &mdev->current_epoch->list);
23528 mdev->current_epoch = epoch;
23529 mdev->epochs++;
23530 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23531 spin_unlock(&mdev->peer_seq_lock);
23532
23533 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23534 - atomic_inc(&mdev->current_epoch->epoch_size);
23535 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23536 return drbd_drain_block(mdev, data_size);
23537 }
23538
23539 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23540
23541 spin_lock(&mdev->epoch_lock);
23542 e->epoch = mdev->current_epoch;
23543 - atomic_inc(&e->epoch->epoch_size);
23544 + atomic_inc_unchecked(&e->epoch->epoch_size);
23545 atomic_inc(&e->epoch->active);
23546 spin_unlock(&mdev->epoch_lock);
23547
23548 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23549 D_ASSERT(list_empty(&mdev->done_ee));
23550
23551 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23552 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23553 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23554 D_ASSERT(list_empty(&mdev->current_epoch->list));
23555 }
23556
23557 diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
23558 --- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23559 +++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23560 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23561 struct kvec iov;
23562 sigset_t blocked, oldset;
23563
23564 + pax_track_stack();
23565 +
23566 if (unlikely(!sock)) {
23567 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23568 lo->disk->disk_name, (send ? "send" : "recv"));
23569 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23570 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23571 unsigned int cmd, unsigned long arg)
23572 {
23573 + pax_track_stack();
23574 +
23575 switch (cmd) {
23576 case NBD_DISCONNECT: {
23577 struct request sreq;
23578 diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
23579 --- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23580 +++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23581 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23582 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23583 return -EFAULT;
23584
23585 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23586 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23587 return -EFAULT;
23588
23589 client = agp_find_client_by_pid(reserve.pid);
23590 diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
23591 --- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23592 +++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23593 @@ -9,6 +9,7 @@
23594 #include <linux/types.h>
23595 #include <linux/errno.h>
23596 #include <linux/tty.h>
23597 +#include <linux/mutex.h>
23598 #include <linux/timer.h>
23599 #include <linux/kernel.h>
23600 #include <linux/wait.h>
23601 @@ -34,6 +35,7 @@ static int vfd_is_open;
23602 static unsigned char vfd[40];
23603 static int vfd_cursor;
23604 static unsigned char ledpb, led;
23605 +static DEFINE_MUTEX(vfd_mutex);
23606
23607 static void update_vfd(void)
23608 {
23609 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23610 if (!vfd_is_open)
23611 return -EBUSY;
23612
23613 + mutex_lock(&vfd_mutex);
23614 for (;;) {
23615 char c;
23616 if (!indx)
23617 break;
23618 - if (get_user(c, buf))
23619 + if (get_user(c, buf)) {
23620 + mutex_unlock(&vfd_mutex);
23621 return -EFAULT;
23622 + }
23623 if (esc) {
23624 set_led(c);
23625 esc = 0;
23626 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23627 buf++;
23628 }
23629 update_vfd();
23630 + mutex_unlock(&vfd_mutex);
23631
23632 return len;
23633 }
23634 diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
23635 --- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23636 +++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23637 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23638 switch (cmd) {
23639
23640 case RTC_PLL_GET:
23641 + memset(&pll, 0, sizeof(pll));
23642 if (get_rtc_pll(&pll))
23643 return -EINVAL;
23644 else
23645 diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
23646 --- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23647 +++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23648 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23649 }
23650
23651 static int
23652 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23653 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23654 struct hpet_info *info)
23655 {
23656 struct hpet_timer __iomem *timer;
23657 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
23658 --- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23659 +++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23660 @@ -415,7 +415,7 @@ struct ipmi_smi {
23661 struct proc_dir_entry *proc_dir;
23662 char proc_dir_name[10];
23663
23664 - atomic_t stats[IPMI_NUM_STATS];
23665 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23666
23667 /*
23668 * run_to_completion duplicate of smb_info, smi_info
23669 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23670
23671
23672 #define ipmi_inc_stat(intf, stat) \
23673 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23674 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23675 #define ipmi_get_stat(intf, stat) \
23676 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23677 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23678
23679 static int is_lan_addr(struct ipmi_addr *addr)
23680 {
23681 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23682 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23683 init_waitqueue_head(&intf->waitq);
23684 for (i = 0; i < IPMI_NUM_STATS; i++)
23685 - atomic_set(&intf->stats[i], 0);
23686 + atomic_set_unchecked(&intf->stats[i], 0);
23687
23688 intf->proc_dir = NULL;
23689
23690 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23691 struct ipmi_smi_msg smi_msg;
23692 struct ipmi_recv_msg recv_msg;
23693
23694 + pax_track_stack();
23695 +
23696 si = (struct ipmi_system_interface_addr *) &addr;
23697 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23698 si->channel = IPMI_BMC_CHANNEL;
23699 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
23700 --- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23701 +++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23702 @@ -277,7 +277,7 @@ struct smi_info {
23703 unsigned char slave_addr;
23704
23705 /* Counters and things for the proc filesystem. */
23706 - atomic_t stats[SI_NUM_STATS];
23707 + atomic_unchecked_t stats[SI_NUM_STATS];
23708
23709 struct task_struct *thread;
23710
23711 @@ -286,9 +286,9 @@ struct smi_info {
23712 };
23713
23714 #define smi_inc_stat(smi, stat) \
23715 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23716 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23717 #define smi_get_stat(smi, stat) \
23718 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23719 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23720
23721 #define SI_MAX_PARMS 4
23722
23723 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23724 atomic_set(&new_smi->req_events, 0);
23725 new_smi->run_to_completion = 0;
23726 for (i = 0; i < SI_NUM_STATS; i++)
23727 - atomic_set(&new_smi->stats[i], 0);
23728 + atomic_set_unchecked(&new_smi->stats[i], 0);
23729
23730 new_smi->interrupt_disabled = 1;
23731 atomic_set(&new_smi->stop_operation, 0);
23732 diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
23733 --- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23734 +++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23735 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23736
23737 config DEVKMEM
23738 bool "/dev/kmem virtual device support"
23739 - default y
23740 + default n
23741 + depends on !GRKERNSEC_KMEM
23742 help
23743 Say Y here if you want to support the /dev/kmem device. The
23744 /dev/kmem device is rarely used, but can be used for certain
23745 @@ -596,6 +597,7 @@ config DEVPORT
23746 bool
23747 depends on !M68K
23748 depends on ISA || PCI
23749 + depends on !GRKERNSEC_KMEM
23750 default y
23751
23752 source "drivers/s390/char/Kconfig"
23753 diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
23754 --- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23755 +++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23756 @@ -18,6 +18,7 @@
23757 #include <linux/raw.h>
23758 #include <linux/tty.h>
23759 #include <linux/capability.h>
23760 +#include <linux/security.h>
23761 #include <linux/ptrace.h>
23762 #include <linux/device.h>
23763 #include <linux/highmem.h>
23764 @@ -34,6 +35,10 @@
23765 # include <linux/efi.h>
23766 #endif
23767
23768 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23769 +extern struct file_operations grsec_fops;
23770 +#endif
23771 +
23772 static inline unsigned long size_inside_page(unsigned long start,
23773 unsigned long size)
23774 {
23775 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23776
23777 while (cursor < to) {
23778 if (!devmem_is_allowed(pfn)) {
23779 +#ifdef CONFIG_GRKERNSEC_KMEM
23780 + gr_handle_mem_readwrite(from, to);
23781 +#else
23782 printk(KERN_INFO
23783 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23784 current->comm, from, to);
23785 +#endif
23786 return 0;
23787 }
23788 cursor += PAGE_SIZE;
23789 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23790 }
23791 return 1;
23792 }
23793 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23794 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23795 +{
23796 + return 0;
23797 +}
23798 #else
23799 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23800 {
23801 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23802
23803 while (count > 0) {
23804 unsigned long remaining;
23805 + char *temp;
23806
23807 sz = size_inside_page(p, count);
23808
23809 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23810 if (!ptr)
23811 return -EFAULT;
23812
23813 - remaining = copy_to_user(buf, ptr, sz);
23814 +#ifdef CONFIG_PAX_USERCOPY
23815 + temp = kmalloc(sz, GFP_KERNEL);
23816 + if (!temp) {
23817 + unxlate_dev_mem_ptr(p, ptr);
23818 + return -ENOMEM;
23819 + }
23820 + memcpy(temp, ptr, sz);
23821 +#else
23822 + temp = ptr;
23823 +#endif
23824 +
23825 + remaining = copy_to_user(buf, temp, sz);
23826 +
23827 +#ifdef CONFIG_PAX_USERCOPY
23828 + kfree(temp);
23829 +#endif
23830 +
23831 unxlate_dev_mem_ptr(p, ptr);
23832 if (remaining)
23833 return -EFAULT;
23834 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23835 size_t count, loff_t *ppos)
23836 {
23837 unsigned long p = *ppos;
23838 - ssize_t low_count, read, sz;
23839 + ssize_t low_count, read, sz, err = 0;
23840 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23841 - int err = 0;
23842
23843 read = 0;
23844 if (p < (unsigned long) high_memory) {
23845 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23846 }
23847 #endif
23848 while (low_count > 0) {
23849 + char *temp;
23850 +
23851 sz = size_inside_page(p, low_count);
23852
23853 /*
23854 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23855 */
23856 kbuf = xlate_dev_kmem_ptr((char *)p);
23857
23858 - if (copy_to_user(buf, kbuf, sz))
23859 +#ifdef CONFIG_PAX_USERCOPY
23860 + temp = kmalloc(sz, GFP_KERNEL);
23861 + if (!temp)
23862 + return -ENOMEM;
23863 + memcpy(temp, kbuf, sz);
23864 +#else
23865 + temp = kbuf;
23866 +#endif
23867 +
23868 + err = copy_to_user(buf, temp, sz);
23869 +
23870 +#ifdef CONFIG_PAX_USERCOPY
23871 + kfree(temp);
23872 +#endif
23873 +
23874 + if (err)
23875 return -EFAULT;
23876 buf += sz;
23877 p += sz;
23878 @@ -866,6 +913,9 @@ static const struct memdev {
23879 #ifdef CONFIG_CRASH_DUMP
23880 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23881 #endif
23882 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23883 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23884 +#endif
23885 };
23886
23887 static int memory_open(struct inode *inode, struct file *filp)
23888 diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
23889 --- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23890 +++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23891 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23892
23893 spin_unlock_irq(&rtc_lock);
23894
23895 - if (copy_to_user(buf, contents, tmp - contents))
23896 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23897 return -EFAULT;
23898
23899 *ppos = i;
23900 diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
23901 --- linux-3.0.4/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23902 +++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23903 @@ -261,8 +261,13 @@
23904 /*
23905 * Configuration information
23906 */
23907 +#ifdef CONFIG_GRKERNSEC_RANDNET
23908 +#define INPUT_POOL_WORDS 512
23909 +#define OUTPUT_POOL_WORDS 128
23910 +#else
23911 #define INPUT_POOL_WORDS 128
23912 #define OUTPUT_POOL_WORDS 32
23913 +#endif
23914 #define SEC_XFER_SIZE 512
23915 #define EXTRACT_SIZE 10
23916
23917 @@ -300,10 +305,17 @@ static struct poolinfo {
23918 int poolwords;
23919 int tap1, tap2, tap3, tap4, tap5;
23920 } poolinfo_table[] = {
23921 +#ifdef CONFIG_GRKERNSEC_RANDNET
23922 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23923 + { 512, 411, 308, 208, 104, 1 },
23924 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23925 + { 128, 103, 76, 51, 25, 1 },
23926 +#else
23927 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23928 { 128, 103, 76, 51, 25, 1 },
23929 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23930 { 32, 26, 20, 14, 7, 1 },
23931 +#endif
23932 #if 0
23933 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23934 { 2048, 1638, 1231, 819, 411, 1 },
23935 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23936
23937 extract_buf(r, tmp);
23938 i = min_t(int, nbytes, EXTRACT_SIZE);
23939 - if (copy_to_user(buf, tmp, i)) {
23940 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23941 ret = -EFAULT;
23942 break;
23943 }
23944 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23945 #include <linux/sysctl.h>
23946
23947 static int min_read_thresh = 8, min_write_thresh;
23948 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
23949 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23950 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23951 static char sysctl_bootid[16];
23952
23953 diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
23954 --- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23955 +++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23956 @@ -55,6 +55,7 @@
23957 #include <asm/uaccess.h>
23958 #include <asm/io.h>
23959 #include <asm/system.h>
23960 +#include <asm/local.h>
23961
23962 #include <linux/sonypi.h>
23963
23964 @@ -491,7 +492,7 @@ static struct sonypi_device {
23965 spinlock_t fifo_lock;
23966 wait_queue_head_t fifo_proc_list;
23967 struct fasync_struct *fifo_async;
23968 - int open_count;
23969 + local_t open_count;
23970 int model;
23971 struct input_dev *input_jog_dev;
23972 struct input_dev *input_key_dev;
23973 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23974 static int sonypi_misc_release(struct inode *inode, struct file *file)
23975 {
23976 mutex_lock(&sonypi_device.lock);
23977 - sonypi_device.open_count--;
23978 + local_dec(&sonypi_device.open_count);
23979 mutex_unlock(&sonypi_device.lock);
23980 return 0;
23981 }
23982 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23983 {
23984 mutex_lock(&sonypi_device.lock);
23985 /* Flush input queue on first open */
23986 - if (!sonypi_device.open_count)
23987 + if (!local_read(&sonypi_device.open_count))
23988 kfifo_reset(&sonypi_device.fifo);
23989 - sonypi_device.open_count++;
23990 + local_inc(&sonypi_device.open_count);
23991 mutex_unlock(&sonypi_device.lock);
23992
23993 return 0;
23994 diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
23995 --- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
23996 +++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
23997 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23998 event = addr;
23999
24000 if ((event->event_type == 0 && event->event_size == 0) ||
24001 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24002 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24003 return NULL;
24004
24005 return addr;
24006 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24007 return NULL;
24008
24009 if ((event->event_type == 0 && event->event_size == 0) ||
24010 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24011 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24012 return NULL;
24013
24014 (*pos)++;
24015 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24016 int i;
24017
24018 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24019 - seq_putc(m, data[i]);
24020 + if (!seq_putc(m, data[i]))
24021 + return -EFAULT;
24022
24023 return 0;
24024 }
24025 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24026 log->bios_event_log_end = log->bios_event_log + len;
24027
24028 virt = acpi_os_map_memory(start, len);
24029 + if (!virt) {
24030 + kfree(log->bios_event_log);
24031 + log->bios_event_log = NULL;
24032 + return -EFAULT;
24033 + }
24034
24035 memcpy(log->bios_event_log, virt, len);
24036
24037 diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24038 --- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24039 +++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24040 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24041 chip->vendor.req_complete_val)
24042 goto out_recv;
24043
24044 - if ((status == chip->vendor.req_canceled)) {
24045 + if (status == chip->vendor.req_canceled) {
24046 dev_err(chip->dev, "Operation Canceled\n");
24047 rc = -ECANCELED;
24048 goto out;
24049 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24050
24051 struct tpm_chip *chip = dev_get_drvdata(dev);
24052
24053 + pax_track_stack();
24054 +
24055 tpm_cmd.header.in = tpm_readpubek_header;
24056 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24057 "attempting to read the PUBEK");
24058 diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24059 --- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24060 +++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24061 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24062 0xCA, 0x34, 0x2B, 0x2E};
24063 struct scatterlist sg;
24064
24065 + pax_track_stack();
24066 +
24067 memset(src, 0, sizeof(src));
24068 memset(ctx.key, 0, sizeof(ctx.key));
24069
24070 diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24071 --- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24072 +++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24073 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24074 struct crypto_aes_ctx gen_aes;
24075 int cpu;
24076
24077 + pax_track_stack();
24078 +
24079 if (key_len % 8) {
24080 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24081 return -EINVAL;
24082 diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24083 --- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24084 +++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24085 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24086 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24087 static int edac_pci_poll_msec = 1000; /* one second workq period */
24088
24089 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24090 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24091 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24092 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24093
24094 static struct kobject *edac_pci_top_main_kobj;
24095 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24096 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24097 edac_printk(KERN_CRIT, EDAC_PCI,
24098 "Signaled System Error on %s\n",
24099 pci_name(dev));
24100 - atomic_inc(&pci_nonparity_count);
24101 + atomic_inc_unchecked(&pci_nonparity_count);
24102 }
24103
24104 if (status & (PCI_STATUS_PARITY)) {
24105 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24106 "Master Data Parity Error on %s\n",
24107 pci_name(dev));
24108
24109 - atomic_inc(&pci_parity_count);
24110 + atomic_inc_unchecked(&pci_parity_count);
24111 }
24112
24113 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24114 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24115 "Detected Parity Error on %s\n",
24116 pci_name(dev));
24117
24118 - atomic_inc(&pci_parity_count);
24119 + atomic_inc_unchecked(&pci_parity_count);
24120 }
24121 }
24122
24123 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24124 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24125 "Signaled System Error on %s\n",
24126 pci_name(dev));
24127 - atomic_inc(&pci_nonparity_count);
24128 + atomic_inc_unchecked(&pci_nonparity_count);
24129 }
24130
24131 if (status & (PCI_STATUS_PARITY)) {
24132 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24133 "Master Data Parity Error on "
24134 "%s\n", pci_name(dev));
24135
24136 - atomic_inc(&pci_parity_count);
24137 + atomic_inc_unchecked(&pci_parity_count);
24138 }
24139
24140 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24141 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24142 "Detected Parity Error on %s\n",
24143 pci_name(dev));
24144
24145 - atomic_inc(&pci_parity_count);
24146 + atomic_inc_unchecked(&pci_parity_count);
24147 }
24148 }
24149 }
24150 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24151 if (!check_pci_errors)
24152 return;
24153
24154 - before_count = atomic_read(&pci_parity_count);
24155 + before_count = atomic_read_unchecked(&pci_parity_count);
24156
24157 /* scan all PCI devices looking for a Parity Error on devices and
24158 * bridges.
24159 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24160 /* Only if operator has selected panic on PCI Error */
24161 if (edac_pci_get_panic_on_pe()) {
24162 /* If the count is different 'after' from 'before' */
24163 - if (before_count != atomic_read(&pci_parity_count))
24164 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24165 panic("EDAC: PCI Parity Error");
24166 }
24167 }
24168 diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
24169 --- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24170 +++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24171 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24172 bool (*dc_mce)(u16, u8);
24173 bool (*ic_mce)(u16, u8);
24174 bool (*nb_mce)(u16, u8);
24175 -};
24176 +} __no_const;
24177
24178 void amd_report_gart_errors(bool);
24179 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24180 diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
24181 --- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24182 +++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24183 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24184
24185 void fw_core_remove_card(struct fw_card *card)
24186 {
24187 - struct fw_card_driver dummy_driver = dummy_driver_template;
24188 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24189
24190 card->driver->update_phy_reg(card, 4,
24191 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24192 diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
24193 --- linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24194 +++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24195 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24196 int ret;
24197
24198 if ((request->channels == 0 && request->bandwidth == 0) ||
24199 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24200 - request->bandwidth < 0)
24201 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24202 return -EINVAL;
24203
24204 r = kmalloc(sizeof(*r), GFP_KERNEL);
24205 diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
24206 --- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24207 +++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24208 @@ -101,6 +101,7 @@ struct fw_card_driver {
24209
24210 int (*stop_iso)(struct fw_iso_context *ctx);
24211 };
24212 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24213
24214 void fw_card_initialize(struct fw_card *card,
24215 const struct fw_card_driver *driver, struct device *device);
24216 diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
24217 --- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24218 +++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24219 @@ -37,6 +37,7 @@
24220 #include <linux/timer.h>
24221 #include <linux/types.h>
24222 #include <linux/workqueue.h>
24223 +#include <linux/sched.h>
24224
24225 #include <asm/byteorder.h>
24226
24227 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24228 struct transaction_callback_data d;
24229 struct fw_transaction t;
24230
24231 + pax_track_stack();
24232 +
24233 init_timer_on_stack(&t.split_timeout_timer);
24234 init_completion(&d.done);
24235 d.payload = payload;
24236 diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
24237 --- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24238 +++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24239 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24240 }
24241 }
24242 else {
24243 - /*
24244 - * no iounmap() for that ioremap(); it would be a no-op, but
24245 - * it's so early in setup that sucker gets confused into doing
24246 - * what it shouldn't if we actually call it.
24247 - */
24248 p = dmi_ioremap(0xF0000, 0x10000);
24249 if (p == NULL)
24250 goto error;
24251 diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
24252 --- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24253 +++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24254 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24255 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24256 maskl, pendl, maskh, pendh);
24257
24258 - atomic_inc(&irq_err_count);
24259 + atomic_inc_unchecked(&irq_err_count);
24260
24261 return -EINVAL;
24262 }
24263 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
24264 --- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24265 +++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24266 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24267 struct drm_crtc *tmp;
24268 int crtc_mask = 1;
24269
24270 - WARN(!crtc, "checking null crtc?\n");
24271 + BUG_ON(!crtc);
24272
24273 dev = crtc->dev;
24274
24275 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24276 struct drm_encoder *encoder;
24277 bool ret = true;
24278
24279 + pax_track_stack();
24280 +
24281 crtc->enabled = drm_helper_crtc_in_use(crtc);
24282 if (!crtc->enabled)
24283 return true;
24284 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
24285 --- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24286 +++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24287 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24288
24289 dev = file_priv->minor->dev;
24290 atomic_inc(&dev->ioctl_count);
24291 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24292 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24293 ++file_priv->ioctl_count;
24294
24295 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24296 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
24297 --- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24298 +++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24299 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24300 }
24301
24302 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24303 - atomic_set(&dev->counts[i], 0);
24304 + atomic_set_unchecked(&dev->counts[i], 0);
24305
24306 dev->sigdata.lock = NULL;
24307
24308 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24309
24310 retcode = drm_open_helper(inode, filp, dev);
24311 if (!retcode) {
24312 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24313 - if (!dev->open_count++)
24314 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24315 + if (local_inc_return(&dev->open_count) == 1)
24316 retcode = drm_setup(dev);
24317 }
24318 if (!retcode) {
24319 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24320
24321 mutex_lock(&drm_global_mutex);
24322
24323 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24324 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24325
24326 if (dev->driver->preclose)
24327 dev->driver->preclose(dev, file_priv);
24328 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24329 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24330 task_pid_nr(current),
24331 (long)old_encode_dev(file_priv->minor->device),
24332 - dev->open_count);
24333 + local_read(&dev->open_count));
24334
24335 /* if the master has gone away we can't do anything with the lock */
24336 if (file_priv->minor->master)
24337 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24338 * End inline drm_release
24339 */
24340
24341 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24342 - if (!--dev->open_count) {
24343 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24344 + if (local_dec_and_test(&dev->open_count)) {
24345 if (atomic_read(&dev->ioctl_count)) {
24346 DRM_ERROR("Device busy: %d\n",
24347 atomic_read(&dev->ioctl_count));
24348 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
24349 --- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24350 +++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24351 @@ -36,7 +36,7 @@
24352 struct drm_global_item {
24353 struct mutex mutex;
24354 void *object;
24355 - int refcount;
24356 + atomic_t refcount;
24357 };
24358
24359 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24360 @@ -49,7 +49,7 @@ void drm_global_init(void)
24361 struct drm_global_item *item = &glob[i];
24362 mutex_init(&item->mutex);
24363 item->object = NULL;
24364 - item->refcount = 0;
24365 + atomic_set(&item->refcount, 0);
24366 }
24367 }
24368
24369 @@ -59,7 +59,7 @@ void drm_global_release(void)
24370 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24371 struct drm_global_item *item = &glob[i];
24372 BUG_ON(item->object != NULL);
24373 - BUG_ON(item->refcount != 0);
24374 + BUG_ON(atomic_read(&item->refcount) != 0);
24375 }
24376 }
24377
24378 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24379 void *object;
24380
24381 mutex_lock(&item->mutex);
24382 - if (item->refcount == 0) {
24383 + if (atomic_read(&item->refcount) == 0) {
24384 item->object = kzalloc(ref->size, GFP_KERNEL);
24385 if (unlikely(item->object == NULL)) {
24386 ret = -ENOMEM;
24387 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24388 goto out_err;
24389
24390 }
24391 - ++item->refcount;
24392 + atomic_inc(&item->refcount);
24393 ref->object = item->object;
24394 object = item->object;
24395 mutex_unlock(&item->mutex);
24396 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24397 struct drm_global_item *item = &glob[ref->global_type];
24398
24399 mutex_lock(&item->mutex);
24400 - BUG_ON(item->refcount == 0);
24401 + BUG_ON(atomic_read(&item->refcount) == 0);
24402 BUG_ON(ref->object != item->object);
24403 - if (--item->refcount == 0) {
24404 + if (atomic_dec_and_test(&item->refcount)) {
24405 ref->release(ref);
24406 item->object = NULL;
24407 }
24408 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
24409 --- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24410 +++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24411 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24412 struct drm_local_map *map;
24413 struct drm_map_list *r_list;
24414
24415 - /* Hardcoded from _DRM_FRAME_BUFFER,
24416 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24417 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24418 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24419 + static const char * const types[] = {
24420 + [_DRM_FRAME_BUFFER] = "FB",
24421 + [_DRM_REGISTERS] = "REG",
24422 + [_DRM_SHM] = "SHM",
24423 + [_DRM_AGP] = "AGP",
24424 + [_DRM_SCATTER_GATHER] = "SG",
24425 + [_DRM_CONSISTENT] = "PCI",
24426 + [_DRM_GEM] = "GEM" };
24427 const char *type;
24428 int i;
24429
24430 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24431 map = r_list->map;
24432 if (!map)
24433 continue;
24434 - if (map->type < 0 || map->type > 5)
24435 + if (map->type >= ARRAY_SIZE(types))
24436 type = "??";
24437 else
24438 type = types[map->type];
24439 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24440 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24441 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24442 vma->vm_flags & VM_IO ? 'i' : '-',
24443 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24444 + 0);
24445 +#else
24446 vma->vm_pgoff);
24447 +#endif
24448
24449 #if defined(__i386__)
24450 pgprot = pgprot_val(vma->vm_page_prot);
24451 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
24452 --- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24453 +++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24454 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24455 stats->data[i].value =
24456 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24457 else
24458 - stats->data[i].value = atomic_read(&dev->counts[i]);
24459 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24460 stats->data[i].type = dev->types[i];
24461 }
24462
24463 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
24464 --- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24465 +++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24466 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24467 if (drm_lock_take(&master->lock, lock->context)) {
24468 master->lock.file_priv = file_priv;
24469 master->lock.lock_time = jiffies;
24470 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24471 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24472 break; /* Got lock */
24473 }
24474
24475 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24476 return -EINVAL;
24477 }
24478
24479 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24480 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24481
24482 if (drm_lock_free(&master->lock, lock->context)) {
24483 /* FIXME: Should really bail out here. */
24484 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
24485 --- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24486 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24487 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24488 dma->buflist[vertex->idx],
24489 vertex->discard, vertex->used);
24490
24491 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24492 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24493 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24494 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24495 sarea_priv->last_enqueue = dev_priv->counter - 1;
24496 sarea_priv->last_dispatch = (int)hw_status[5];
24497
24498 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24499 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24500 mc->last_render);
24501
24502 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24503 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24504 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24505 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24506 sarea_priv->last_enqueue = dev_priv->counter - 1;
24507 sarea_priv->last_dispatch = (int)hw_status[5];
24508
24509 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
24510 --- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24511 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24512 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24513 int page_flipping;
24514
24515 wait_queue_head_t irq_queue;
24516 - atomic_t irq_received;
24517 - atomic_t irq_emitted;
24518 + atomic_unchecked_t irq_received;
24519 + atomic_unchecked_t irq_emitted;
24520
24521 int front_offset;
24522 } drm_i810_private_t;
24523 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
24524 --- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24525 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24526 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24527 I915_READ(GTIMR));
24528 }
24529 seq_printf(m, "Interrupts received: %d\n",
24530 - atomic_read(&dev_priv->irq_received));
24531 + atomic_read_unchecked(&dev_priv->irq_received));
24532 for (i = 0; i < I915_NUM_RINGS; i++) {
24533 if (IS_GEN6(dev)) {
24534 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24535 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
24536 --- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24537 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24538 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24539 bool can_switch;
24540
24541 spin_lock(&dev->count_lock);
24542 - can_switch = (dev->open_count == 0);
24543 + can_switch = (local_read(&dev->open_count) == 0);
24544 spin_unlock(&dev->count_lock);
24545 return can_switch;
24546 }
24547 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
24548 --- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24549 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24550 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24551 /* render clock increase/decrease */
24552 /* display clock increase/decrease */
24553 /* pll clock increase/decrease */
24554 -};
24555 +} __no_const;
24556
24557 struct intel_device_info {
24558 u8 gen;
24559 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24560 int current_page;
24561 int page_flipping;
24562
24563 - atomic_t irq_received;
24564 + atomic_unchecked_t irq_received;
24565
24566 /* protects the irq masks */
24567 spinlock_t irq_lock;
24568 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24569 * will be page flipped away on the next vblank. When it
24570 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24571 */
24572 - atomic_t pending_flip;
24573 + atomic_unchecked_t pending_flip;
24574 };
24575
24576 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24577 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24578 extern void intel_teardown_gmbus(struct drm_device *dev);
24579 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24580 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24581 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24582 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24583 {
24584 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24585 }
24586 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24587 --- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24588 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24589 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24590 i915_gem_clflush_object(obj);
24591
24592 if (obj->base.pending_write_domain)
24593 - cd->flips |= atomic_read(&obj->pending_flip);
24594 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24595
24596 /* The actual obj->write_domain will be updated with
24597 * pending_write_domain after we emit the accumulated flush for all
24598 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
24599 --- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24600 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24601 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24602 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24603 struct drm_i915_master_private *master_priv;
24604
24605 - atomic_inc(&dev_priv->irq_received);
24606 + atomic_inc_unchecked(&dev_priv->irq_received);
24607
24608 /* disable master interrupt before clearing iir */
24609 de_ier = I915_READ(DEIER);
24610 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24611 struct drm_i915_master_private *master_priv;
24612 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24613
24614 - atomic_inc(&dev_priv->irq_received);
24615 + atomic_inc_unchecked(&dev_priv->irq_received);
24616
24617 if (IS_GEN6(dev))
24618 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24619 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24620 int ret = IRQ_NONE, pipe;
24621 bool blc_event = false;
24622
24623 - atomic_inc(&dev_priv->irq_received);
24624 + atomic_inc_unchecked(&dev_priv->irq_received);
24625
24626 iir = I915_READ(IIR);
24627
24628 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24629 {
24630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24631
24632 - atomic_set(&dev_priv->irq_received, 0);
24633 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24634
24635 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24636 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24637 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24638 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24639 int pipe;
24640
24641 - atomic_set(&dev_priv->irq_received, 0);
24642 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24643
24644 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24645 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24646 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
24647 --- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24648 +++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24649 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24650
24651 wait_event(dev_priv->pending_flip_queue,
24652 atomic_read(&dev_priv->mm.wedged) ||
24653 - atomic_read(&obj->pending_flip) == 0);
24654 + atomic_read_unchecked(&obj->pending_flip) == 0);
24655
24656 /* Big Hammer, we also need to ensure that any pending
24657 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24658 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24659 obj = to_intel_framebuffer(crtc->fb)->obj;
24660 dev_priv = crtc->dev->dev_private;
24661 wait_event(dev_priv->pending_flip_queue,
24662 - atomic_read(&obj->pending_flip) == 0);
24663 + atomic_read_unchecked(&obj->pending_flip) == 0);
24664 }
24665
24666 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24667 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24668
24669 atomic_clear_mask(1 << intel_crtc->plane,
24670 &obj->pending_flip.counter);
24671 - if (atomic_read(&obj->pending_flip) == 0)
24672 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24673 wake_up(&dev_priv->pending_flip_queue);
24674
24675 schedule_work(&work->work);
24676 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24677 /* Block clients from rendering to the new back buffer until
24678 * the flip occurs and the object is no longer visible.
24679 */
24680 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24681 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24682
24683 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24684 if (ret)
24685 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24686 return 0;
24687
24688 cleanup_pending:
24689 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24690 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24691 cleanup_objs:
24692 drm_gem_object_unreference(&work->old_fb_obj->base);
24693 drm_gem_object_unreference(&obj->base);
24694 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
24695 --- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24696 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24697 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24698 u32 clear_cmd;
24699 u32 maccess;
24700
24701 - atomic_t vbl_received; /**< Number of vblanks received. */
24702 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24703 wait_queue_head_t fence_queue;
24704 - atomic_t last_fence_retired;
24705 + atomic_unchecked_t last_fence_retired;
24706 u32 next_fence_to_post;
24707
24708 unsigned int fb_cpp;
24709 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
24710 --- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24711 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24712 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24713 if (crtc != 0)
24714 return 0;
24715
24716 - return atomic_read(&dev_priv->vbl_received);
24717 + return atomic_read_unchecked(&dev_priv->vbl_received);
24718 }
24719
24720
24721 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24722 /* VBLANK interrupt */
24723 if (status & MGA_VLINEPEN) {
24724 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24725 - atomic_inc(&dev_priv->vbl_received);
24726 + atomic_inc_unchecked(&dev_priv->vbl_received);
24727 drm_handle_vblank(dev, 0);
24728 handled = 1;
24729 }
24730 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24731 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24732 MGA_WRITE(MGA_PRIMEND, prim_end);
24733
24734 - atomic_inc(&dev_priv->last_fence_retired);
24735 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24736 DRM_WAKEUP(&dev_priv->fence_queue);
24737 handled = 1;
24738 }
24739 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24740 * using fences.
24741 */
24742 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24743 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24744 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24745 - *sequence) <= (1 << 23)));
24746
24747 *sequence = cur_fence;
24748 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
24749 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24750 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
24751 @@ -200,7 +200,7 @@ struct methods {
24752 const char desc[8];
24753 void (*loadbios)(struct drm_device *, uint8_t *);
24754 const bool rw;
24755 -};
24756 +} __do_const;
24757
24758 static struct methods shadow_methods[] = {
24759 { "PRAMIN", load_vbios_pramin, true },
24760 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24761 struct bit_table {
24762 const char id;
24763 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24764 -};
24765 +} __no_const;
24766
24767 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24768
24769 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24770 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24771 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24772 @@ -227,7 +227,7 @@ struct nouveau_channel {
24773 struct list_head pending;
24774 uint32_t sequence;
24775 uint32_t sequence_ack;
24776 - atomic_t last_sequence_irq;
24777 + atomic_unchecked_t last_sequence_irq;
24778 } fence;
24779
24780 /* DMA push buffer */
24781 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24782 u32 handle, u16 class);
24783 void (*set_tile_region)(struct drm_device *dev, int i);
24784 void (*tlb_flush)(struct drm_device *, int engine);
24785 -};
24786 +} __no_const;
24787
24788 struct nouveau_instmem_engine {
24789 void *priv;
24790 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24791 struct nouveau_mc_engine {
24792 int (*init)(struct drm_device *dev);
24793 void (*takedown)(struct drm_device *dev);
24794 -};
24795 +} __no_const;
24796
24797 struct nouveau_timer_engine {
24798 int (*init)(struct drm_device *dev);
24799 void (*takedown)(struct drm_device *dev);
24800 uint64_t (*read)(struct drm_device *dev);
24801 -};
24802 +} __no_const;
24803
24804 struct nouveau_fb_engine {
24805 int num_tiles;
24806 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24807 void (*put)(struct drm_device *, struct nouveau_mem **);
24808
24809 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24810 -};
24811 +} __no_const;
24812
24813 struct nouveau_engine {
24814 struct nouveau_instmem_engine instmem;
24815 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
24816 struct drm_global_reference mem_global_ref;
24817 struct ttm_bo_global_ref bo_global_ref;
24818 struct ttm_bo_device bdev;
24819 - atomic_t validate_sequence;
24820 + atomic_unchecked_t validate_sequence;
24821 } ttm;
24822
24823 struct {
24824 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24825 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24826 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24827 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24828 if (USE_REFCNT(dev))
24829 sequence = nvchan_rd32(chan, 0x48);
24830 else
24831 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24832 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24833
24834 if (chan->fence.sequence_ack == sequence)
24835 goto out;
24836 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24837
24838 INIT_LIST_HEAD(&chan->fence.pending);
24839 spin_lock_init(&chan->fence.lock);
24840 - atomic_set(&chan->fence.last_sequence_irq, 0);
24841 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24842 return 0;
24843 }
24844
24845 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24846 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24847 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24848 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24849 int trycnt = 0;
24850 int ret, i;
24851
24852 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24853 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24854 retry:
24855 if (++trycnt > 100000) {
24856 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24857 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
24858 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24859 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24860 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24861 bool can_switch;
24862
24863 spin_lock(&dev->count_lock);
24864 - can_switch = (dev->open_count == 0);
24865 + can_switch = (local_read(&dev->open_count) == 0);
24866 spin_unlock(&dev->count_lock);
24867 return can_switch;
24868 }
24869 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
24870 --- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24871 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24872 @@ -560,7 +560,7 @@ static int
24873 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24874 u32 class, u32 mthd, u32 data)
24875 {
24876 - atomic_set(&chan->fence.last_sequence_irq, data);
24877 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24878 return 0;
24879 }
24880
24881 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
24882 --- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24883 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24884 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24885
24886 /* GH: Simple idle check.
24887 */
24888 - atomic_set(&dev_priv->idle_count, 0);
24889 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24890
24891 /* We don't support anything other than bus-mastering ring mode,
24892 * but the ring can be in either AGP or PCI space for the ring
24893 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
24894 --- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24895 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24896 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24897 int is_pci;
24898 unsigned long cce_buffers_offset;
24899
24900 - atomic_t idle_count;
24901 + atomic_unchecked_t idle_count;
24902
24903 int page_flipping;
24904 int current_page;
24905 u32 crtc_offset;
24906 u32 crtc_offset_cntl;
24907
24908 - atomic_t vbl_received;
24909 + atomic_unchecked_t vbl_received;
24910
24911 u32 color_fmt;
24912 unsigned int front_offset;
24913 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
24914 --- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24915 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24916 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24917 if (crtc != 0)
24918 return 0;
24919
24920 - return atomic_read(&dev_priv->vbl_received);
24921 + return atomic_read_unchecked(&dev_priv->vbl_received);
24922 }
24923
24924 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24925 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24926 /* VBLANK interrupt */
24927 if (status & R128_CRTC_VBLANK_INT) {
24928 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24929 - atomic_inc(&dev_priv->vbl_received);
24930 + atomic_inc_unchecked(&dev_priv->vbl_received);
24931 drm_handle_vblank(dev, 0);
24932 return IRQ_HANDLED;
24933 }
24934 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
24935 --- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24936 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24937 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24938
24939 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24940 {
24941 - if (atomic_read(&dev_priv->idle_count) == 0)
24942 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24943 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24944 else
24945 - atomic_set(&dev_priv->idle_count, 0);
24946 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24947 }
24948
24949 #endif
24950 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
24951 --- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24952 +++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24953 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24954 char name[512];
24955 int i;
24956
24957 + pax_track_stack();
24958 +
24959 ctx->card = card;
24960 ctx->bios = bios;
24961
24962 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
24963 --- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24964 +++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24965 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24966 regex_t mask_rex;
24967 regmatch_t match[4];
24968 char buf[1024];
24969 - size_t end;
24970 + long end;
24971 int len;
24972 int done = 0;
24973 int r;
24974 unsigned o;
24975 struct offset *offset;
24976 char last_reg_s[10];
24977 - int last_reg;
24978 + unsigned long last_reg;
24979
24980 if (regcomp
24981 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24982 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
24983 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24984 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24985 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24986 struct radeon_gpio_rec gpio;
24987 struct radeon_hpd hpd;
24988
24989 + pax_track_stack();
24990 +
24991 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24992 return false;
24993
24994 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
24995 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
24996 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
24997 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
24998 bool can_switch;
24999
25000 spin_lock(&dev->count_lock);
25001 - can_switch = (dev->open_count == 0);
25002 + can_switch = (local_read(&dev->open_count) == 0);
25003 spin_unlock(&dev->count_lock);
25004 return can_switch;
25005 }
25006 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
25007 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
25008 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25009 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25010 uint32_t post_div;
25011 u32 pll_out_min, pll_out_max;
25012
25013 + pax_track_stack();
25014 +
25015 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25016 freq = freq * 1000;
25017
25018 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
25019 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25020 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25021 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25022
25023 /* SW interrupt */
25024 wait_queue_head_t swi_queue;
25025 - atomic_t swi_emitted;
25026 + atomic_unchecked_t swi_emitted;
25027 int vblank_crtc;
25028 uint32_t irq_enable_reg;
25029 uint32_t r500_disp_irq_reg;
25030 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
25031 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25032 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25033 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25034 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25035 return 0;
25036 }
25037 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25038 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25039 if (!rdev->cp.ready)
25040 /* FIXME: cp is not running assume everythings is done right
25041 * away
25042 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25043 return r;
25044 }
25045 radeon_fence_write(rdev, 0);
25046 - atomic_set(&rdev->fence_drv.seq, 0);
25047 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25048 INIT_LIST_HEAD(&rdev->fence_drv.created);
25049 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25050 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25051 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
25052 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25053 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25054 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25055 */
25056 struct radeon_fence_driver {
25057 uint32_t scratch_reg;
25058 - atomic_t seq;
25059 + atomic_unchecked_t seq;
25060 uint32_t last_seq;
25061 unsigned long last_jiffies;
25062 unsigned long last_timeout;
25063 @@ -960,7 +960,7 @@ struct radeon_asic {
25064 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25065 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25066 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25067 -};
25068 +} __no_const;
25069
25070 /*
25071 * Asic structures
25072 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25073 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25074 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25075 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25076 request = compat_alloc_user_space(sizeof(*request));
25077 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25078 || __put_user(req32.param, &request->param)
25079 - || __put_user((void __user *)(unsigned long)req32.value,
25080 + || __put_user((unsigned long)req32.value,
25081 &request->value))
25082 return -EFAULT;
25083
25084 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
25085 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25086 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25087 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25088 unsigned int ret;
25089 RING_LOCALS;
25090
25091 - atomic_inc(&dev_priv->swi_emitted);
25092 - ret = atomic_read(&dev_priv->swi_emitted);
25093 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25094 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25095
25096 BEGIN_RING(4);
25097 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25098 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25099 drm_radeon_private_t *dev_priv =
25100 (drm_radeon_private_t *) dev->dev_private;
25101
25102 - atomic_set(&dev_priv->swi_emitted, 0);
25103 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25104 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25105
25106 dev->max_vblank_count = 0x001fffff;
25107 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
25108 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25109 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25110 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25111 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25112 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25113
25114 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25115 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25116 sarea_priv->nbox * sizeof(depth_boxes[0])))
25117 return -EFAULT;
25118
25119 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25120 {
25121 drm_radeon_private_t *dev_priv = dev->dev_private;
25122 drm_radeon_getparam_t *param = data;
25123 - int value;
25124 + int value = 0;
25125
25126 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25127
25128 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
25129 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25130 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25131 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25132 }
25133 if (unlikely(ttm_vm_ops == NULL)) {
25134 ttm_vm_ops = vma->vm_ops;
25135 - radeon_ttm_vm_ops = *ttm_vm_ops;
25136 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25137 + pax_open_kernel();
25138 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25139 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25140 + pax_close_kernel();
25141 }
25142 vma->vm_ops = &radeon_ttm_vm_ops;
25143 return 0;
25144 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
25145 --- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25146 +++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25147 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25148 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25149 rdev->pm.sideport_bandwidth.full)
25150 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25151 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25152 + read_delay_latency.full = dfixed_const(800 * 1000);
25153 read_delay_latency.full = dfixed_div(read_delay_latency,
25154 rdev->pm.igp_sideport_mclk);
25155 + a.full = dfixed_const(370);
25156 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25157 } else {
25158 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25159 rdev->pm.k8_bandwidth.full)
25160 diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25161 --- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25162 +++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25163 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25164 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25165 struct shrink_control *sc)
25166 {
25167 - static atomic_t start_pool = ATOMIC_INIT(0);
25168 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25169 unsigned i;
25170 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25171 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25172 struct ttm_page_pool *pool;
25173 int shrink_pages = sc->nr_to_scan;
25174
25175 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
25176 --- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25177 +++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25178 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25179 typedef uint32_t maskarray_t[5];
25180
25181 typedef struct drm_via_irq {
25182 - atomic_t irq_received;
25183 + atomic_unchecked_t irq_received;
25184 uint32_t pending_mask;
25185 uint32_t enable_mask;
25186 wait_queue_head_t irq_queue;
25187 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25188 struct timeval last_vblank;
25189 int last_vblank_valid;
25190 unsigned usec_per_vblank;
25191 - atomic_t vbl_received;
25192 + atomic_unchecked_t vbl_received;
25193 drm_via_state_t hc_state;
25194 char pci_buf[VIA_PCI_BUF_SIZE];
25195 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25196 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
25197 --- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25198 +++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25199 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25200 if (crtc != 0)
25201 return 0;
25202
25203 - return atomic_read(&dev_priv->vbl_received);
25204 + return atomic_read_unchecked(&dev_priv->vbl_received);
25205 }
25206
25207 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25208 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25209
25210 status = VIA_READ(VIA_REG_INTERRUPT);
25211 if (status & VIA_IRQ_VBLANK_PENDING) {
25212 - atomic_inc(&dev_priv->vbl_received);
25213 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25214 + atomic_inc_unchecked(&dev_priv->vbl_received);
25215 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25216 do_gettimeofday(&cur_vblank);
25217 if (dev_priv->last_vblank_valid) {
25218 dev_priv->usec_per_vblank =
25219 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25220 dev_priv->last_vblank = cur_vblank;
25221 dev_priv->last_vblank_valid = 1;
25222 }
25223 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25224 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25225 DRM_DEBUG("US per vblank is: %u\n",
25226 dev_priv->usec_per_vblank);
25227 }
25228 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25229
25230 for (i = 0; i < dev_priv->num_irqs; ++i) {
25231 if (status & cur_irq->pending_mask) {
25232 - atomic_inc(&cur_irq->irq_received);
25233 + atomic_inc_unchecked(&cur_irq->irq_received);
25234 DRM_WAKEUP(&cur_irq->irq_queue);
25235 handled = 1;
25236 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25237 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25238 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25239 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25240 masks[irq][4]));
25241 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25242 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25243 } else {
25244 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25245 (((cur_irq_sequence =
25246 - atomic_read(&cur_irq->irq_received)) -
25247 + atomic_read_unchecked(&cur_irq->irq_received)) -
25248 *sequence) <= (1 << 23)));
25249 }
25250 *sequence = cur_irq_sequence;
25251 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25252 }
25253
25254 for (i = 0; i < dev_priv->num_irqs; ++i) {
25255 - atomic_set(&cur_irq->irq_received, 0);
25256 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25257 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25258 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25259 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25260 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25261 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25262 case VIA_IRQ_RELATIVE:
25263 irqwait->request.sequence +=
25264 - atomic_read(&cur_irq->irq_received);
25265 + atomic_read_unchecked(&cur_irq->irq_received);
25266 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25267 case VIA_IRQ_ABSOLUTE:
25268 break;
25269 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25270 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25271 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25272 @@ -240,7 +240,7 @@ struct vmw_private {
25273 * Fencing and IRQs.
25274 */
25275
25276 - atomic_t fence_seq;
25277 + atomic_unchecked_t fence_seq;
25278 wait_queue_head_t fence_queue;
25279 wait_queue_head_t fifo_queue;
25280 atomic_t fence_queue_waiters;
25281 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25282 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25283 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25284 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25285 while (!vmw_lag_lt(queue, us)) {
25286 spin_lock(&queue->lock);
25287 if (list_empty(&queue->head))
25288 - sequence = atomic_read(&dev_priv->fence_seq);
25289 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25290 else {
25291 fence = list_first_entry(&queue->head,
25292 struct vmw_fence, head);
25293 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25294 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25295 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25296 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25297 (unsigned int) min,
25298 (unsigned int) fifo->capabilities);
25299
25300 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25301 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25302 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25303 vmw_fence_queue_init(&fifo->fence_queue);
25304 return vmw_fifo_send_fence(dev_priv, &dummy);
25305 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25306
25307 fm = vmw_fifo_reserve(dev_priv, bytes);
25308 if (unlikely(fm == NULL)) {
25309 - *sequence = atomic_read(&dev_priv->fence_seq);
25310 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25311 ret = -ENOMEM;
25312 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25313 false, 3*HZ);
25314 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25315 }
25316
25317 do {
25318 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25319 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25320 } while (*sequence == 0);
25321
25322 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25323 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25324 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25325 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25326 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25327 * emitted. Then the fence is stale and signaled.
25328 */
25329
25330 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25331 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25332 > VMW_FENCE_WRAP);
25333
25334 return ret;
25335 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25336
25337 if (fifo_idle)
25338 down_read(&fifo_state->rwsem);
25339 - signal_seq = atomic_read(&dev_priv->fence_seq);
25340 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25341 ret = 0;
25342
25343 for (;;) {
25344 diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
25345 --- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25346 +++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25347 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25348
25349 int hid_add_device(struct hid_device *hdev)
25350 {
25351 - static atomic_t id = ATOMIC_INIT(0);
25352 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25353 int ret;
25354
25355 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25356 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25357 /* XXX hack, any other cleaner solution after the driver core
25358 * is converted to allow more than 20 bytes as the device name? */
25359 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25360 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25361 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25362
25363 hid_debug_register(hdev, dev_name(&hdev->dev));
25364 ret = device_add(&hdev->dev);
25365 diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
25366 --- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25367 +++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25368 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25369 break;
25370
25371 case HIDIOCAPPLICATION:
25372 - if (arg < 0 || arg >= hid->maxapplication)
25373 + if (arg >= hid->maxapplication)
25374 break;
25375
25376 for (i = 0; i < hid->maxcollection; i++)
25377 diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
25378 --- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25379 +++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25380 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25381 return res;
25382
25383 temp /= 1000;
25384 - if (temp < 0)
25385 - return -EINVAL;
25386
25387 mutex_lock(&resource->lock);
25388 resource->trip[attr->index - 7] = temp;
25389 diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
25390 --- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25391 +++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25392 @@ -166,7 +166,7 @@ struct sht15_data {
25393 int supply_uV;
25394 bool supply_uV_valid;
25395 struct work_struct update_supply_work;
25396 - atomic_t interrupt_handled;
25397 + atomic_unchecked_t interrupt_handled;
25398 };
25399
25400 /**
25401 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25402 return ret;
25403
25404 gpio_direction_input(data->pdata->gpio_data);
25405 - atomic_set(&data->interrupt_handled, 0);
25406 + atomic_set_unchecked(&data->interrupt_handled, 0);
25407
25408 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25409 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25410 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25411 /* Only relevant if the interrupt hasn't occurred. */
25412 - if (!atomic_read(&data->interrupt_handled))
25413 + if (!atomic_read_unchecked(&data->interrupt_handled))
25414 schedule_work(&data->read_work);
25415 }
25416 ret = wait_event_timeout(data->wait_queue,
25417 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25418
25419 /* First disable the interrupt */
25420 disable_irq_nosync(irq);
25421 - atomic_inc(&data->interrupt_handled);
25422 + atomic_inc_unchecked(&data->interrupt_handled);
25423 /* Then schedule a reading work struct */
25424 if (data->state != SHT15_READING_NOTHING)
25425 schedule_work(&data->read_work);
25426 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25427 * If not, then start the interrupt again - care here as could
25428 * have gone low in meantime so verify it hasn't!
25429 */
25430 - atomic_set(&data->interrupt_handled, 0);
25431 + atomic_set_unchecked(&data->interrupt_handled, 0);
25432 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25433 /* If still not occurred or another handler has been scheduled */
25434 if (gpio_get_value(data->pdata->gpio_data)
25435 - || atomic_read(&data->interrupt_handled))
25436 + || atomic_read_unchecked(&data->interrupt_handled))
25437 return;
25438 }
25439
25440 diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
25441 --- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25442 +++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25443 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25444 struct i2c_board_info *info);
25445 static int w83791d_remove(struct i2c_client *client);
25446
25447 -static int w83791d_read(struct i2c_client *client, u8 register);
25448 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25449 +static int w83791d_read(struct i2c_client *client, u8 reg);
25450 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25451 static struct w83791d_data *w83791d_update_device(struct device *dev);
25452
25453 #ifdef DEBUG
25454 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
25455 --- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25456 +++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25457 @@ -43,7 +43,7 @@
25458 extern struct i2c_adapter amd756_smbus;
25459
25460 static struct i2c_adapter *s4882_adapter;
25461 -static struct i2c_algorithm *s4882_algo;
25462 +static i2c_algorithm_no_const *s4882_algo;
25463
25464 /* Wrapper access functions for multiplexed SMBus */
25465 static DEFINE_MUTEX(amd756_lock);
25466 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25467 --- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25468 +++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25469 @@ -41,7 +41,7 @@
25470 extern struct i2c_adapter *nforce2_smbus;
25471
25472 static struct i2c_adapter *s4985_adapter;
25473 -static struct i2c_algorithm *s4985_algo;
25474 +static i2c_algorithm_no_const *s4985_algo;
25475
25476 /* Wrapper access functions for multiplexed SMBus */
25477 static DEFINE_MUTEX(nforce2_lock);
25478 diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
25479 --- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25480 +++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25481 @@ -28,7 +28,7 @@
25482 /* multiplexer per channel data */
25483 struct i2c_mux_priv {
25484 struct i2c_adapter adap;
25485 - struct i2c_algorithm algo;
25486 + i2c_algorithm_no_const algo;
25487
25488 struct i2c_adapter *parent;
25489 void *mux_dev; /* the mux chip/device */
25490 diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
25491 --- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25492 +++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25493 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25494 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25495 if ((unsigned long)buf & alignment
25496 || blk_rq_bytes(rq) & q->dma_pad_mask
25497 - || object_is_on_stack(buf))
25498 + || object_starts_on_stack(buf))
25499 drive->dma = 0;
25500 }
25501 }
25502 diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
25503 --- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25504 +++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25505 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25506 u8 pc_buf[256], header_len, desc_cnt;
25507 int i, rc = 1, blocks, length;
25508
25509 + pax_track_stack();
25510 +
25511 ide_debug_log(IDE_DBG_FUNC, "enter");
25512
25513 drive->bios_cyl = 0;
25514 diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
25515 --- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25516 +++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25517 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25518 int ret, i, n_ports = dev2 ? 4 : 2;
25519 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25520
25521 + pax_track_stack();
25522 +
25523 for (i = 0; i < n_ports / 2; i++) {
25524 ret = ide_setup_pci_controller(pdev[i], d, !i);
25525 if (ret < 0)
25526 diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
25527 --- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25528 +++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25529 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25530
25531 struct cm_counter_group {
25532 struct kobject obj;
25533 - atomic_long_t counter[CM_ATTR_COUNT];
25534 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25535 };
25536
25537 struct cm_counter_attribute {
25538 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25539 struct ib_mad_send_buf *msg = NULL;
25540 int ret;
25541
25542 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25543 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25544 counter[CM_REQ_COUNTER]);
25545
25546 /* Quick state check to discard duplicate REQs. */
25547 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25548 if (!cm_id_priv)
25549 return;
25550
25551 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25552 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25553 counter[CM_REP_COUNTER]);
25554 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25555 if (ret)
25556 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25557 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25558 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25559 spin_unlock_irq(&cm_id_priv->lock);
25560 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25561 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25562 counter[CM_RTU_COUNTER]);
25563 goto out;
25564 }
25565 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25566 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25567 dreq_msg->local_comm_id);
25568 if (!cm_id_priv) {
25569 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25570 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25571 counter[CM_DREQ_COUNTER]);
25572 cm_issue_drep(work->port, work->mad_recv_wc);
25573 return -EINVAL;
25574 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25575 case IB_CM_MRA_REP_RCVD:
25576 break;
25577 case IB_CM_TIMEWAIT:
25578 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25579 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25580 counter[CM_DREQ_COUNTER]);
25581 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25582 goto unlock;
25583 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25584 cm_free_msg(msg);
25585 goto deref;
25586 case IB_CM_DREQ_RCVD:
25587 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25588 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25589 counter[CM_DREQ_COUNTER]);
25590 goto unlock;
25591 default:
25592 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25593 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25594 cm_id_priv->msg, timeout)) {
25595 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25596 - atomic_long_inc(&work->port->
25597 + atomic_long_inc_unchecked(&work->port->
25598 counter_group[CM_RECV_DUPLICATES].
25599 counter[CM_MRA_COUNTER]);
25600 goto out;
25601 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25602 break;
25603 case IB_CM_MRA_REQ_RCVD:
25604 case IB_CM_MRA_REP_RCVD:
25605 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25606 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25607 counter[CM_MRA_COUNTER]);
25608 /* fall through */
25609 default:
25610 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25611 case IB_CM_LAP_IDLE:
25612 break;
25613 case IB_CM_MRA_LAP_SENT:
25614 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25615 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25616 counter[CM_LAP_COUNTER]);
25617 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25618 goto unlock;
25619 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25620 cm_free_msg(msg);
25621 goto deref;
25622 case IB_CM_LAP_RCVD:
25623 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25624 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25625 counter[CM_LAP_COUNTER]);
25626 goto unlock;
25627 default:
25628 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25629 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25630 if (cur_cm_id_priv) {
25631 spin_unlock_irq(&cm.lock);
25632 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25633 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25634 counter[CM_SIDR_REQ_COUNTER]);
25635 goto out; /* Duplicate message. */
25636 }
25637 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25638 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25639 msg->retries = 1;
25640
25641 - atomic_long_add(1 + msg->retries,
25642 + atomic_long_add_unchecked(1 + msg->retries,
25643 &port->counter_group[CM_XMIT].counter[attr_index]);
25644 if (msg->retries)
25645 - atomic_long_add(msg->retries,
25646 + atomic_long_add_unchecked(msg->retries,
25647 &port->counter_group[CM_XMIT_RETRIES].
25648 counter[attr_index]);
25649
25650 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25651 }
25652
25653 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25654 - atomic_long_inc(&port->counter_group[CM_RECV].
25655 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25656 counter[attr_id - CM_ATTR_ID_OFFSET]);
25657
25658 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25659 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25660 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25661
25662 return sprintf(buf, "%ld\n",
25663 - atomic_long_read(&group->counter[cm_attr->index]));
25664 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25665 }
25666
25667 static const struct sysfs_ops cm_counter_ops = {
25668 diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
25669 --- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25670 +++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25671 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25672
25673 struct task_struct *thread;
25674
25675 - atomic_t req_ser;
25676 - atomic_t flush_ser;
25677 + atomic_unchecked_t req_ser;
25678 + atomic_unchecked_t flush_ser;
25679
25680 wait_queue_head_t force_wait;
25681 };
25682 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25683 struct ib_fmr_pool *pool = pool_ptr;
25684
25685 do {
25686 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25687 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25688 ib_fmr_batch_release(pool);
25689
25690 - atomic_inc(&pool->flush_ser);
25691 + atomic_inc_unchecked(&pool->flush_ser);
25692 wake_up_interruptible(&pool->force_wait);
25693
25694 if (pool->flush_function)
25695 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25696 }
25697
25698 set_current_state(TASK_INTERRUPTIBLE);
25699 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25700 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25701 !kthread_should_stop())
25702 schedule();
25703 __set_current_state(TASK_RUNNING);
25704 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25705 pool->dirty_watermark = params->dirty_watermark;
25706 pool->dirty_len = 0;
25707 spin_lock_init(&pool->pool_lock);
25708 - atomic_set(&pool->req_ser, 0);
25709 - atomic_set(&pool->flush_ser, 0);
25710 + atomic_set_unchecked(&pool->req_ser, 0);
25711 + atomic_set_unchecked(&pool->flush_ser, 0);
25712 init_waitqueue_head(&pool->force_wait);
25713
25714 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25715 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25716 }
25717 spin_unlock_irq(&pool->pool_lock);
25718
25719 - serial = atomic_inc_return(&pool->req_ser);
25720 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25721 wake_up_process(pool->thread);
25722
25723 if (wait_event_interruptible(pool->force_wait,
25724 - atomic_read(&pool->flush_ser) - serial >= 0))
25725 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25726 return -EINTR;
25727
25728 return 0;
25729 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25730 } else {
25731 list_add_tail(&fmr->list, &pool->dirty_list);
25732 if (++pool->dirty_len >= pool->dirty_watermark) {
25733 - atomic_inc(&pool->req_ser);
25734 + atomic_inc_unchecked(&pool->req_ser);
25735 wake_up_process(pool->thread);
25736 }
25737 }
25738 diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
25739 --- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25740 +++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25741 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25742 int err;
25743 struct fw_ri_tpte tpt;
25744 u32 stag_idx;
25745 - static atomic_t key;
25746 + static atomic_unchecked_t key;
25747
25748 if (c4iw_fatal_error(rdev))
25749 return -EIO;
25750 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25751 &rdev->resource.tpt_fifo_lock);
25752 if (!stag_idx)
25753 return -ENOMEM;
25754 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25755 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25756 }
25757 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25758 __func__, stag_state, type, pdid, stag_idx);
25759 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
25760 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25761 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25762 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25763 struct infinipath_counters counters;
25764 struct ipath_devdata *dd;
25765
25766 + pax_track_stack();
25767 +
25768 dd = file->f_path.dentry->d_inode->i_private;
25769 dd->ipath_f_read_counters(dd, &counters);
25770
25771 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
25772 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25773 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25774 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25775 struct ib_atomic_eth *ateth;
25776 struct ipath_ack_entry *e;
25777 u64 vaddr;
25778 - atomic64_t *maddr;
25779 + atomic64_unchecked_t *maddr;
25780 u64 sdata;
25781 u32 rkey;
25782 u8 next;
25783 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25784 IB_ACCESS_REMOTE_ATOMIC)))
25785 goto nack_acc_unlck;
25786 /* Perform atomic OP and save result. */
25787 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25788 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25789 sdata = be64_to_cpu(ateth->swap_data);
25790 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25791 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25792 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25793 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25794 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25795 be64_to_cpu(ateth->compare_data),
25796 sdata);
25797 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25798 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25799 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25800 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25801 unsigned long flags;
25802 struct ib_wc wc;
25803 u64 sdata;
25804 - atomic64_t *maddr;
25805 + atomic64_unchecked_t *maddr;
25806 enum ib_wc_status send_status;
25807
25808 /*
25809 @@ -382,11 +382,11 @@ again:
25810 IB_ACCESS_REMOTE_ATOMIC)))
25811 goto acc_err;
25812 /* Perform atomic OP and save result. */
25813 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25814 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25815 sdata = wqe->wr.wr.atomic.compare_add;
25816 *(u64 *) sqp->s_sge.sge.vaddr =
25817 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25818 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25819 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25820 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25821 sdata, wqe->wr.wr.atomic.swap);
25822 goto send_comp;
25823 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
25824 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25825 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25826 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25827 LIST_HEAD(nes_adapter_list);
25828 static LIST_HEAD(nes_dev_list);
25829
25830 -atomic_t qps_destroyed;
25831 +atomic_unchecked_t qps_destroyed;
25832
25833 static unsigned int ee_flsh_adapter;
25834 static unsigned int sysfs_nonidx_addr;
25835 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25836 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25837 struct nes_adapter *nesadapter = nesdev->nesadapter;
25838
25839 - atomic_inc(&qps_destroyed);
25840 + atomic_inc_unchecked(&qps_destroyed);
25841
25842 /* Free the control structures */
25843
25844 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
25845 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25846 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25847 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25848 u32 cm_packets_retrans;
25849 u32 cm_packets_created;
25850 u32 cm_packets_received;
25851 -atomic_t cm_listens_created;
25852 -atomic_t cm_listens_destroyed;
25853 +atomic_unchecked_t cm_listens_created;
25854 +atomic_unchecked_t cm_listens_destroyed;
25855 u32 cm_backlog_drops;
25856 -atomic_t cm_loopbacks;
25857 -atomic_t cm_nodes_created;
25858 -atomic_t cm_nodes_destroyed;
25859 -atomic_t cm_accel_dropped_pkts;
25860 -atomic_t cm_resets_recvd;
25861 +atomic_unchecked_t cm_loopbacks;
25862 +atomic_unchecked_t cm_nodes_created;
25863 +atomic_unchecked_t cm_nodes_destroyed;
25864 +atomic_unchecked_t cm_accel_dropped_pkts;
25865 +atomic_unchecked_t cm_resets_recvd;
25866
25867 static inline int mini_cm_accelerated(struct nes_cm_core *,
25868 struct nes_cm_node *);
25869 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25870
25871 static struct nes_cm_core *g_cm_core;
25872
25873 -atomic_t cm_connects;
25874 -atomic_t cm_accepts;
25875 -atomic_t cm_disconnects;
25876 -atomic_t cm_closes;
25877 -atomic_t cm_connecteds;
25878 -atomic_t cm_connect_reqs;
25879 -atomic_t cm_rejects;
25880 +atomic_unchecked_t cm_connects;
25881 +atomic_unchecked_t cm_accepts;
25882 +atomic_unchecked_t cm_disconnects;
25883 +atomic_unchecked_t cm_closes;
25884 +atomic_unchecked_t cm_connecteds;
25885 +atomic_unchecked_t cm_connect_reqs;
25886 +atomic_unchecked_t cm_rejects;
25887
25888
25889 /**
25890 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25891 kfree(listener);
25892 listener = NULL;
25893 ret = 0;
25894 - atomic_inc(&cm_listens_destroyed);
25895 + atomic_inc_unchecked(&cm_listens_destroyed);
25896 } else {
25897 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25898 }
25899 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25900 cm_node->rem_mac);
25901
25902 add_hte_node(cm_core, cm_node);
25903 - atomic_inc(&cm_nodes_created);
25904 + atomic_inc_unchecked(&cm_nodes_created);
25905
25906 return cm_node;
25907 }
25908 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25909 }
25910
25911 atomic_dec(&cm_core->node_cnt);
25912 - atomic_inc(&cm_nodes_destroyed);
25913 + atomic_inc_unchecked(&cm_nodes_destroyed);
25914 nesqp = cm_node->nesqp;
25915 if (nesqp) {
25916 nesqp->cm_node = NULL;
25917 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25918
25919 static void drop_packet(struct sk_buff *skb)
25920 {
25921 - atomic_inc(&cm_accel_dropped_pkts);
25922 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25923 dev_kfree_skb_any(skb);
25924 }
25925
25926 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25927 {
25928
25929 int reset = 0; /* whether to send reset in case of err.. */
25930 - atomic_inc(&cm_resets_recvd);
25931 + atomic_inc_unchecked(&cm_resets_recvd);
25932 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25933 " refcnt=%d\n", cm_node, cm_node->state,
25934 atomic_read(&cm_node->ref_count));
25935 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25936 rem_ref_cm_node(cm_node->cm_core, cm_node);
25937 return NULL;
25938 }
25939 - atomic_inc(&cm_loopbacks);
25940 + atomic_inc_unchecked(&cm_loopbacks);
25941 loopbackremotenode->loopbackpartner = cm_node;
25942 loopbackremotenode->tcp_cntxt.rcv_wscale =
25943 NES_CM_DEFAULT_RCV_WND_SCALE;
25944 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25945 add_ref_cm_node(cm_node);
25946 } else if (cm_node->state == NES_CM_STATE_TSA) {
25947 rem_ref_cm_node(cm_core, cm_node);
25948 - atomic_inc(&cm_accel_dropped_pkts);
25949 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25950 dev_kfree_skb_any(skb);
25951 break;
25952 }
25953 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25954
25955 if ((cm_id) && (cm_id->event_handler)) {
25956 if (issue_disconn) {
25957 - atomic_inc(&cm_disconnects);
25958 + atomic_inc_unchecked(&cm_disconnects);
25959 cm_event.event = IW_CM_EVENT_DISCONNECT;
25960 cm_event.status = disconn_status;
25961 cm_event.local_addr = cm_id->local_addr;
25962 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25963 }
25964
25965 if (issue_close) {
25966 - atomic_inc(&cm_closes);
25967 + atomic_inc_unchecked(&cm_closes);
25968 nes_disconnect(nesqp, 1);
25969
25970 cm_id->provider_data = nesqp;
25971 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25972
25973 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25974 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25975 - atomic_inc(&cm_accepts);
25976 + atomic_inc_unchecked(&cm_accepts);
25977
25978 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25979 netdev_refcnt_read(nesvnic->netdev));
25980 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25981
25982 struct nes_cm_core *cm_core;
25983
25984 - atomic_inc(&cm_rejects);
25985 + atomic_inc_unchecked(&cm_rejects);
25986 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25987 loopback = cm_node->loopbackpartner;
25988 cm_core = cm_node->cm_core;
25989 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25990 ntohl(cm_id->local_addr.sin_addr.s_addr),
25991 ntohs(cm_id->local_addr.sin_port));
25992
25993 - atomic_inc(&cm_connects);
25994 + atomic_inc_unchecked(&cm_connects);
25995 nesqp->active_conn = 1;
25996
25997 /* cache the cm_id in the qp */
25998 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
25999 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26000 return err;
26001 }
26002 - atomic_inc(&cm_listens_created);
26003 + atomic_inc_unchecked(&cm_listens_created);
26004 }
26005
26006 cm_id->add_ref(cm_id);
26007 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26008 if (nesqp->destroyed) {
26009 return;
26010 }
26011 - atomic_inc(&cm_connecteds);
26012 + atomic_inc_unchecked(&cm_connecteds);
26013 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26014 " local port 0x%04X. jiffies = %lu.\n",
26015 nesqp->hwqp.qp_id,
26016 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26017
26018 cm_id->add_ref(cm_id);
26019 ret = cm_id->event_handler(cm_id, &cm_event);
26020 - atomic_inc(&cm_closes);
26021 + atomic_inc_unchecked(&cm_closes);
26022 cm_event.event = IW_CM_EVENT_CLOSE;
26023 cm_event.status = 0;
26024 cm_event.provider_data = cm_id->provider_data;
26025 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26026 return;
26027 cm_id = cm_node->cm_id;
26028
26029 - atomic_inc(&cm_connect_reqs);
26030 + atomic_inc_unchecked(&cm_connect_reqs);
26031 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26032 cm_node, cm_id, jiffies);
26033
26034 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26035 return;
26036 cm_id = cm_node->cm_id;
26037
26038 - atomic_inc(&cm_connect_reqs);
26039 + atomic_inc_unchecked(&cm_connect_reqs);
26040 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26041 cm_node, cm_id, jiffies);
26042
26043 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
26044 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26045 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26046 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26047 extern unsigned int wqm_quanta;
26048 extern struct list_head nes_adapter_list;
26049
26050 -extern atomic_t cm_connects;
26051 -extern atomic_t cm_accepts;
26052 -extern atomic_t cm_disconnects;
26053 -extern atomic_t cm_closes;
26054 -extern atomic_t cm_connecteds;
26055 -extern atomic_t cm_connect_reqs;
26056 -extern atomic_t cm_rejects;
26057 -extern atomic_t mod_qp_timouts;
26058 -extern atomic_t qps_created;
26059 -extern atomic_t qps_destroyed;
26060 -extern atomic_t sw_qps_destroyed;
26061 +extern atomic_unchecked_t cm_connects;
26062 +extern atomic_unchecked_t cm_accepts;
26063 +extern atomic_unchecked_t cm_disconnects;
26064 +extern atomic_unchecked_t cm_closes;
26065 +extern atomic_unchecked_t cm_connecteds;
26066 +extern atomic_unchecked_t cm_connect_reqs;
26067 +extern atomic_unchecked_t cm_rejects;
26068 +extern atomic_unchecked_t mod_qp_timouts;
26069 +extern atomic_unchecked_t qps_created;
26070 +extern atomic_unchecked_t qps_destroyed;
26071 +extern atomic_unchecked_t sw_qps_destroyed;
26072 extern u32 mh_detected;
26073 extern u32 mh_pauses_sent;
26074 extern u32 cm_packets_sent;
26075 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26076 extern u32 cm_packets_received;
26077 extern u32 cm_packets_dropped;
26078 extern u32 cm_packets_retrans;
26079 -extern atomic_t cm_listens_created;
26080 -extern atomic_t cm_listens_destroyed;
26081 +extern atomic_unchecked_t cm_listens_created;
26082 +extern atomic_unchecked_t cm_listens_destroyed;
26083 extern u32 cm_backlog_drops;
26084 -extern atomic_t cm_loopbacks;
26085 -extern atomic_t cm_nodes_created;
26086 -extern atomic_t cm_nodes_destroyed;
26087 -extern atomic_t cm_accel_dropped_pkts;
26088 -extern atomic_t cm_resets_recvd;
26089 +extern atomic_unchecked_t cm_loopbacks;
26090 +extern atomic_unchecked_t cm_nodes_created;
26091 +extern atomic_unchecked_t cm_nodes_destroyed;
26092 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26093 +extern atomic_unchecked_t cm_resets_recvd;
26094
26095 extern u32 int_mod_timer_init;
26096 extern u32 int_mod_cq_depth_256;
26097 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
26098 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26099 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26100 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26101 target_stat_values[++index] = mh_detected;
26102 target_stat_values[++index] = mh_pauses_sent;
26103 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26104 - target_stat_values[++index] = atomic_read(&cm_connects);
26105 - target_stat_values[++index] = atomic_read(&cm_accepts);
26106 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26107 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26108 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26109 - target_stat_values[++index] = atomic_read(&cm_rejects);
26110 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26111 - target_stat_values[++index] = atomic_read(&qps_created);
26112 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26113 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26114 - target_stat_values[++index] = atomic_read(&cm_closes);
26115 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26116 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26117 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26118 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26119 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26120 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26121 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26122 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26123 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26124 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26125 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26126 target_stat_values[++index] = cm_packets_sent;
26127 target_stat_values[++index] = cm_packets_bounced;
26128 target_stat_values[++index] = cm_packets_created;
26129 target_stat_values[++index] = cm_packets_received;
26130 target_stat_values[++index] = cm_packets_dropped;
26131 target_stat_values[++index] = cm_packets_retrans;
26132 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26133 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26134 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26135 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26136 target_stat_values[++index] = cm_backlog_drops;
26137 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26138 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26139 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26140 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26141 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26142 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26143 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26144 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26145 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26146 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26147 target_stat_values[++index] = nesadapter->free_4kpbl;
26148 target_stat_values[++index] = nesadapter->free_256pbl;
26149 target_stat_values[++index] = int_mod_timer_init;
26150 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
26151 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26152 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26153 @@ -46,9 +46,9 @@
26154
26155 #include <rdma/ib_umem.h>
26156
26157 -atomic_t mod_qp_timouts;
26158 -atomic_t qps_created;
26159 -atomic_t sw_qps_destroyed;
26160 +atomic_unchecked_t mod_qp_timouts;
26161 +atomic_unchecked_t qps_created;
26162 +atomic_unchecked_t sw_qps_destroyed;
26163
26164 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26165
26166 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26167 if (init_attr->create_flags)
26168 return ERR_PTR(-EINVAL);
26169
26170 - atomic_inc(&qps_created);
26171 + atomic_inc_unchecked(&qps_created);
26172 switch (init_attr->qp_type) {
26173 case IB_QPT_RC:
26174 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26175 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26176 struct iw_cm_event cm_event;
26177 int ret;
26178
26179 - atomic_inc(&sw_qps_destroyed);
26180 + atomic_inc_unchecked(&sw_qps_destroyed);
26181 nesqp->destroyed = 1;
26182
26183 /* Blow away the connection if it exists. */
26184 diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
26185 --- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26186 +++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26187 @@ -51,6 +51,7 @@
26188 #include <linux/completion.h>
26189 #include <linux/kref.h>
26190 #include <linux/sched.h>
26191 +#include <linux/slab.h>
26192
26193 #include "qib_common.h"
26194 #include "qib_verbs.h"
26195 diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
26196 --- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26197 +++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26198 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26199 */
26200 static void gameport_init_port(struct gameport *gameport)
26201 {
26202 - static atomic_t gameport_no = ATOMIC_INIT(0);
26203 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26204
26205 __module_get(THIS_MODULE);
26206
26207 mutex_init(&gameport->drv_mutex);
26208 device_initialize(&gameport->dev);
26209 dev_set_name(&gameport->dev, "gameport%lu",
26210 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26211 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26212 gameport->dev.bus = &gameport_bus;
26213 gameport->dev.release = gameport_release_port;
26214 if (gameport->parent)
26215 diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
26216 --- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26217 +++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26218 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26219 */
26220 int input_register_device(struct input_dev *dev)
26221 {
26222 - static atomic_t input_no = ATOMIC_INIT(0);
26223 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26224 struct input_handler *handler;
26225 const char *path;
26226 int error;
26227 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26228 dev->setkeycode = input_default_setkeycode;
26229
26230 dev_set_name(&dev->dev, "input%ld",
26231 - (unsigned long) atomic_inc_return(&input_no) - 1);
26232 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26233
26234 error = device_add(&dev->dev);
26235 if (error)
26236 diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
26237 --- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26238 +++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26239 @@ -30,6 +30,7 @@
26240 #include <linux/kernel.h>
26241 #include <linux/module.h>
26242 #include <linux/slab.h>
26243 +#include <linux/sched.h>
26244 #include <linux/init.h>
26245 #include <linux/input.h>
26246 #include <linux/gameport.h>
26247 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26248 unsigned char buf[SW_LENGTH];
26249 int i;
26250
26251 + pax_track_stack();
26252 +
26253 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26254
26255 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26256 diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
26257 --- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26258 +++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26259 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26260
26261 static int xpad_led_probe(struct usb_xpad *xpad)
26262 {
26263 - static atomic_t led_seq = ATOMIC_INIT(0);
26264 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26265 long led_no;
26266 struct xpad_led *led;
26267 struct led_classdev *led_cdev;
26268 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26269 if (!led)
26270 return -ENOMEM;
26271
26272 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26273 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26274
26275 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26276 led->xpad = xpad;
26277 diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
26278 --- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26279 +++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26280 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26281
26282 spin_unlock_irq(&client->packet_lock);
26283
26284 - if (copy_to_user(buffer, data, count))
26285 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26286 return -EFAULT;
26287
26288 return count;
26289 diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
26290 --- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26291 +++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26292 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26293 */
26294 static void serio_init_port(struct serio *serio)
26295 {
26296 - static atomic_t serio_no = ATOMIC_INIT(0);
26297 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26298
26299 __module_get(THIS_MODULE);
26300
26301 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26302 mutex_init(&serio->drv_mutex);
26303 device_initialize(&serio->dev);
26304 dev_set_name(&serio->dev, "serio%ld",
26305 - (long)atomic_inc_return(&serio_no) - 1);
26306 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26307 serio->dev.bus = &serio_bus;
26308 serio->dev.release = serio_release_port;
26309 serio->dev.groups = serio_device_attr_groups;
26310 diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
26311 --- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26312 +++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26313 @@ -83,8 +83,8 @@ struct capiminor {
26314
26315 struct capi20_appl *ap;
26316 u32 ncci;
26317 - atomic_t datahandle;
26318 - atomic_t msgid;
26319 + atomic_unchecked_t datahandle;
26320 + atomic_unchecked_t msgid;
26321
26322 struct tty_port port;
26323 int ttyinstop;
26324 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26325 capimsg_setu16(s, 2, mp->ap->applid);
26326 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26327 capimsg_setu8 (s, 5, CAPI_RESP);
26328 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26329 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26330 capimsg_setu32(s, 8, mp->ncci);
26331 capimsg_setu16(s, 12, datahandle);
26332 }
26333 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26334 mp->outbytes -= len;
26335 spin_unlock_bh(&mp->outlock);
26336
26337 - datahandle = atomic_inc_return(&mp->datahandle);
26338 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26339 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26340 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26341 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26342 capimsg_setu16(skb->data, 2, mp->ap->applid);
26343 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26344 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26345 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26346 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26347 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26348 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26349 capimsg_setu16(skb->data, 16, len); /* Data length */
26350 diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
26351 --- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26352 +++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26353 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26354 cs->commands_pending = 0;
26355 cs->cur_at_seq = 0;
26356 cs->gotfwver = -1;
26357 - cs->open_count = 0;
26358 + local_set(&cs->open_count, 0);
26359 cs->dev = NULL;
26360 cs->tty = NULL;
26361 cs->tty_dev = NULL;
26362 diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
26363 --- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26364 +++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26365 @@ -35,6 +35,7 @@
26366 #include <linux/tty_driver.h>
26367 #include <linux/list.h>
26368 #include <asm/atomic.h>
26369 +#include <asm/local.h>
26370
26371 #define GIG_VERSION {0, 5, 0, 0}
26372 #define GIG_COMPAT {0, 4, 0, 0}
26373 @@ -433,7 +434,7 @@ struct cardstate {
26374 spinlock_t cmdlock;
26375 unsigned curlen, cmdbytes;
26376
26377 - unsigned open_count;
26378 + local_t open_count;
26379 struct tty_struct *tty;
26380 struct tasklet_struct if_wake_tasklet;
26381 unsigned control_state;
26382 diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
26383 --- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26384 +++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26385 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26386 }
26387 tty->driver_data = cs;
26388
26389 - ++cs->open_count;
26390 -
26391 - if (cs->open_count == 1) {
26392 + if (local_inc_return(&cs->open_count) == 1) {
26393 spin_lock_irqsave(&cs->lock, flags);
26394 cs->tty = tty;
26395 spin_unlock_irqrestore(&cs->lock, flags);
26396 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26397
26398 if (!cs->connected)
26399 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26400 - else if (!cs->open_count)
26401 + else if (!local_read(&cs->open_count))
26402 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26403 else {
26404 - if (!--cs->open_count) {
26405 + if (!local_dec_return(&cs->open_count)) {
26406 spin_lock_irqsave(&cs->lock, flags);
26407 cs->tty = NULL;
26408 spin_unlock_irqrestore(&cs->lock, flags);
26409 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26410 if (!cs->connected) {
26411 gig_dbg(DEBUG_IF, "not connected");
26412 retval = -ENODEV;
26413 - } else if (!cs->open_count)
26414 + } else if (!local_read(&cs->open_count))
26415 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26416 else {
26417 retval = 0;
26418 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26419 retval = -ENODEV;
26420 goto done;
26421 }
26422 - if (!cs->open_count) {
26423 + if (!local_read(&cs->open_count)) {
26424 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26425 retval = -ENODEV;
26426 goto done;
26427 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26428 if (!cs->connected) {
26429 gig_dbg(DEBUG_IF, "not connected");
26430 retval = -ENODEV;
26431 - } else if (!cs->open_count)
26432 + } else if (!local_read(&cs->open_count))
26433 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26434 else if (cs->mstate != MS_LOCKED) {
26435 dev_warn(cs->dev, "can't write to unlocked device\n");
26436 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26437
26438 if (!cs->connected)
26439 gig_dbg(DEBUG_IF, "not connected");
26440 - else if (!cs->open_count)
26441 + else if (!local_read(&cs->open_count))
26442 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26443 else if (cs->mstate != MS_LOCKED)
26444 dev_warn(cs->dev, "can't write to unlocked device\n");
26445 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26446
26447 if (!cs->connected)
26448 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26449 - else if (!cs->open_count)
26450 + else if (!local_read(&cs->open_count))
26451 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26452 else
26453 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26454 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26455
26456 if (!cs->connected)
26457 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26458 - else if (!cs->open_count)
26459 + else if (!local_read(&cs->open_count))
26460 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26461 else
26462 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26463 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26464 goto out;
26465 }
26466
26467 - if (!cs->open_count) {
26468 + if (!local_read(&cs->open_count)) {
26469 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26470 goto out;
26471 }
26472 diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
26473 --- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26474 +++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26475 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26476 }
26477 if (left) {
26478 if (t4file->user) {
26479 - if (copy_from_user(buf, dp, left))
26480 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26481 return -EFAULT;
26482 } else {
26483 memcpy(buf, dp, left);
26484 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26485 }
26486 if (left) {
26487 if (config->user) {
26488 - if (copy_from_user(buf, dp, left))
26489 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26490 return -EFAULT;
26491 } else {
26492 memcpy(buf, dp, left);
26493 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
26494 --- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26495 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26496 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26497 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26498 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26499
26500 + pax_track_stack();
26501
26502 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26503 {
26504 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
26505 --- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26506 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26507 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26508 IDI_SYNC_REQ req;
26509 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26510
26511 + pax_track_stack();
26512 +
26513 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26514
26515 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26516 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
26517 --- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26518 +++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26519 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26520 IDI_SYNC_REQ req;
26521 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26522
26523 + pax_track_stack();
26524 +
26525 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26526
26527 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26528 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
26529 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26530 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26531 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26532 IDI_SYNC_REQ req;
26533 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26534
26535 + pax_track_stack();
26536 +
26537 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26538
26539 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26540 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
26541 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26542 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26543 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26544 } diva_didd_add_adapter_t;
26545 typedef struct _diva_didd_remove_adapter {
26546 IDI_CALL p_request;
26547 -} diva_didd_remove_adapter_t;
26548 +} __no_const diva_didd_remove_adapter_t;
26549 typedef struct _diva_didd_read_adapter_array {
26550 void * buffer;
26551 dword length;
26552 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
26553 --- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26554 +++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26555 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26556 IDI_SYNC_REQ req;
26557 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26558
26559 + pax_track_stack();
26560 +
26561 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26562
26563 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26564 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
26565 --- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26566 +++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26567 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26568 dword d;
26569 word w;
26570
26571 + pax_track_stack();
26572 +
26573 a = plci->adapter;
26574 Id = ((word)plci->Id<<8)|a->Id;
26575 PUT_WORD(&SS_Ind[4],0x0000);
26576 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26577 word j, n, w;
26578 dword d;
26579
26580 + pax_track_stack();
26581 +
26582
26583 for(i=0;i<8;i++) bp_parms[i].length = 0;
26584 for(i=0;i<2;i++) global_config[i].length = 0;
26585 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26586 const byte llc3[] = {4,3,2,2,6,6,0};
26587 const byte header[] = {0,2,3,3,0,0,0};
26588
26589 + pax_track_stack();
26590 +
26591 for(i=0;i<8;i++) bp_parms[i].length = 0;
26592 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26593 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26594 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26595 word appl_number_group_type[MAX_APPL];
26596 PLCI *auxplci;
26597
26598 + pax_track_stack();
26599 +
26600 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26601
26602 if(!a->group_optimization_enabled)
26603 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
26604 --- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26605 +++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26606 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26607 IDI_SYNC_REQ req;
26608 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26609
26610 + pax_track_stack();
26611 +
26612 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26613
26614 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26615 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26616 --- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26617 +++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26618 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26619 typedef struct _diva_os_idi_adapter_interface {
26620 diva_init_card_proc_t cleanup_adapter_proc;
26621 diva_cmd_card_proc_t cmd_proc;
26622 -} diva_os_idi_adapter_interface_t;
26623 +} __no_const diva_os_idi_adapter_interface_t;
26624
26625 typedef struct _diva_os_xdi_adapter {
26626 struct list_head link;
26627 diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
26628 --- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26629 +++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26630 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26631 } iocpar;
26632 void __user *argp = (void __user *)arg;
26633
26634 + pax_track_stack();
26635 +
26636 #define name iocpar.name
26637 #define bname iocpar.bname
26638 #define iocts iocpar.iocts
26639 diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
26640 --- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26641 +++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26642 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26643 if (count > len)
26644 count = len;
26645 if (user) {
26646 - if (copy_from_user(msg, buf, count))
26647 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26648 return -EFAULT;
26649 } else
26650 memcpy(msg, buf, count);
26651 diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
26652 --- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26653 +++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26654 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26655 * it's worked so far. The end address needs +1 because __get_vm_area
26656 * allocates an extra guard page, so we need space for that.
26657 */
26658 +
26659 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26660 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26661 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26662 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26663 +#else
26664 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26665 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26666 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26667 +#endif
26668 +
26669 if (!switcher_vma) {
26670 err = -ENOMEM;
26671 printk("lguest: could not map switcher pages high\n");
26672 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26673 * Now the Switcher is mapped at the right address, we can't fail!
26674 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26675 */
26676 - memcpy(switcher_vma->addr, start_switcher_text,
26677 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26678 end_switcher_text - start_switcher_text);
26679
26680 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26681 diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
26682 --- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26683 +++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26684 @@ -59,7 +59,7 @@ static struct {
26685 /* Offset from where switcher.S was compiled to where we've copied it */
26686 static unsigned long switcher_offset(void)
26687 {
26688 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26689 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26690 }
26691
26692 /* This cpu's struct lguest_pages. */
26693 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26694 * These copies are pretty cheap, so we do them unconditionally: */
26695 /* Save the current Host top-level page directory.
26696 */
26697 +
26698 +#ifdef CONFIG_PAX_PER_CPU_PGD
26699 + pages->state.host_cr3 = read_cr3();
26700 +#else
26701 pages->state.host_cr3 = __pa(current->mm->pgd);
26702 +#endif
26703 +
26704 /*
26705 * Set up the Guest's page tables to see this CPU's pages (and no
26706 * other CPU's pages).
26707 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26708 * compiled-in switcher code and the high-mapped copy we just made.
26709 */
26710 for (i = 0; i < IDT_ENTRIES; i++)
26711 - default_idt_entries[i] += switcher_offset();
26712 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26713
26714 /*
26715 * Set up the Switcher's per-cpu areas.
26716 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26717 * it will be undisturbed when we switch. To change %cs and jump we
26718 * need this structure to feed to Intel's "lcall" instruction.
26719 */
26720 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26721 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26722 lguest_entry.segment = LGUEST_CS;
26723
26724 /*
26725 diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
26726 --- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26727 +++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26728 @@ -87,6 +87,7 @@
26729 #include <asm/page.h>
26730 #include <asm/segment.h>
26731 #include <asm/lguest.h>
26732 +#include <asm/processor-flags.h>
26733
26734 // We mark the start of the code to copy
26735 // It's placed in .text tho it's never run here
26736 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26737 // Changes type when we load it: damn Intel!
26738 // For after we switch over our page tables
26739 // That entry will be read-only: we'd crash.
26740 +
26741 +#ifdef CONFIG_PAX_KERNEXEC
26742 + mov %cr0, %edx
26743 + xor $X86_CR0_WP, %edx
26744 + mov %edx, %cr0
26745 +#endif
26746 +
26747 movl $(GDT_ENTRY_TSS*8), %edx
26748 ltr %dx
26749
26750 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26751 // Let's clear it again for our return.
26752 // The GDT descriptor of the Host
26753 // Points to the table after two "size" bytes
26754 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26755 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26756 // Clear "used" from type field (byte 5, bit 2)
26757 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26758 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26759 +
26760 +#ifdef CONFIG_PAX_KERNEXEC
26761 + mov %cr0, %eax
26762 + xor $X86_CR0_WP, %eax
26763 + mov %eax, %cr0
26764 +#endif
26765
26766 // Once our page table's switched, the Guest is live!
26767 // The Host fades as we run this final step.
26768 @@ -295,13 +309,12 @@ deliver_to_host:
26769 // I consulted gcc, and it gave
26770 // These instructions, which I gladly credit:
26771 leal (%edx,%ebx,8), %eax
26772 - movzwl (%eax),%edx
26773 - movl 4(%eax), %eax
26774 - xorw %ax, %ax
26775 - orl %eax, %edx
26776 + movl 4(%eax), %edx
26777 + movw (%eax), %dx
26778 // Now the address of the handler's in %edx
26779 // We call it now: its "iret" drops us home.
26780 - jmp *%edx
26781 + ljmp $__KERNEL_CS, $1f
26782 +1: jmp *%edx
26783
26784 // Every interrupt can come to us here
26785 // But we must truly tell each apart.
26786 diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
26787 --- linux-3.0.4/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26788 +++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26789 @@ -164,9 +164,9 @@ struct mapped_device {
26790 /*
26791 * Event handling.
26792 */
26793 - atomic_t event_nr;
26794 + atomic_unchecked_t event_nr;
26795 wait_queue_head_t eventq;
26796 - atomic_t uevent_seq;
26797 + atomic_unchecked_t uevent_seq;
26798 struct list_head uevent_list;
26799 spinlock_t uevent_lock; /* Protect access to uevent_list */
26800
26801 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26802 rwlock_init(&md->map_lock);
26803 atomic_set(&md->holders, 1);
26804 atomic_set(&md->open_count, 0);
26805 - atomic_set(&md->event_nr, 0);
26806 - atomic_set(&md->uevent_seq, 0);
26807 + atomic_set_unchecked(&md->event_nr, 0);
26808 + atomic_set_unchecked(&md->uevent_seq, 0);
26809 INIT_LIST_HEAD(&md->uevent_list);
26810 spin_lock_init(&md->uevent_lock);
26811
26812 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
26813
26814 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26815
26816 - atomic_inc(&md->event_nr);
26817 + atomic_inc_unchecked(&md->event_nr);
26818 wake_up(&md->eventq);
26819 }
26820
26821 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26822
26823 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26824 {
26825 - return atomic_add_return(1, &md->uevent_seq);
26826 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26827 }
26828
26829 uint32_t dm_get_event_nr(struct mapped_device *md)
26830 {
26831 - return atomic_read(&md->event_nr);
26832 + return atomic_read_unchecked(&md->event_nr);
26833 }
26834
26835 int dm_wait_event(struct mapped_device *md, int event_nr)
26836 {
26837 return wait_event_interruptible(md->eventq,
26838 - (event_nr != atomic_read(&md->event_nr)));
26839 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26840 }
26841
26842 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26843 diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
26844 --- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26845 +++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26846 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26847 cmd == DM_LIST_VERSIONS_CMD)
26848 return 0;
26849
26850 - if ((cmd == DM_DEV_CREATE_CMD)) {
26851 + if (cmd == DM_DEV_CREATE_CMD) {
26852 if (!*param->name) {
26853 DMWARN("name not supplied when creating device");
26854 return -EINVAL;
26855 diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
26856 --- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26857 +++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26858 @@ -40,7 +40,7 @@ enum dm_raid1_error {
26859
26860 struct mirror {
26861 struct mirror_set *ms;
26862 - atomic_t error_count;
26863 + atomic_unchecked_t error_count;
26864 unsigned long error_type;
26865 struct dm_dev *dev;
26866 sector_t offset;
26867 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26868 struct mirror *m;
26869
26870 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26871 - if (!atomic_read(&m->error_count))
26872 + if (!atomic_read_unchecked(&m->error_count))
26873 return m;
26874
26875 return NULL;
26876 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26877 * simple way to tell if a device has encountered
26878 * errors.
26879 */
26880 - atomic_inc(&m->error_count);
26881 + atomic_inc_unchecked(&m->error_count);
26882
26883 if (test_and_set_bit(error_type, &m->error_type))
26884 return;
26885 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26886 struct mirror *m = get_default_mirror(ms);
26887
26888 do {
26889 - if (likely(!atomic_read(&m->error_count)))
26890 + if (likely(!atomic_read_unchecked(&m->error_count)))
26891 return m;
26892
26893 if (m-- == ms->mirror)
26894 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26895 {
26896 struct mirror *default_mirror = get_default_mirror(m->ms);
26897
26898 - return !atomic_read(&default_mirror->error_count);
26899 + return !atomic_read_unchecked(&default_mirror->error_count);
26900 }
26901
26902 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26903 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26904 */
26905 if (likely(region_in_sync(ms, region, 1)))
26906 m = choose_mirror(ms, bio->bi_sector);
26907 - else if (m && atomic_read(&m->error_count))
26908 + else if (m && atomic_read_unchecked(&m->error_count))
26909 m = NULL;
26910
26911 if (likely(m))
26912 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26913 }
26914
26915 ms->mirror[mirror].ms = ms;
26916 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26917 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26918 ms->mirror[mirror].error_type = 0;
26919 ms->mirror[mirror].offset = offset;
26920
26921 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26922 */
26923 static char device_status_char(struct mirror *m)
26924 {
26925 - if (!atomic_read(&(m->error_count)))
26926 + if (!atomic_read_unchecked(&(m->error_count)))
26927 return 'A';
26928
26929 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26930 diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
26931 --- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26932 +++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26933 @@ -20,7 +20,7 @@ struct stripe {
26934 struct dm_dev *dev;
26935 sector_t physical_start;
26936
26937 - atomic_t error_count;
26938 + atomic_unchecked_t error_count;
26939 };
26940
26941 struct stripe_c {
26942 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26943 kfree(sc);
26944 return r;
26945 }
26946 - atomic_set(&(sc->stripe[i].error_count), 0);
26947 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26948 }
26949
26950 ti->private = sc;
26951 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26952 DMEMIT("%d ", sc->stripes);
26953 for (i = 0; i < sc->stripes; i++) {
26954 DMEMIT("%s ", sc->stripe[i].dev->name);
26955 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26956 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26957 'D' : 'A';
26958 }
26959 buffer[i] = '\0';
26960 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26961 */
26962 for (i = 0; i < sc->stripes; i++)
26963 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26964 - atomic_inc(&(sc->stripe[i].error_count));
26965 - if (atomic_read(&(sc->stripe[i].error_count)) <
26966 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
26967 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26968 DM_IO_ERROR_THRESHOLD)
26969 schedule_work(&sc->trigger_event);
26970 }
26971 diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
26972 --- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26973 +++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26974 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26975 if (!dev_size)
26976 return 0;
26977
26978 - if ((start >= dev_size) || (start + len > dev_size)) {
26979 + if ((start >= dev_size) || (len > dev_size - start)) {
26980 DMWARN("%s: %s too small for target: "
26981 "start=%llu, len=%llu, dev_size=%llu",
26982 dm_device_name(ti->table->md), bdevname(bdev, b),
26983 diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
26984 --- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26985 +++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26986 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26987 * start build, activate spare
26988 */
26989 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26990 -static atomic_t md_event_count;
26991 +static atomic_unchecked_t md_event_count;
26992 void md_new_event(mddev_t *mddev)
26993 {
26994 - atomic_inc(&md_event_count);
26995 + atomic_inc_unchecked(&md_event_count);
26996 wake_up(&md_event_waiters);
26997 }
26998 EXPORT_SYMBOL_GPL(md_new_event);
26999 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27000 */
27001 static void md_new_event_inintr(mddev_t *mddev)
27002 {
27003 - atomic_inc(&md_event_count);
27004 + atomic_inc_unchecked(&md_event_count);
27005 wake_up(&md_event_waiters);
27006 }
27007
27008 @@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27009
27010 rdev->preferred_minor = 0xffff;
27011 rdev->data_offset = le64_to_cpu(sb->data_offset);
27012 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27013 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27014
27015 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27016 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27017 @@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27018 else
27019 sb->resync_offset = cpu_to_le64(0);
27020
27021 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27022 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27023
27024 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27025 sb->size = cpu_to_le64(mddev->dev_sectors);
27026 @@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27027 static ssize_t
27028 errors_show(mdk_rdev_t *rdev, char *page)
27029 {
27030 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27031 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27032 }
27033
27034 static ssize_t
27035 @@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27036 char *e;
27037 unsigned long n = simple_strtoul(buf, &e, 10);
27038 if (*buf && (*e == 0 || *e == '\n')) {
27039 - atomic_set(&rdev->corrected_errors, n);
27040 + atomic_set_unchecked(&rdev->corrected_errors, n);
27041 return len;
27042 }
27043 return -EINVAL;
27044 @@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27045 rdev->last_read_error.tv_sec = 0;
27046 rdev->last_read_error.tv_nsec = 0;
27047 atomic_set(&rdev->nr_pending, 0);
27048 - atomic_set(&rdev->read_errors, 0);
27049 - atomic_set(&rdev->corrected_errors, 0);
27050 + atomic_set_unchecked(&rdev->read_errors, 0);
27051 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27052
27053 INIT_LIST_HEAD(&rdev->same_set);
27054 init_waitqueue_head(&rdev->blocked_wait);
27055 @@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27056
27057 spin_unlock(&pers_lock);
27058 seq_printf(seq, "\n");
27059 - mi->event = atomic_read(&md_event_count);
27060 + mi->event = atomic_read_unchecked(&md_event_count);
27061 return 0;
27062 }
27063 if (v == (void*)2) {
27064 @@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27065 chunk_kb ? "KB" : "B");
27066 if (bitmap->file) {
27067 seq_printf(seq, ", file: ");
27068 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27069 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27070 }
27071
27072 seq_printf(seq, "\n");
27073 @@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27074 else {
27075 struct seq_file *p = file->private_data;
27076 p->private = mi;
27077 - mi->event = atomic_read(&md_event_count);
27078 + mi->event = atomic_read_unchecked(&md_event_count);
27079 }
27080 return error;
27081 }
27082 @@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27083 /* always allow read */
27084 mask = POLLIN | POLLRDNORM;
27085
27086 - if (mi->event != atomic_read(&md_event_count))
27087 + if (mi->event != atomic_read_unchecked(&md_event_count))
27088 mask |= POLLERR | POLLPRI;
27089 return mask;
27090 }
27091 @@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27092 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27093 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27094 (int)part_stat_read(&disk->part0, sectors[1]) -
27095 - atomic_read(&disk->sync_io);
27096 + atomic_read_unchecked(&disk->sync_io);
27097 /* sync IO will cause sync_io to increase before the disk_stats
27098 * as sync_io is counted when a request starts, and
27099 * disk_stats is counted when it completes.
27100 diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
27101 --- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27102 +++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27103 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27104 * only maintained for arrays that
27105 * support hot removal
27106 */
27107 - atomic_t read_errors; /* number of consecutive read errors that
27108 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27109 * we have tried to ignore.
27110 */
27111 struct timespec last_read_error; /* monotonic time since our
27112 * last read error
27113 */
27114 - atomic_t corrected_errors; /* number of corrected read errors,
27115 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27116 * for reporting to userspace and storing
27117 * in superblock.
27118 */
27119 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27120
27121 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27122 {
27123 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27124 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27125 }
27126
27127 struct mdk_personality
27128 diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
27129 --- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27130 +++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27131 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27132 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27133 set_bit(R10BIO_Uptodate, &r10_bio->state);
27134 else {
27135 - atomic_add(r10_bio->sectors,
27136 + atomic_add_unchecked(r10_bio->sectors,
27137 &conf->mirrors[d].rdev->corrected_errors);
27138 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27139 md_error(r10_bio->mddev,
27140 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27141 {
27142 struct timespec cur_time_mon;
27143 unsigned long hours_since_last;
27144 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27145 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27146
27147 ktime_get_ts(&cur_time_mon);
27148
27149 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27150 * overflowing the shift of read_errors by hours_since_last.
27151 */
27152 if (hours_since_last >= 8 * sizeof(read_errors))
27153 - atomic_set(&rdev->read_errors, 0);
27154 + atomic_set_unchecked(&rdev->read_errors, 0);
27155 else
27156 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27157 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27158 }
27159
27160 /*
27161 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27162 return;
27163
27164 check_decay_read_errors(mddev, rdev);
27165 - atomic_inc(&rdev->read_errors);
27166 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
27167 + atomic_inc_unchecked(&rdev->read_errors);
27168 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27169 char b[BDEVNAME_SIZE];
27170 bdevname(rdev->bdev, b);
27171
27172 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27173 "md/raid10:%s: %s: Raid device exceeded "
27174 "read_error threshold [cur %d:max %d]\n",
27175 mdname(mddev), b,
27176 - atomic_read(&rdev->read_errors), max_read_errors);
27177 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27178 printk(KERN_NOTICE
27179 "md/raid10:%s: %s: Failing raid device\n",
27180 mdname(mddev), b);
27181 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27182 test_bit(In_sync, &rdev->flags)) {
27183 atomic_inc(&rdev->nr_pending);
27184 rcu_read_unlock();
27185 - atomic_add(s, &rdev->corrected_errors);
27186 + atomic_add_unchecked(s, &rdev->corrected_errors);
27187 if (sync_page_io(rdev,
27188 r10_bio->devs[sl].addr +
27189 sect,
27190 diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
27191 --- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27192 +++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27193 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27194 rdev_dec_pending(rdev, mddev);
27195 md_error(mddev, rdev);
27196 } else
27197 - atomic_add(s, &rdev->corrected_errors);
27198 + atomic_add_unchecked(s, &rdev->corrected_errors);
27199 }
27200 d = start;
27201 while (d != r1_bio->read_disk) {
27202 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27203 /* Well, this device is dead */
27204 md_error(mddev, rdev);
27205 else {
27206 - atomic_add(s, &rdev->corrected_errors);
27207 + atomic_add_unchecked(s, &rdev->corrected_errors);
27208 printk(KERN_INFO
27209 "md/raid1:%s: read error corrected "
27210 "(%d sectors at %llu on %s)\n",
27211 diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
27212 --- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27213 +++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27214 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27215 bi->bi_next = NULL;
27216 if ((rw & WRITE) &&
27217 test_bit(R5_ReWrite, &sh->dev[i].flags))
27218 - atomic_add(STRIPE_SECTORS,
27219 + atomic_add_unchecked(STRIPE_SECTORS,
27220 &rdev->corrected_errors);
27221 generic_make_request(bi);
27222 } else {
27223 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27224 clear_bit(R5_ReadError, &sh->dev[i].flags);
27225 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27226 }
27227 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27228 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27229 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27230 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27231 } else {
27232 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27233 int retry = 0;
27234 rdev = conf->disks[i].rdev;
27235
27236 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27237 - atomic_inc(&rdev->read_errors);
27238 + atomic_inc_unchecked(&rdev->read_errors);
27239 if (conf->mddev->degraded >= conf->max_degraded)
27240 printk_rl(KERN_WARNING
27241 "md/raid:%s: read error not correctable "
27242 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27243 (unsigned long long)(sh->sector
27244 + rdev->data_offset),
27245 bdn);
27246 - else if (atomic_read(&rdev->read_errors)
27247 + else if (atomic_read_unchecked(&rdev->read_errors)
27248 > conf->max_nr_stripes)
27249 printk(KERN_WARNING
27250 "md/raid:%s: Too many read errors, failing device %s.\n",
27251 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27252 sector_t r_sector;
27253 struct stripe_head sh2;
27254
27255 + pax_track_stack();
27256
27257 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27258 stripe = new_sector;
27259 diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
27260 --- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27261 +++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27262 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27263
27264 int x[32], y[32], w[32], h[32];
27265
27266 + pax_track_stack();
27267 +
27268 /* clear out memory */
27269 memset(&line_list[0], 0x00, sizeof(u32)*32);
27270 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27271 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27272 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27273 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27274 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27275 u8 buf[HOST_LINK_BUF_SIZE];
27276 int i;
27277
27278 + pax_track_stack();
27279 +
27280 dprintk("%s\n", __func__);
27281
27282 /* check if we have space for a link buf in the rx_buffer */
27283 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27284 unsigned long timeout;
27285 int written;
27286
27287 + pax_track_stack();
27288 +
27289 dprintk("%s\n", __func__);
27290
27291 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27292 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
27293 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27294 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27295 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
27296 union {
27297 struct dmx_ts_feed ts;
27298 struct dmx_section_feed sec;
27299 - } feed;
27300 + } __no_const feed;
27301
27302 union {
27303 dmx_ts_cb ts;
27304 dmx_section_cb sec;
27305 - } cb;
27306 + } __no_const cb;
27307
27308 struct dvb_demux *demux;
27309 void *priv;
27310 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
27311 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27312 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27313 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27314 const struct dvb_device *template, void *priv, int type)
27315 {
27316 struct dvb_device *dvbdev;
27317 - struct file_operations *dvbdevfops;
27318 + file_operations_no_const *dvbdevfops;
27319 struct device *clsdev;
27320 int minor;
27321 int id;
27322 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
27323 --- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27324 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27325 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27326 struct dib0700_adapter_state {
27327 int (*set_param_save) (struct dvb_frontend *,
27328 struct dvb_frontend_parameters *);
27329 -};
27330 +} __no_const;
27331
27332 static int dib7070_set_param_override(struct dvb_frontend *fe,
27333 struct dvb_frontend_parameters *fep)
27334 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27335 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27336 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27337 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27338 if (!buf)
27339 return -ENOMEM;
27340
27341 + pax_track_stack();
27342 +
27343 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27344 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27345 hx.addr, hx.len, hx.chk);
27346 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
27347 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27348 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27349 @@ -97,7 +97,7 @@
27350 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27351
27352 struct dibusb_state {
27353 - struct dib_fe_xfer_ops ops;
27354 + dib_fe_xfer_ops_no_const ops;
27355 int mt2060_present;
27356 u8 tuner_addr;
27357 };
27358 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
27359 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27360 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27361 @@ -95,7 +95,7 @@ struct su3000_state {
27362
27363 struct s6x0_state {
27364 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27365 -};
27366 +} __no_const;
27367
27368 /* debug */
27369 static int dvb_usb_dw2102_debug;
27370 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
27371 --- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27372 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27373 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27374 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27375 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27376
27377 + pax_track_stack();
27378
27379 data[0] = 0x8a;
27380 len_in = 1;
27381 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27382 int ret = 0, len_in;
27383 u8 data[512] = {0};
27384
27385 + pax_track_stack();
27386 +
27387 data[0] = 0x0a;
27388 len_in = 1;
27389 info("FRM Firmware Cold Reset");
27390 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
27391 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27392 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27393 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27394 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27395 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27396 };
27397 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27398
27399 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27400 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27401 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27402 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27403 #else
27404 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27405 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27406 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
27407 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27408 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27409 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27410 static struct dvb_frontend_ops dib3000mb_ops;
27411
27412 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27413 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27414 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27415 {
27416 struct dib3000_state* state = NULL;
27417
27418 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
27419 --- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27420 +++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27421 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27422 int ret = -1;
27423 int sync;
27424
27425 + pax_track_stack();
27426 +
27427 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27428
27429 fcp = 3000;
27430 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
27431 --- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27432 +++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27433 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27434 u8 tudata[585];
27435 int i;
27436
27437 + pax_track_stack();
27438 +
27439 dprintk("Firmware is %zd bytes\n",fw->size);
27440
27441 /* Get eprom data */
27442 diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
27443 --- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27444 +++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27445 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27446 struct i2c_client c;
27447 u8 eedata[256];
27448
27449 + pax_track_stack();
27450 +
27451 memset(&c, 0, sizeof(c));
27452 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27453 c.adapter = &cx->i2c_adap[0];
27454 diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
27455 --- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27456 +++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27457 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27458 bool handle = false;
27459 struct ir_raw_event ir_core_event[64];
27460
27461 + pax_track_stack();
27462 +
27463 do {
27464 num = 0;
27465 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27466 diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27467 --- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27468 +++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27469 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27470 u8 *eeprom;
27471 struct tveeprom tvdata;
27472
27473 + pax_track_stack();
27474 +
27475 memset(&tvdata,0,sizeof(tvdata));
27476
27477 eeprom = pvr2_eeprom_fetch(hdw);
27478 diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
27479 --- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27480 +++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27481 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27482 unsigned char localPAT[256];
27483 unsigned char localPMT[256];
27484
27485 + pax_track_stack();
27486 +
27487 /* Set video format - must be done first as it resets other settings */
27488 set_reg8(client, 0x41, h->video_format);
27489
27490 diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
27491 --- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27492 +++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27493 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27494 u8 tmp[512];
27495 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27496
27497 + pax_track_stack();
27498 +
27499 /* While any outstand message on the bus exists... */
27500 do {
27501
27502 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27503 u8 tmp[512];
27504 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27505
27506 + pax_track_stack();
27507 +
27508 while (loop) {
27509
27510 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27511 diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
27512 --- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27513 +++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27514 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27515
27516 /* Platform device functions */
27517
27518 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27519 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27520 .vidioc_querycap = timblogiw_querycap,
27521 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27522 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27523 diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
27524 --- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27525 +++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27526 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27527 unsigned char rv, gv, bv;
27528 static unsigned char *Y, *U, *V;
27529
27530 + pax_track_stack();
27531 +
27532 frame = usbvision->cur_frame;
27533 image_size = frame->frmwidth * frame->frmheight;
27534 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27535 diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
27536 --- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27537 +++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27538 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27539 {
27540 struct videobuf_queue q;
27541
27542 + pax_track_stack();
27543 +
27544 /* Required to make generic handler to call __videobuf_alloc */
27545 q.int_ops = &sg_ops;
27546
27547 diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
27548 --- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27549 +++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27550 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27551 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27552 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27553
27554 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27555 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27556 +#else
27557 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27558 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27559 +#endif
27560 +
27561 /*
27562 * Rounding UP to nearest 4-kB boundary here...
27563 */
27564 diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
27565 --- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27566 +++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27567 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27568 return 0;
27569 }
27570
27571 +static inline void
27572 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27573 +{
27574 + if (phy_info->port_details) {
27575 + phy_info->port_details->rphy = rphy;
27576 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27577 + ioc->name, rphy));
27578 + }
27579 +
27580 + if (rphy) {
27581 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27582 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27583 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27584 + ioc->name, rphy, rphy->dev.release));
27585 + }
27586 +}
27587 +
27588 /* no mutex */
27589 static void
27590 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27591 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27592 return NULL;
27593 }
27594
27595 -static inline void
27596 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27597 -{
27598 - if (phy_info->port_details) {
27599 - phy_info->port_details->rphy = rphy;
27600 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27601 - ioc->name, rphy));
27602 - }
27603 -
27604 - if (rphy) {
27605 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27606 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27607 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27608 - ioc->name, rphy, rphy->dev.release));
27609 - }
27610 -}
27611 -
27612 static inline struct sas_port *
27613 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27614 {
27615 diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
27616 --- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27617 +++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27618 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27619
27620 h = shost_priv(SChost);
27621
27622 - if (h) {
27623 - if (h->info_kbuf == NULL)
27624 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27625 - return h->info_kbuf;
27626 - h->info_kbuf[0] = '\0';
27627 + if (!h)
27628 + return NULL;
27629
27630 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27631 - h->info_kbuf[size-1] = '\0';
27632 - }
27633 + if (h->info_kbuf == NULL)
27634 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27635 + return h->info_kbuf;
27636 + h->info_kbuf[0] = '\0';
27637 +
27638 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27639 + h->info_kbuf[size-1] = '\0';
27640
27641 return h->info_kbuf;
27642 }
27643 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
27644 --- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27645 +++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27646 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27647 struct i2o_message *msg;
27648 unsigned int iop;
27649
27650 + pax_track_stack();
27651 +
27652 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27653 return -EFAULT;
27654
27655 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
27656 --- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27657 +++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27658 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27659 "Array Controller Device"
27660 };
27661
27662 -static char *chtostr(u8 * chars, int n)
27663 -{
27664 - char tmp[256];
27665 - tmp[0] = 0;
27666 - return strncat(tmp, (char *)chars, n);
27667 -}
27668 -
27669 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27670 char *group)
27671 {
27672 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27673
27674 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27675 seq_printf(seq, "%-#8x", ddm_table.module_id);
27676 - seq_printf(seq, "%-29s",
27677 - chtostr(ddm_table.module_name_version, 28));
27678 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27679 seq_printf(seq, "%9d ", ddm_table.data_size);
27680 seq_printf(seq, "%8d", ddm_table.code_size);
27681
27682 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27683
27684 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27685 seq_printf(seq, "%-#8x", dst->module_id);
27686 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27687 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27688 + seq_printf(seq, "%-.28s", dst->module_name_version);
27689 + seq_printf(seq, "%-.8s", dst->date);
27690 seq_printf(seq, "%8d ", dst->module_size);
27691 seq_printf(seq, "%8d ", dst->mpb_size);
27692 seq_printf(seq, "0x%04x", dst->module_flags);
27693 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27694 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27695 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27696 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27697 - seq_printf(seq, "Vendor info : %s\n",
27698 - chtostr((u8 *) (work32 + 2), 16));
27699 - seq_printf(seq, "Product info : %s\n",
27700 - chtostr((u8 *) (work32 + 6), 16));
27701 - seq_printf(seq, "Description : %s\n",
27702 - chtostr((u8 *) (work32 + 10), 16));
27703 - seq_printf(seq, "Product rev. : %s\n",
27704 - chtostr((u8 *) (work32 + 14), 8));
27705 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27706 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27707 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27708 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27709
27710 seq_printf(seq, "Serial number : ");
27711 print_serial_number(seq, (u8 *) (work32 + 16),
27712 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27713 }
27714
27715 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27716 - seq_printf(seq, "Module name : %s\n",
27717 - chtostr(result.module_name, 24));
27718 - seq_printf(seq, "Module revision : %s\n",
27719 - chtostr(result.module_rev, 8));
27720 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27721 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27722
27723 seq_printf(seq, "Serial number : ");
27724 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27725 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27726 return 0;
27727 }
27728
27729 - seq_printf(seq, "Device name : %s\n",
27730 - chtostr(result.device_name, 64));
27731 - seq_printf(seq, "Service name : %s\n",
27732 - chtostr(result.service_name, 64));
27733 - seq_printf(seq, "Physical name : %s\n",
27734 - chtostr(result.physical_location, 64));
27735 - seq_printf(seq, "Instance number : %s\n",
27736 - chtostr(result.instance_number, 4));
27737 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27738 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27739 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27740 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27741
27742 return 0;
27743 }
27744 diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
27745 --- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27746 +++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27747 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27748
27749 spin_lock_irqsave(&c->context_list_lock, flags);
27750
27751 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27752 - atomic_inc(&c->context_list_counter);
27753 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27754 + atomic_inc_unchecked(&c->context_list_counter);
27755
27756 - entry->context = atomic_read(&c->context_list_counter);
27757 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27758
27759 list_add(&entry->list, &c->context_list);
27760
27761 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27762
27763 #if BITS_PER_LONG == 64
27764 spin_lock_init(&c->context_list_lock);
27765 - atomic_set(&c->context_list_counter, 0);
27766 + atomic_set_unchecked(&c->context_list_counter, 0);
27767 INIT_LIST_HEAD(&c->context_list);
27768 #endif
27769
27770 diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
27771 --- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27772 +++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27773 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27774
27775 struct abx500_device_entry {
27776 struct list_head list;
27777 - struct abx500_ops ops;
27778 + abx500_ops_no_const ops;
27779 struct device *dev;
27780 };
27781
27782 diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
27783 --- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27784 +++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27785 @@ -13,6 +13,7 @@
27786
27787 #include <linux/kernel.h>
27788 #include <linux/module.h>
27789 +#include <linux/slab.h>
27790 #include <linux/init.h>
27791 #include <linux/pci.h>
27792 #include <linux/interrupt.h>
27793 diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
27794 --- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27795 +++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27796 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27797 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27798 int ret;
27799
27800 + pax_track_stack();
27801 +
27802 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27803 return -EINVAL;
27804
27805 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
27806 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27807 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27808 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27809 * the lid is closed. This leads to interrupts as soon as a little move
27810 * is done.
27811 */
27812 - atomic_inc(&lis3_dev.count);
27813 + atomic_inc_unchecked(&lis3_dev.count);
27814
27815 wake_up_interruptible(&lis3_dev.misc_wait);
27816 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27817 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27818 if (lis3_dev.pm_dev)
27819 pm_runtime_get_sync(lis3_dev.pm_dev);
27820
27821 - atomic_set(&lis3_dev.count, 0);
27822 + atomic_set_unchecked(&lis3_dev.count, 0);
27823 return 0;
27824 }
27825
27826 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27827 add_wait_queue(&lis3_dev.misc_wait, &wait);
27828 while (true) {
27829 set_current_state(TASK_INTERRUPTIBLE);
27830 - data = atomic_xchg(&lis3_dev.count, 0);
27831 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27832 if (data)
27833 break;
27834
27835 @@ -583,7 +583,7 @@ out:
27836 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27837 {
27838 poll_wait(file, &lis3_dev.misc_wait, wait);
27839 - if (atomic_read(&lis3_dev.count))
27840 + if (atomic_read_unchecked(&lis3_dev.count))
27841 return POLLIN | POLLRDNORM;
27842 return 0;
27843 }
27844 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
27845 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27846 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27847 @@ -265,7 +265,7 @@ struct lis3lv02d {
27848 struct input_polled_dev *idev; /* input device */
27849 struct platform_device *pdev; /* platform device */
27850 struct regulator_bulk_data regulators[2];
27851 - atomic_t count; /* interrupt count after last read */
27852 + atomic_unchecked_t count; /* interrupt count after last read */
27853 union axis_conversion ac; /* hw -> logical axis */
27854 int mapped_btns[3];
27855
27856 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
27857 --- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27858 +++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27859 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27860 unsigned long nsec;
27861
27862 nsec = CLKS2NSEC(clks);
27863 - atomic_long_inc(&mcs_op_statistics[op].count);
27864 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
27865 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27866 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27867 if (mcs_op_statistics[op].max < nsec)
27868 mcs_op_statistics[op].max = nsec;
27869 }
27870 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
27871 --- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27872 +++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27873 @@ -32,9 +32,9 @@
27874
27875 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27876
27877 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27878 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27879 {
27880 - unsigned long val = atomic_long_read(v);
27881 + unsigned long val = atomic_long_read_unchecked(v);
27882
27883 seq_printf(s, "%16lu %s\n", val, id);
27884 }
27885 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27886
27887 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27888 for (op = 0; op < mcsop_last; op++) {
27889 - count = atomic_long_read(&mcs_op_statistics[op].count);
27890 - total = atomic_long_read(&mcs_op_statistics[op].total);
27891 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27892 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27893 max = mcs_op_statistics[op].max;
27894 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27895 count ? total / count : 0, max);
27896 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
27897 --- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27898 +++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27899 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27900 * GRU statistics.
27901 */
27902 struct gru_stats_s {
27903 - atomic_long_t vdata_alloc;
27904 - atomic_long_t vdata_free;
27905 - atomic_long_t gts_alloc;
27906 - atomic_long_t gts_free;
27907 - atomic_long_t gms_alloc;
27908 - atomic_long_t gms_free;
27909 - atomic_long_t gts_double_allocate;
27910 - atomic_long_t assign_context;
27911 - atomic_long_t assign_context_failed;
27912 - atomic_long_t free_context;
27913 - atomic_long_t load_user_context;
27914 - atomic_long_t load_kernel_context;
27915 - atomic_long_t lock_kernel_context;
27916 - atomic_long_t unlock_kernel_context;
27917 - atomic_long_t steal_user_context;
27918 - atomic_long_t steal_kernel_context;
27919 - atomic_long_t steal_context_failed;
27920 - atomic_long_t nopfn;
27921 - atomic_long_t asid_new;
27922 - atomic_long_t asid_next;
27923 - atomic_long_t asid_wrap;
27924 - atomic_long_t asid_reuse;
27925 - atomic_long_t intr;
27926 - atomic_long_t intr_cbr;
27927 - atomic_long_t intr_tfh;
27928 - atomic_long_t intr_spurious;
27929 - atomic_long_t intr_mm_lock_failed;
27930 - atomic_long_t call_os;
27931 - atomic_long_t call_os_wait_queue;
27932 - atomic_long_t user_flush_tlb;
27933 - atomic_long_t user_unload_context;
27934 - atomic_long_t user_exception;
27935 - atomic_long_t set_context_option;
27936 - atomic_long_t check_context_retarget_intr;
27937 - atomic_long_t check_context_unload;
27938 - atomic_long_t tlb_dropin;
27939 - atomic_long_t tlb_preload_page;
27940 - atomic_long_t tlb_dropin_fail_no_asid;
27941 - atomic_long_t tlb_dropin_fail_upm;
27942 - atomic_long_t tlb_dropin_fail_invalid;
27943 - atomic_long_t tlb_dropin_fail_range_active;
27944 - atomic_long_t tlb_dropin_fail_idle;
27945 - atomic_long_t tlb_dropin_fail_fmm;
27946 - atomic_long_t tlb_dropin_fail_no_exception;
27947 - atomic_long_t tfh_stale_on_fault;
27948 - atomic_long_t mmu_invalidate_range;
27949 - atomic_long_t mmu_invalidate_page;
27950 - atomic_long_t flush_tlb;
27951 - atomic_long_t flush_tlb_gru;
27952 - atomic_long_t flush_tlb_gru_tgh;
27953 - atomic_long_t flush_tlb_gru_zero_asid;
27954 -
27955 - atomic_long_t copy_gpa;
27956 - atomic_long_t read_gpa;
27957 -
27958 - atomic_long_t mesq_receive;
27959 - atomic_long_t mesq_receive_none;
27960 - atomic_long_t mesq_send;
27961 - atomic_long_t mesq_send_failed;
27962 - atomic_long_t mesq_noop;
27963 - atomic_long_t mesq_send_unexpected_error;
27964 - atomic_long_t mesq_send_lb_overflow;
27965 - atomic_long_t mesq_send_qlimit_reached;
27966 - atomic_long_t mesq_send_amo_nacked;
27967 - atomic_long_t mesq_send_put_nacked;
27968 - atomic_long_t mesq_page_overflow;
27969 - atomic_long_t mesq_qf_locked;
27970 - atomic_long_t mesq_qf_noop_not_full;
27971 - atomic_long_t mesq_qf_switch_head_failed;
27972 - atomic_long_t mesq_qf_unexpected_error;
27973 - atomic_long_t mesq_noop_unexpected_error;
27974 - atomic_long_t mesq_noop_lb_overflow;
27975 - atomic_long_t mesq_noop_qlimit_reached;
27976 - atomic_long_t mesq_noop_amo_nacked;
27977 - atomic_long_t mesq_noop_put_nacked;
27978 - atomic_long_t mesq_noop_page_overflow;
27979 + atomic_long_unchecked_t vdata_alloc;
27980 + atomic_long_unchecked_t vdata_free;
27981 + atomic_long_unchecked_t gts_alloc;
27982 + atomic_long_unchecked_t gts_free;
27983 + atomic_long_unchecked_t gms_alloc;
27984 + atomic_long_unchecked_t gms_free;
27985 + atomic_long_unchecked_t gts_double_allocate;
27986 + atomic_long_unchecked_t assign_context;
27987 + atomic_long_unchecked_t assign_context_failed;
27988 + atomic_long_unchecked_t free_context;
27989 + atomic_long_unchecked_t load_user_context;
27990 + atomic_long_unchecked_t load_kernel_context;
27991 + atomic_long_unchecked_t lock_kernel_context;
27992 + atomic_long_unchecked_t unlock_kernel_context;
27993 + atomic_long_unchecked_t steal_user_context;
27994 + atomic_long_unchecked_t steal_kernel_context;
27995 + atomic_long_unchecked_t steal_context_failed;
27996 + atomic_long_unchecked_t nopfn;
27997 + atomic_long_unchecked_t asid_new;
27998 + atomic_long_unchecked_t asid_next;
27999 + atomic_long_unchecked_t asid_wrap;
28000 + atomic_long_unchecked_t asid_reuse;
28001 + atomic_long_unchecked_t intr;
28002 + atomic_long_unchecked_t intr_cbr;
28003 + atomic_long_unchecked_t intr_tfh;
28004 + atomic_long_unchecked_t intr_spurious;
28005 + atomic_long_unchecked_t intr_mm_lock_failed;
28006 + atomic_long_unchecked_t call_os;
28007 + atomic_long_unchecked_t call_os_wait_queue;
28008 + atomic_long_unchecked_t user_flush_tlb;
28009 + atomic_long_unchecked_t user_unload_context;
28010 + atomic_long_unchecked_t user_exception;
28011 + atomic_long_unchecked_t set_context_option;
28012 + atomic_long_unchecked_t check_context_retarget_intr;
28013 + atomic_long_unchecked_t check_context_unload;
28014 + atomic_long_unchecked_t tlb_dropin;
28015 + atomic_long_unchecked_t tlb_preload_page;
28016 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28017 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28018 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28019 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28020 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28021 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28022 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28023 + atomic_long_unchecked_t tfh_stale_on_fault;
28024 + atomic_long_unchecked_t mmu_invalidate_range;
28025 + atomic_long_unchecked_t mmu_invalidate_page;
28026 + atomic_long_unchecked_t flush_tlb;
28027 + atomic_long_unchecked_t flush_tlb_gru;
28028 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28029 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28030 +
28031 + atomic_long_unchecked_t copy_gpa;
28032 + atomic_long_unchecked_t read_gpa;
28033 +
28034 + atomic_long_unchecked_t mesq_receive;
28035 + atomic_long_unchecked_t mesq_receive_none;
28036 + atomic_long_unchecked_t mesq_send;
28037 + atomic_long_unchecked_t mesq_send_failed;
28038 + atomic_long_unchecked_t mesq_noop;
28039 + atomic_long_unchecked_t mesq_send_unexpected_error;
28040 + atomic_long_unchecked_t mesq_send_lb_overflow;
28041 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28042 + atomic_long_unchecked_t mesq_send_amo_nacked;
28043 + atomic_long_unchecked_t mesq_send_put_nacked;
28044 + atomic_long_unchecked_t mesq_page_overflow;
28045 + atomic_long_unchecked_t mesq_qf_locked;
28046 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28047 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28048 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28049 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28050 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28051 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28052 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28053 + atomic_long_unchecked_t mesq_noop_put_nacked;
28054 + atomic_long_unchecked_t mesq_noop_page_overflow;
28055
28056 };
28057
28058 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28059 tghop_invalidate, mcsop_last};
28060
28061 struct mcs_op_statistic {
28062 - atomic_long_t count;
28063 - atomic_long_t total;
28064 + atomic_long_unchecked_t count;
28065 + atomic_long_unchecked_t total;
28066 unsigned long max;
28067 };
28068
28069 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28070
28071 #define STAT(id) do { \
28072 if (gru_options & OPT_STATS) \
28073 - atomic_long_inc(&gru_stats.id); \
28074 + atomic_long_inc_unchecked(&gru_stats.id); \
28075 } while (0)
28076
28077 #ifdef CONFIG_SGI_GRU_DEBUG
28078 diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
28079 --- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28080 +++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28081 @@ -289,7 +289,7 @@ struct xpc_interface {
28082 xpc_notify_func, void *);
28083 void (*received) (short, int, void *);
28084 enum xp_retval (*partid_to_nasids) (short, void *);
28085 -};
28086 +} __no_const;
28087
28088 extern struct xpc_interface xpc_interface;
28089
28090 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
28091 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28092 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28093 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28094 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28095 unsigned long timeo = jiffies + HZ;
28096
28097 + pax_track_stack();
28098 +
28099 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28100 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28101 goto sleep;
28102 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28103 unsigned long initial_adr;
28104 int initial_len = len;
28105
28106 + pax_track_stack();
28107 +
28108 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28109 adr += chip->start;
28110 initial_adr = adr;
28111 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28112 int retries = 3;
28113 int ret;
28114
28115 + pax_track_stack();
28116 +
28117 adr += chip->start;
28118
28119 retry:
28120 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
28121 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28122 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28123 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28124 unsigned long cmd_addr;
28125 struct cfi_private *cfi = map->fldrv_priv;
28126
28127 + pax_track_stack();
28128 +
28129 adr += chip->start;
28130
28131 /* Ensure cmd read/writes are aligned. */
28132 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28133 DECLARE_WAITQUEUE(wait, current);
28134 int wbufsize, z;
28135
28136 + pax_track_stack();
28137 +
28138 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28139 if (adr & (map_bankwidth(map)-1))
28140 return -EINVAL;
28141 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28142 DECLARE_WAITQUEUE(wait, current);
28143 int ret = 0;
28144
28145 + pax_track_stack();
28146 +
28147 adr += chip->start;
28148
28149 /* Let's determine this according to the interleave only once */
28150 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28151 unsigned long timeo = jiffies + HZ;
28152 DECLARE_WAITQUEUE(wait, current);
28153
28154 + pax_track_stack();
28155 +
28156 adr += chip->start;
28157
28158 /* Let's determine this according to the interleave only once */
28159 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28160 unsigned long timeo = jiffies + HZ;
28161 DECLARE_WAITQUEUE(wait, current);
28162
28163 + pax_track_stack();
28164 +
28165 adr += chip->start;
28166
28167 /* Let's determine this according to the interleave only once */
28168 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
28169 --- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28170 +++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28171 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28172
28173 /* The ECC will not be calculated correctly if less than 512 is written */
28174 /* DBB-
28175 - if (len != 0x200 && eccbuf)
28176 + if (len != 0x200)
28177 printk(KERN_WARNING
28178 "ECC needs a full sector write (adr: %lx size %lx)\n",
28179 (long) to, (long) len);
28180 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
28181 --- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28182 +++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28183 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28184 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28185
28186 /* Don't allow read past end of device */
28187 - if (from >= this->totlen)
28188 + if (from >= this->totlen || !len)
28189 return -EINVAL;
28190
28191 /* Don't allow a single read to cross a 512-byte block boundary */
28192 diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
28193 --- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28194 +++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28195 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28196 loff_t offset;
28197 uint16_t srcunitswap = cpu_to_le16(srcunit);
28198
28199 + pax_track_stack();
28200 +
28201 eun = &part->EUNInfo[srcunit];
28202 xfer = &part->XferInfo[xferunit];
28203 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28204 diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
28205 --- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28206 +++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28207 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28208 struct inftl_oob oob;
28209 size_t retlen;
28210
28211 + pax_track_stack();
28212 +
28213 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28214 "pending=%d)\n", inftl, thisVUC, pendingblock);
28215
28216 diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
28217 --- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28218 +++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28219 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28220 struct INFTLPartition *ip;
28221 size_t retlen;
28222
28223 + pax_track_stack();
28224 +
28225 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28226
28227 /*
28228 diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
28229 --- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28230 +++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28231 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28232 {
28233 map_word pfow_val[4];
28234
28235 + pax_track_stack();
28236 +
28237 /* Check identification string */
28238 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28239 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28240 diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
28241 --- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28242 +++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28243 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28244 u_long size;
28245 struct mtd_info_user info;
28246
28247 + pax_track_stack();
28248 +
28249 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28250
28251 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28252 diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
28253 --- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28254 +++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28255 @@ -26,6 +26,7 @@
28256 #include <linux/pci.h>
28257 #include <linux/mtd/mtd.h>
28258 #include <linux/module.h>
28259 +#include <linux/slab.h>
28260
28261 #include "denali.h"
28262
28263 diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
28264 --- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28265 +++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28266 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28267 int inplace = 1;
28268 size_t retlen;
28269
28270 + pax_track_stack();
28271 +
28272 memset(BlockMap, 0xff, sizeof(BlockMap));
28273 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28274
28275 diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
28276 --- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28277 +++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28278 @@ -24,6 +24,7 @@
28279 #include <asm/errno.h>
28280 #include <linux/delay.h>
28281 #include <linux/slab.h>
28282 +#include <linux/sched.h>
28283 #include <linux/mtd/mtd.h>
28284 #include <linux/mtd/nand.h>
28285 #include <linux/mtd/nftl.h>
28286 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28287 struct mtd_info *mtd = nftl->mbd.mtd;
28288 unsigned int i;
28289
28290 + pax_track_stack();
28291 +
28292 /* Assume logical EraseSize == physical erasesize for starting the scan.
28293 We'll sort it out later if we find a MediaHeader which says otherwise */
28294 /* Actually, we won't. The new DiskOnChip driver has already scanned
28295 diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
28296 --- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28297 +++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28298 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28299 static int __init bytes_str_to_int(const char *str)
28300 {
28301 char *endp;
28302 - unsigned long result;
28303 + unsigned long result, scale = 1;
28304
28305 result = simple_strtoul(str, &endp, 0);
28306 if (str == endp || result >= INT_MAX) {
28307 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28308
28309 switch (*endp) {
28310 case 'G':
28311 - result *= 1024;
28312 + scale *= 1024;
28313 case 'M':
28314 - result *= 1024;
28315 + scale *= 1024;
28316 case 'K':
28317 - result *= 1024;
28318 + scale *= 1024;
28319 if (endp[1] == 'i' && endp[2] == 'B')
28320 endp += 2;
28321 case '\0':
28322 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28323 return -EINVAL;
28324 }
28325
28326 - return result;
28327 + if ((intoverflow_t)result*scale >= INT_MAX) {
28328 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28329 + str);
28330 + return -EINVAL;
28331 + }
28332 +
28333 + return result*scale;
28334 }
28335
28336 /**
28337 diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
28338 --- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28339 +++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28340 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28341 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28342 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28343
28344 -static struct bfa_ioc_hwif nw_hwif_ct;
28345 +static struct bfa_ioc_hwif nw_hwif_ct = {
28346 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28347 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28348 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28349 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28350 + .ioc_map_port = bfa_ioc_ct_map_port,
28351 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28352 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28353 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28354 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28355 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28356 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28357 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28358 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28359 +};
28360
28361 /**
28362 * Called from bfa_ioc_attach() to map asic specific calls.
28363 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28364 void
28365 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28366 {
28367 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28368 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28369 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28370 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28371 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28372 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28373 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28374 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28375 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28376 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28377 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28378 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28379 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28380 -
28381 ioc->ioc_hwif = &nw_hwif_ct;
28382 }
28383
28384 diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
28385 --- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28386 +++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28387 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28388 struct bna_intr_info *intr_info =
28389 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28390 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28391 - struct bna_tx_event_cbfn tx_cbfn;
28392 + static struct bna_tx_event_cbfn tx_cbfn = {
28393 + /* Initialize the tx event handlers */
28394 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28395 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28396 + .tx_stall_cbfn = bnad_cb_tx_stall,
28397 + .tx_resume_cbfn = bnad_cb_tx_resume,
28398 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28399 + };
28400 struct bna_tx *tx;
28401 unsigned long flags;
28402
28403 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28404 tx_config->txq_depth = bnad->txq_depth;
28405 tx_config->tx_type = BNA_TX_T_REGULAR;
28406
28407 - /* Initialize the tx event handlers */
28408 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28409 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28410 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28411 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28412 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28413 -
28414 /* Get BNA's resource requirement for one tx object */
28415 spin_lock_irqsave(&bnad->bna_lock, flags);
28416 bna_tx_res_req(bnad->num_txq_per_tx,
28417 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28418 struct bna_intr_info *intr_info =
28419 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28420 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28421 - struct bna_rx_event_cbfn rx_cbfn;
28422 + static struct bna_rx_event_cbfn rx_cbfn = {
28423 + /* Initialize the Rx event handlers */
28424 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28425 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28426 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28427 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28428 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28429 + .rx_post_cbfn = bnad_cb_rx_post
28430 + };
28431 struct bna_rx *rx;
28432 unsigned long flags;
28433
28434 /* Initialize the Rx object configuration */
28435 bnad_init_rx_config(bnad, rx_config);
28436
28437 - /* Initialize the Rx event handlers */
28438 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28439 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28440 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28441 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28442 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28443 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28444 -
28445 /* Get BNA's resource requirement for one Rx object */
28446 spin_lock_irqsave(&bnad->bna_lock, flags);
28447 bna_rx_res_req(rx_config, res_info);
28448 diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
28449 --- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28450 +++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28451 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28452 int rc = 0;
28453 u32 magic, csum;
28454
28455 + pax_track_stack();
28456 +
28457 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28458 goto test_nvram_done;
28459
28460 diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
28461 --- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28462 +++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28463 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28464 int i, rc;
28465 u32 magic, crc;
28466
28467 + pax_track_stack();
28468 +
28469 if (BP_NOMCP(bp))
28470 return 0;
28471
28472 diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
28473 --- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28474 +++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28475 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28476 */
28477 struct l2t_skb_cb {
28478 arp_failure_handler_func arp_failure_handler;
28479 -};
28480 +} __no_const;
28481
28482 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28483
28484 diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
28485 --- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28486 +++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28487 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28488 unsigned int nchan = adap->params.nports;
28489 struct msix_entry entries[MAX_INGQ + 1];
28490
28491 + pax_track_stack();
28492 +
28493 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28494 entries[i].entry = i;
28495
28496 diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
28497 --- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28498 +++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28499 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28500 u8 vpd[VPD_LEN], csum;
28501 unsigned int vpdr_len, kw_offset, id_len;
28502
28503 + pax_track_stack();
28504 +
28505 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28506 if (ret < 0)
28507 return ret;
28508 diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
28509 --- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28510 +++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28511 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28512 {
28513 struct e1000_hw *hw = &adapter->hw;
28514 struct e1000_mac_info *mac = &hw->mac;
28515 - struct e1000_mac_operations *func = &mac->ops;
28516 + e1000_mac_operations_no_const *func = &mac->ops;
28517 u32 swsm = 0;
28518 u32 swsm2 = 0;
28519 bool force_clear_smbi = false;
28520 diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
28521 --- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28522 +++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28523 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28524 {
28525 struct e1000_hw *hw = &adapter->hw;
28526 struct e1000_mac_info *mac = &hw->mac;
28527 - struct e1000_mac_operations *func = &mac->ops;
28528 + e1000_mac_operations_no_const *func = &mac->ops;
28529
28530 /* Set media type */
28531 switch (adapter->pdev->device) {
28532 diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
28533 --- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28534 +++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28535 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
28536 void (*write_vfta)(struct e1000_hw *, u32, u32);
28537 s32 (*read_mac_addr)(struct e1000_hw *);
28538 };
28539 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28540
28541 /* Function pointers for the PHY. */
28542 struct e1000_phy_operations {
28543 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
28544 void (*power_up)(struct e1000_hw *);
28545 void (*power_down)(struct e1000_hw *);
28546 };
28547 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28548
28549 /* Function pointers for the NVM. */
28550 struct e1000_nvm_operations {
28551 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28552 s32 (*validate)(struct e1000_hw *);
28553 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28554 };
28555 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28556
28557 struct e1000_mac_info {
28558 - struct e1000_mac_operations ops;
28559 + e1000_mac_operations_no_const ops;
28560 u8 addr[ETH_ALEN];
28561 u8 perm_addr[ETH_ALEN];
28562
28563 @@ -853,7 +856,7 @@ struct e1000_mac_info {
28564 };
28565
28566 struct e1000_phy_info {
28567 - struct e1000_phy_operations ops;
28568 + e1000_phy_operations_no_const ops;
28569
28570 enum e1000_phy_type type;
28571
28572 @@ -887,7 +890,7 @@ struct e1000_phy_info {
28573 };
28574
28575 struct e1000_nvm_info {
28576 - struct e1000_nvm_operations ops;
28577 + e1000_nvm_operations_no_const ops;
28578
28579 enum e1000_nvm_type type;
28580 enum e1000_nvm_override override;
28581 diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
28582 --- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28583 +++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28584 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28585 unsigned char buf[512];
28586 int count1;
28587
28588 + pax_track_stack();
28589 +
28590 if (!count)
28591 return;
28592
28593 diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
28594 --- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28595 +++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28596 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28597 s32 (*read_mac_addr)(struct e1000_hw *);
28598 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28599 };
28600 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28601
28602 struct e1000_phy_operations {
28603 s32 (*acquire)(struct e1000_hw *);
28604 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28605 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28606 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28607 };
28608 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28609
28610 struct e1000_nvm_operations {
28611 s32 (*acquire)(struct e1000_hw *);
28612 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28613 s32 (*update)(struct e1000_hw *);
28614 s32 (*validate)(struct e1000_hw *);
28615 };
28616 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28617
28618 struct e1000_info {
28619 s32 (*get_invariants)(struct e1000_hw *);
28620 @@ -350,7 +353,7 @@ struct e1000_info {
28621 extern const struct e1000_info e1000_82575_info;
28622
28623 struct e1000_mac_info {
28624 - struct e1000_mac_operations ops;
28625 + e1000_mac_operations_no_const ops;
28626
28627 u8 addr[6];
28628 u8 perm_addr[6];
28629 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28630 };
28631
28632 struct e1000_phy_info {
28633 - struct e1000_phy_operations ops;
28634 + e1000_phy_operations_no_const ops;
28635
28636 enum e1000_phy_type type;
28637
28638 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28639 };
28640
28641 struct e1000_nvm_info {
28642 - struct e1000_nvm_operations ops;
28643 + e1000_nvm_operations_no_const ops;
28644 enum e1000_nvm_type type;
28645 enum e1000_nvm_override override;
28646
28647 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28648 s32 (*check_for_ack)(struct e1000_hw *, u16);
28649 s32 (*check_for_rst)(struct e1000_hw *, u16);
28650 };
28651 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28652
28653 struct e1000_mbx_stats {
28654 u32 msgs_tx;
28655 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28656 };
28657
28658 struct e1000_mbx_info {
28659 - struct e1000_mbx_operations ops;
28660 + e1000_mbx_operations_no_const ops;
28661 struct e1000_mbx_stats stats;
28662 u32 timeout;
28663 u32 usec_delay;
28664 diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
28665 --- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28666 +++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28667 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28668 s32 (*read_mac_addr)(struct e1000_hw *);
28669 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28670 };
28671 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28672
28673 struct e1000_mac_info {
28674 - struct e1000_mac_operations ops;
28675 + e1000_mac_operations_no_const ops;
28676 u8 addr[6];
28677 u8 perm_addr[6];
28678
28679 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28680 s32 (*check_for_ack)(struct e1000_hw *);
28681 s32 (*check_for_rst)(struct e1000_hw *);
28682 };
28683 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28684
28685 struct e1000_mbx_stats {
28686 u32 msgs_tx;
28687 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28688 };
28689
28690 struct e1000_mbx_info {
28691 - struct e1000_mbx_operations ops;
28692 + e1000_mbx_operations_no_const ops;
28693 struct e1000_mbx_stats stats;
28694 u32 timeout;
28695 u32 usec_delay;
28696 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
28697 --- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28698 +++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28699 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28700 u32 rctl;
28701 int i;
28702
28703 + pax_track_stack();
28704 +
28705 /* Check for Promiscuous and All Multicast modes */
28706
28707 rctl = IXGB_READ_REG(hw, RCTL);
28708 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
28709 --- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28710 +++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28711 @@ -261,6 +261,9 @@ void __devinit
28712 ixgb_check_options(struct ixgb_adapter *adapter)
28713 {
28714 int bd = adapter->bd_number;
28715 +
28716 + pax_track_stack();
28717 +
28718 if (bd >= IXGB_MAX_NIC) {
28719 pr_notice("Warning: no configuration for board #%i\n", bd);
28720 pr_notice("Using defaults for all values\n");
28721 diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
28722 --- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28723 +++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28724 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28725 s32 (*update_checksum)(struct ixgbe_hw *);
28726 u16 (*calc_checksum)(struct ixgbe_hw *);
28727 };
28728 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28729
28730 struct ixgbe_mac_operations {
28731 s32 (*init_hw)(struct ixgbe_hw *);
28732 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28733 /* Flow Control */
28734 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28735 };
28736 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28737
28738 struct ixgbe_phy_operations {
28739 s32 (*identify)(struct ixgbe_hw *);
28740 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28741 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28742 s32 (*check_overtemp)(struct ixgbe_hw *);
28743 };
28744 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28745
28746 struct ixgbe_eeprom_info {
28747 - struct ixgbe_eeprom_operations ops;
28748 + ixgbe_eeprom_operations_no_const ops;
28749 enum ixgbe_eeprom_type type;
28750 u32 semaphore_delay;
28751 u16 word_size;
28752 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28753
28754 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28755 struct ixgbe_mac_info {
28756 - struct ixgbe_mac_operations ops;
28757 + ixgbe_mac_operations_no_const ops;
28758 enum ixgbe_mac_type type;
28759 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28760 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28761 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28762 };
28763
28764 struct ixgbe_phy_info {
28765 - struct ixgbe_phy_operations ops;
28766 + ixgbe_phy_operations_no_const ops;
28767 struct mdio_if_info mdio;
28768 enum ixgbe_phy_type type;
28769 u32 id;
28770 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28771 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28772 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28773 };
28774 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28775
28776 struct ixgbe_mbx_stats {
28777 u32 msgs_tx;
28778 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28779 };
28780
28781 struct ixgbe_mbx_info {
28782 - struct ixgbe_mbx_operations ops;
28783 + ixgbe_mbx_operations_no_const ops;
28784 struct ixgbe_mbx_stats stats;
28785 u32 timeout;
28786 u32 usec_delay;
28787 diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
28788 --- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28789 +++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28790 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28791 s32 (*clear_vfta)(struct ixgbe_hw *);
28792 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28793 };
28794 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28795
28796 enum ixgbe_mac_type {
28797 ixgbe_mac_unknown = 0,
28798 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28799 };
28800
28801 struct ixgbe_mac_info {
28802 - struct ixgbe_mac_operations ops;
28803 + ixgbe_mac_operations_no_const ops;
28804 u8 addr[6];
28805 u8 perm_addr[6];
28806
28807 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28808 s32 (*check_for_ack)(struct ixgbe_hw *);
28809 s32 (*check_for_rst)(struct ixgbe_hw *);
28810 };
28811 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28812
28813 struct ixgbe_mbx_stats {
28814 u32 msgs_tx;
28815 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28816 };
28817
28818 struct ixgbe_mbx_info {
28819 - struct ixgbe_mbx_operations ops;
28820 + ixgbe_mbx_operations_no_const ops;
28821 struct ixgbe_mbx_stats stats;
28822 u32 timeout;
28823 u32 udelay;
28824 diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
28825 --- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28826 +++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28827 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28828 int rc;
28829 u64 counter[TOTAL_PORT_COUNTER_NUM];
28830
28831 + pax_track_stack();
28832 +
28833 mutex_lock(&hw_priv->lock);
28834 n = SWITCH_PORT_NUM;
28835 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28836 diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
28837 --- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28838 +++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28839 @@ -40,6 +40,7 @@
28840 #include <linux/dma-mapping.h>
28841 #include <linux/slab.h>
28842 #include <linux/io-mapping.h>
28843 +#include <linux/sched.h>
28844
28845 #include <linux/mlx4/device.h>
28846 #include <linux/mlx4/doorbell.h>
28847 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28848 u64 icm_size;
28849 int err;
28850
28851 + pax_track_stack();
28852 +
28853 err = mlx4_QUERY_FW(dev);
28854 if (err) {
28855 if (err == -EACCES)
28856 diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
28857 --- linux-3.0.4/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28858 +++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28859 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28860 int i, num_irqs, err;
28861 u8 first_ldg;
28862
28863 + pax_track_stack();
28864 +
28865 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28866 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28867 ldg_num_map[i] = first_ldg + i;
28868 diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
28869 --- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28870 +++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28871 @@ -82,7 +82,7 @@ static int cards_found;
28872 /*
28873 * VLB I/O addresses
28874 */
28875 -static unsigned int pcnet32_portlist[] __initdata =
28876 +static unsigned int pcnet32_portlist[] __devinitdata =
28877 { 0x300, 0x320, 0x340, 0x360, 0 };
28878
28879 static int pcnet32_debug;
28880 @@ -270,7 +270,7 @@ struct pcnet32_private {
28881 struct sk_buff **rx_skbuff;
28882 dma_addr_t *tx_dma_addr;
28883 dma_addr_t *rx_dma_addr;
28884 - struct pcnet32_access a;
28885 + struct pcnet32_access *a;
28886 spinlock_t lock; /* Guard lock */
28887 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28888 unsigned int rx_ring_size; /* current rx ring size */
28889 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28890 u16 val;
28891
28892 netif_wake_queue(dev);
28893 - val = lp->a.read_csr(ioaddr, CSR3);
28894 + val = lp->a->read_csr(ioaddr, CSR3);
28895 val &= 0x00ff;
28896 - lp->a.write_csr(ioaddr, CSR3, val);
28897 + lp->a->write_csr(ioaddr, CSR3, val);
28898 napi_enable(&lp->napi);
28899 }
28900
28901 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28902 r = mii_link_ok(&lp->mii_if);
28903 } else if (lp->chip_version >= PCNET32_79C970A) {
28904 ulong ioaddr = dev->base_addr; /* card base I/O address */
28905 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28906 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28907 } else { /* can not detect link on really old chips */
28908 r = 1;
28909 }
28910 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28911 pcnet32_netif_stop(dev);
28912
28913 spin_lock_irqsave(&lp->lock, flags);
28914 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28915 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28916
28917 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28918
28919 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28920 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28921 {
28922 struct pcnet32_private *lp = netdev_priv(dev);
28923 - struct pcnet32_access *a = &lp->a; /* access to registers */
28924 + struct pcnet32_access *a = lp->a; /* access to registers */
28925 ulong ioaddr = dev->base_addr; /* card base I/O address */
28926 struct sk_buff *skb; /* sk buff */
28927 int x, i; /* counters */
28928 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28929 pcnet32_netif_stop(dev);
28930
28931 spin_lock_irqsave(&lp->lock, flags);
28932 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28933 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28934
28935 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28936
28937 /* Reset the PCNET32 */
28938 - lp->a.reset(ioaddr);
28939 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28940 + lp->a->reset(ioaddr);
28941 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28942
28943 /* switch pcnet32 to 32bit mode */
28944 - lp->a.write_bcr(ioaddr, 20, 2);
28945 + lp->a->write_bcr(ioaddr, 20, 2);
28946
28947 /* purge & init rings but don't actually restart */
28948 pcnet32_restart(dev, 0x0000);
28949
28950 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28951 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28952
28953 /* Initialize Transmit buffers. */
28954 size = data_len + 15;
28955 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28956
28957 /* set int loopback in CSR15 */
28958 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28959 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28960 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28961
28962 teststatus = cpu_to_le16(0x8000);
28963 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28964 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28965
28966 /* Check status of descriptors */
28967 for (x = 0; x < numbuffs; x++) {
28968 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28969 }
28970 }
28971
28972 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28973 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28974 wmb();
28975 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28976 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28977 @@ -1015,7 +1015,7 @@ clean_up:
28978 pcnet32_restart(dev, CSR0_NORMAL);
28979 } else {
28980 pcnet32_purge_rx_ring(dev);
28981 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28982 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28983 }
28984 spin_unlock_irqrestore(&lp->lock, flags);
28985
28986 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28987 enum ethtool_phys_id_state state)
28988 {
28989 struct pcnet32_private *lp = netdev_priv(dev);
28990 - struct pcnet32_access *a = &lp->a;
28991 + struct pcnet32_access *a = lp->a;
28992 ulong ioaddr = dev->base_addr;
28993 unsigned long flags;
28994 int i;
28995 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
28996 {
28997 int csr5;
28998 struct pcnet32_private *lp = netdev_priv(dev);
28999 - struct pcnet32_access *a = &lp->a;
29000 + struct pcnet32_access *a = lp->a;
29001 ulong ioaddr = dev->base_addr;
29002 int ticks;
29003
29004 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29005 spin_lock_irqsave(&lp->lock, flags);
29006 if (pcnet32_tx(dev)) {
29007 /* reset the chip to clear the error condition, then restart */
29008 - lp->a.reset(ioaddr);
29009 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29010 + lp->a->reset(ioaddr);
29011 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29012 pcnet32_restart(dev, CSR0_START);
29013 netif_wake_queue(dev);
29014 }
29015 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29016 __napi_complete(napi);
29017
29018 /* clear interrupt masks */
29019 - val = lp->a.read_csr(ioaddr, CSR3);
29020 + val = lp->a->read_csr(ioaddr, CSR3);
29021 val &= 0x00ff;
29022 - lp->a.write_csr(ioaddr, CSR3, val);
29023 + lp->a->write_csr(ioaddr, CSR3, val);
29024
29025 /* Set interrupt enable. */
29026 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29027 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29028
29029 spin_unlock_irqrestore(&lp->lock, flags);
29030 }
29031 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29032 int i, csr0;
29033 u16 *buff = ptr;
29034 struct pcnet32_private *lp = netdev_priv(dev);
29035 - struct pcnet32_access *a = &lp->a;
29036 + struct pcnet32_access *a = lp->a;
29037 ulong ioaddr = dev->base_addr;
29038 unsigned long flags;
29039
29040 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29041 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29042 if (lp->phymask & (1 << j)) {
29043 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29044 - lp->a.write_bcr(ioaddr, 33,
29045 + lp->a->write_bcr(ioaddr, 33,
29046 (j << 5) | i);
29047 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29048 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29049 }
29050 }
29051 }
29052 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29053 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29054 lp->options |= PCNET32_PORT_FD;
29055
29056 - lp->a = *a;
29057 + lp->a = a;
29058
29059 /* prior to register_netdev, dev->name is not yet correct */
29060 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29061 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29062 if (lp->mii) {
29063 /* lp->phycount and lp->phymask are set to 0 by memset above */
29064
29065 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29066 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29067 /* scan for PHYs */
29068 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29069 unsigned short id1, id2;
29070 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29071 pr_info("Found PHY %04x:%04x at address %d\n",
29072 id1, id2, i);
29073 }
29074 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29075 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29076 if (lp->phycount > 1)
29077 lp->options |= PCNET32_PORT_MII;
29078 }
29079 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29080 }
29081
29082 /* Reset the PCNET32 */
29083 - lp->a.reset(ioaddr);
29084 + lp->a->reset(ioaddr);
29085
29086 /* switch pcnet32 to 32bit mode */
29087 - lp->a.write_bcr(ioaddr, 20, 2);
29088 + lp->a->write_bcr(ioaddr, 20, 2);
29089
29090 netif_printk(lp, ifup, KERN_DEBUG, dev,
29091 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29092 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29093 (u32) (lp->init_dma_addr));
29094
29095 /* set/reset autoselect bit */
29096 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29097 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29098 if (lp->options & PCNET32_PORT_ASEL)
29099 val |= 2;
29100 - lp->a.write_bcr(ioaddr, 2, val);
29101 + lp->a->write_bcr(ioaddr, 2, val);
29102
29103 /* handle full duplex setting */
29104 if (lp->mii_if.full_duplex) {
29105 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29106 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29107 if (lp->options & PCNET32_PORT_FD) {
29108 val |= 1;
29109 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29110 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29111 if (lp->chip_version == 0x2627)
29112 val |= 3;
29113 }
29114 - lp->a.write_bcr(ioaddr, 9, val);
29115 + lp->a->write_bcr(ioaddr, 9, val);
29116 }
29117
29118 /* set/reset GPSI bit in test register */
29119 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29120 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29121 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29122 val |= 0x10;
29123 - lp->a.write_csr(ioaddr, 124, val);
29124 + lp->a->write_csr(ioaddr, 124, val);
29125
29126 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29127 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29128 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29129 * duplex, and/or enable auto negotiation, and clear DANAS
29130 */
29131 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29132 - lp->a.write_bcr(ioaddr, 32,
29133 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29134 + lp->a->write_bcr(ioaddr, 32,
29135 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29136 /* disable Auto Negotiation, set 10Mpbs, HD */
29137 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29138 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29139 if (lp->options & PCNET32_PORT_FD)
29140 val |= 0x10;
29141 if (lp->options & PCNET32_PORT_100)
29142 val |= 0x08;
29143 - lp->a.write_bcr(ioaddr, 32, val);
29144 + lp->a->write_bcr(ioaddr, 32, val);
29145 } else {
29146 if (lp->options & PCNET32_PORT_ASEL) {
29147 - lp->a.write_bcr(ioaddr, 32,
29148 - lp->a.read_bcr(ioaddr,
29149 + lp->a->write_bcr(ioaddr, 32,
29150 + lp->a->read_bcr(ioaddr,
29151 32) | 0x0080);
29152 /* enable auto negotiate, setup, disable fd */
29153 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29154 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29155 val |= 0x20;
29156 - lp->a.write_bcr(ioaddr, 32, val);
29157 + lp->a->write_bcr(ioaddr, 32, val);
29158 }
29159 }
29160 } else {
29161 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29162 * There is really no good other way to handle multiple PHYs
29163 * other than turning off all automatics
29164 */
29165 - val = lp->a.read_bcr(ioaddr, 2);
29166 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29167 - val = lp->a.read_bcr(ioaddr, 32);
29168 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29169 + val = lp->a->read_bcr(ioaddr, 2);
29170 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29171 + val = lp->a->read_bcr(ioaddr, 32);
29172 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29173
29174 if (!(lp->options & PCNET32_PORT_ASEL)) {
29175 /* setup ecmd */
29176 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29177 ethtool_cmd_speed_set(&ecmd,
29178 (lp->options & PCNET32_PORT_100) ?
29179 SPEED_100 : SPEED_10);
29180 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29181 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29182
29183 if (lp->options & PCNET32_PORT_FD) {
29184 ecmd.duplex = DUPLEX_FULL;
29185 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29186 ecmd.duplex = DUPLEX_HALF;
29187 bcr9 |= ~(1 << 0);
29188 }
29189 - lp->a.write_bcr(ioaddr, 9, bcr9);
29190 + lp->a->write_bcr(ioaddr, 9, bcr9);
29191 }
29192
29193 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29194 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29195
29196 #ifdef DO_DXSUFLO
29197 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29198 - val = lp->a.read_csr(ioaddr, CSR3);
29199 + val = lp->a->read_csr(ioaddr, CSR3);
29200 val |= 0x40;
29201 - lp->a.write_csr(ioaddr, CSR3, val);
29202 + lp->a->write_csr(ioaddr, CSR3, val);
29203 }
29204 #endif
29205
29206 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29207 napi_enable(&lp->napi);
29208
29209 /* Re-initialize the PCNET32, and start it when done. */
29210 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29211 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29212 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29213 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29214
29215 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29216 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29217 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29218 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29219
29220 netif_start_queue(dev);
29221
29222 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29223
29224 i = 0;
29225 while (i++ < 100)
29226 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29227 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29228 break;
29229 /*
29230 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29231 * reports that doing so triggers a bug in the '974.
29232 */
29233 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29234 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29235
29236 netif_printk(lp, ifup, KERN_DEBUG, dev,
29237 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29238 i,
29239 (u32) (lp->init_dma_addr),
29240 - lp->a.read_csr(ioaddr, CSR0));
29241 + lp->a->read_csr(ioaddr, CSR0));
29242
29243 spin_unlock_irqrestore(&lp->lock, flags);
29244
29245 @@ -2218,7 +2218,7 @@ err_free_ring:
29246 * Switch back to 16bit mode to avoid problems with dumb
29247 * DOS packet driver after a warm reboot
29248 */
29249 - lp->a.write_bcr(ioaddr, 20, 4);
29250 + lp->a->write_bcr(ioaddr, 20, 4);
29251
29252 err_free_irq:
29253 spin_unlock_irqrestore(&lp->lock, flags);
29254 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29255
29256 /* wait for stop */
29257 for (i = 0; i < 100; i++)
29258 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29259 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29260 break;
29261
29262 if (i >= 100)
29263 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29264 return;
29265
29266 /* ReInit Ring */
29267 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29268 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29269 i = 0;
29270 while (i++ < 1000)
29271 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29272 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29273 break;
29274
29275 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29276 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29277 }
29278
29279 static void pcnet32_tx_timeout(struct net_device *dev)
29280 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29281 /* Transmitter timeout, serious problems. */
29282 if (pcnet32_debug & NETIF_MSG_DRV)
29283 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29284 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29285 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29286 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29287 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29288 dev->stats.tx_errors++;
29289 if (netif_msg_tx_err(lp)) {
29290 int i;
29291 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29292
29293 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29294 "%s() called, csr0 %4.4x\n",
29295 - __func__, lp->a.read_csr(ioaddr, CSR0));
29296 + __func__, lp->a->read_csr(ioaddr, CSR0));
29297
29298 /* Default status -- will not enable Successful-TxDone
29299 * interrupt when that option is available to us.
29300 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29301 dev->stats.tx_bytes += skb->len;
29302
29303 /* Trigger an immediate send poll. */
29304 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29305 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29306
29307 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29308 lp->tx_full = 1;
29309 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29310
29311 spin_lock(&lp->lock);
29312
29313 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29314 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29315 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29316 if (csr0 == 0xffff)
29317 break; /* PCMCIA remove happened */
29318 /* Acknowledge all of the current interrupt sources ASAP. */
29319 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29320 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29321
29322 netif_printk(lp, intr, KERN_DEBUG, dev,
29323 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29324 - csr0, lp->a.read_csr(ioaddr, CSR0));
29325 + csr0, lp->a->read_csr(ioaddr, CSR0));
29326
29327 /* Log misc errors. */
29328 if (csr0 & 0x4000)
29329 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29330 if (napi_schedule_prep(&lp->napi)) {
29331 u16 val;
29332 /* set interrupt masks */
29333 - val = lp->a.read_csr(ioaddr, CSR3);
29334 + val = lp->a->read_csr(ioaddr, CSR3);
29335 val |= 0x5f00;
29336 - lp->a.write_csr(ioaddr, CSR3, val);
29337 + lp->a->write_csr(ioaddr, CSR3, val);
29338
29339 __napi_schedule(&lp->napi);
29340 break;
29341 }
29342 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29343 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29344 }
29345
29346 netif_printk(lp, intr, KERN_DEBUG, dev,
29347 "exiting interrupt, csr0=%#4.4x\n",
29348 - lp->a.read_csr(ioaddr, CSR0));
29349 + lp->a->read_csr(ioaddr, CSR0));
29350
29351 spin_unlock(&lp->lock);
29352
29353 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29354
29355 spin_lock_irqsave(&lp->lock, flags);
29356
29357 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29358 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29359
29360 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29361 "Shutting down ethercard, status was %2.2x\n",
29362 - lp->a.read_csr(ioaddr, CSR0));
29363 + lp->a->read_csr(ioaddr, CSR0));
29364
29365 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29366 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29367 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29368
29369 /*
29370 * Switch back to 16bit mode to avoid problems with dumb
29371 * DOS packet driver after a warm reboot
29372 */
29373 - lp->a.write_bcr(ioaddr, 20, 4);
29374 + lp->a->write_bcr(ioaddr, 20, 4);
29375
29376 spin_unlock_irqrestore(&lp->lock, flags);
29377
29378 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29379 unsigned long flags;
29380
29381 spin_lock_irqsave(&lp->lock, flags);
29382 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29383 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29384 spin_unlock_irqrestore(&lp->lock, flags);
29385
29386 return &dev->stats;
29387 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29388 if (dev->flags & IFF_ALLMULTI) {
29389 ib->filter[0] = cpu_to_le32(~0U);
29390 ib->filter[1] = cpu_to_le32(~0U);
29391 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29392 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29393 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29394 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29395 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29396 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29397 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29398 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29399 return;
29400 }
29401 /* clear the multicast filter */
29402 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29403 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29404 }
29405 for (i = 0; i < 4; i++)
29406 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29407 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29408 le16_to_cpu(mcast_table[i]));
29409 }
29410
29411 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29412
29413 spin_lock_irqsave(&lp->lock, flags);
29414 suspended = pcnet32_suspend(dev, &flags, 0);
29415 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29416 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29417 if (dev->flags & IFF_PROMISC) {
29418 /* Log any net taps. */
29419 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29420 lp->init_block->mode =
29421 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29422 7);
29423 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29424 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29425 } else {
29426 lp->init_block->mode =
29427 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29428 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29429 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29430 pcnet32_load_multicast(dev);
29431 }
29432
29433 if (suspended) {
29434 int csr5;
29435 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29436 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29437 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29438 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29439 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29440 } else {
29441 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29442 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29443 pcnet32_restart(dev, CSR0_NORMAL);
29444 netif_wake_queue(dev);
29445 }
29446 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29447 if (!lp->mii)
29448 return 0;
29449
29450 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29451 - val_out = lp->a.read_bcr(ioaddr, 34);
29452 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29453 + val_out = lp->a->read_bcr(ioaddr, 34);
29454
29455 return val_out;
29456 }
29457 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29458 if (!lp->mii)
29459 return;
29460
29461 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29462 - lp->a.write_bcr(ioaddr, 34, val);
29463 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29464 + lp->a->write_bcr(ioaddr, 34, val);
29465 }
29466
29467 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29468 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29469 curr_link = mii_link_ok(&lp->mii_if);
29470 } else {
29471 ulong ioaddr = dev->base_addr; /* card base I/O address */
29472 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29473 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29474 }
29475 if (!curr_link) {
29476 if (prev_link || verbose) {
29477 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29478 (ecmd.duplex == DUPLEX_FULL)
29479 ? "full" : "half");
29480 }
29481 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29482 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29483 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29484 if (lp->mii_if.full_duplex)
29485 bcr9 |= (1 << 0);
29486 else
29487 bcr9 &= ~(1 << 0);
29488 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29489 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29490 }
29491 } else {
29492 netif_info(lp, link, dev, "link up\n");
29493 diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
29494 --- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29495 +++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29496 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29497 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29498 struct ppp_stats stats;
29499 struct ppp_comp_stats cstats;
29500 - char *vers;
29501
29502 switch (cmd) {
29503 case SIOCGPPPSTATS:
29504 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29505 break;
29506
29507 case SIOCGPPPVER:
29508 - vers = PPP_VERSION;
29509 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29510 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29511 break;
29512 err = 0;
29513 break;
29514 diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
29515 --- linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29516 +++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29517 @@ -645,12 +645,12 @@ struct rtl8169_private {
29518 struct mdio_ops {
29519 void (*write)(void __iomem *, int, int);
29520 int (*read)(void __iomem *, int);
29521 - } mdio_ops;
29522 + } __no_const mdio_ops;
29523
29524 struct pll_power_ops {
29525 void (*down)(struct rtl8169_private *);
29526 void (*up)(struct rtl8169_private *);
29527 - } pll_power_ops;
29528 + } __no_const pll_power_ops;
29529
29530 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29531 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29532 diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
29533 --- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29534 +++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29535 @@ -134,6 +134,7 @@
29536 #define CHIPREV_ID_5750_A0 0x4000
29537 #define CHIPREV_ID_5750_A1 0x4001
29538 #define CHIPREV_ID_5750_A3 0x4003
29539 +#define CHIPREV_ID_5750_C1 0x4201
29540 #define CHIPREV_ID_5750_C2 0x4202
29541 #define CHIPREV_ID_5752_A0_HW 0x5000
29542 #define CHIPREV_ID_5752_A0 0x6000
29543 diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
29544 --- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29545 +++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29546 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29547
29548 static int __init abyss_init (void)
29549 {
29550 - abyss_netdev_ops = tms380tr_netdev_ops;
29551 + pax_open_kernel();
29552 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29553
29554 - abyss_netdev_ops.ndo_open = abyss_open;
29555 - abyss_netdev_ops.ndo_stop = abyss_close;
29556 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29557 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29558 + pax_close_kernel();
29559
29560 return pci_register_driver(&abyss_driver);
29561 }
29562 diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
29563 --- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29564 +++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29565 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29566
29567 static int __init madgemc_init (void)
29568 {
29569 - madgemc_netdev_ops = tms380tr_netdev_ops;
29570 - madgemc_netdev_ops.ndo_open = madgemc_open;
29571 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29572 + pax_open_kernel();
29573 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29574 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29575 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29576 + pax_close_kernel();
29577
29578 return mca_register_driver (&madgemc_driver);
29579 }
29580 diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
29581 --- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29582 +++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29583 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29584 struct platform_device *pdev;
29585 int i, num = 0, err = 0;
29586
29587 - proteon_netdev_ops = tms380tr_netdev_ops;
29588 - proteon_netdev_ops.ndo_open = proteon_open;
29589 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29590 + pax_open_kernel();
29591 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29592 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29593 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29594 + pax_close_kernel();
29595
29596 err = platform_driver_register(&proteon_driver);
29597 if (err)
29598 diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
29599 --- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29600 +++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29601 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29602 struct platform_device *pdev;
29603 int i, num = 0, err = 0;
29604
29605 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29606 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29607 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29608 + pax_open_kernel();
29609 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29610 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29611 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29612 + pax_close_kernel();
29613
29614 err = platform_driver_register(&sk_isa_driver);
29615 if (err)
29616 diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
29617 --- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29618 +++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29619 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29620 struct de_srom_info_leaf *il;
29621 void *bufp;
29622
29623 + pax_track_stack();
29624 +
29625 /* download entire eeprom */
29626 for (i = 0; i < DE_EEPROM_WORDS; i++)
29627 ((__le16 *)ee_data)[i] =
29628 diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
29629 --- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29630 +++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29631 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29632 for (i=0; i<ETH_ALEN; i++) {
29633 tmp.addr[i] = dev->dev_addr[i];
29634 }
29635 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29636 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29637 break;
29638
29639 case DE4X5_SET_HWADDR: /* Set the hardware address */
29640 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29641 spin_lock_irqsave(&lp->lock, flags);
29642 memcpy(&statbuf, &lp->pktStats, ioc->len);
29643 spin_unlock_irqrestore(&lp->lock, flags);
29644 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29645 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29646 return -EFAULT;
29647 break;
29648 }
29649 diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
29650 --- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29651 +++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29652 @@ -71,7 +71,7 @@
29653 #include <asm/byteorder.h>
29654 #include <linux/serial_core.h>
29655 #include <linux/serial.h>
29656 -
29657 +#include <asm/local.h>
29658
29659 #define MOD_AUTHOR "Option Wireless"
29660 #define MOD_DESCRIPTION "USB High Speed Option driver"
29661 @@ -257,7 +257,7 @@ struct hso_serial {
29662
29663 /* from usb_serial_port */
29664 struct tty_struct *tty;
29665 - int open_count;
29666 + local_t open_count;
29667 spinlock_t serial_lock;
29668
29669 int (*write_data) (struct hso_serial *serial);
29670 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29671 struct urb *urb;
29672
29673 urb = serial->rx_urb[0];
29674 - if (serial->open_count > 0) {
29675 + if (local_read(&serial->open_count) > 0) {
29676 count = put_rxbuf_data(urb, serial);
29677 if (count == -1)
29678 return;
29679 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29680 DUMP1(urb->transfer_buffer, urb->actual_length);
29681
29682 /* Anyone listening? */
29683 - if (serial->open_count == 0)
29684 + if (local_read(&serial->open_count) == 0)
29685 return;
29686
29687 if (status == 0) {
29688 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29689 spin_unlock_irq(&serial->serial_lock);
29690
29691 /* check for port already opened, if not set the termios */
29692 - serial->open_count++;
29693 - if (serial->open_count == 1) {
29694 + if (local_inc_return(&serial->open_count) == 1) {
29695 serial->rx_state = RX_IDLE;
29696 /* Force default termio settings */
29697 _hso_serial_set_termios(tty, NULL);
29698 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29699 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29700 if (result) {
29701 hso_stop_serial_device(serial->parent);
29702 - serial->open_count--;
29703 + local_dec(&serial->open_count);
29704 kref_put(&serial->parent->ref, hso_serial_ref_free);
29705 }
29706 } else {
29707 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29708
29709 /* reset the rts and dtr */
29710 /* do the actual close */
29711 - serial->open_count--;
29712 + local_dec(&serial->open_count);
29713
29714 - if (serial->open_count <= 0) {
29715 - serial->open_count = 0;
29716 + if (local_read(&serial->open_count) <= 0) {
29717 + local_set(&serial->open_count, 0);
29718 spin_lock_irq(&serial->serial_lock);
29719 if (serial->tty == tty) {
29720 serial->tty->driver_data = NULL;
29721 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29722
29723 /* the actual setup */
29724 spin_lock_irqsave(&serial->serial_lock, flags);
29725 - if (serial->open_count)
29726 + if (local_read(&serial->open_count))
29727 _hso_serial_set_termios(tty, old);
29728 else
29729 tty->termios = old;
29730 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29731 D1("Pending read interrupt on port %d\n", i);
29732 spin_lock(&serial->serial_lock);
29733 if (serial->rx_state == RX_IDLE &&
29734 - serial->open_count > 0) {
29735 + local_read(&serial->open_count) > 0) {
29736 /* Setup and send a ctrl req read on
29737 * port i */
29738 if (!serial->rx_urb_filled[0]) {
29739 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29740 /* Start all serial ports */
29741 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29742 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29743 - if (dev2ser(serial_table[i])->open_count) {
29744 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29745 result =
29746 hso_start_serial_device(serial_table[i], GFP_NOIO);
29747 hso_kick_transmit(dev2ser(serial_table[i]));
29748 diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29749 --- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29750 +++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29751 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29752 * Return with error code if any of the queue indices
29753 * is out of range
29754 */
29755 - if (p->ring_index[i] < 0 ||
29756 - p->ring_index[i] >= adapter->num_rx_queues)
29757 + if (p->ring_index[i] >= adapter->num_rx_queues)
29758 return -EINVAL;
29759 }
29760
29761 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
29762 --- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29763 +++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29764 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29765 void (*link_down)(struct __vxge_hw_device *devh);
29766 void (*crit_err)(struct __vxge_hw_device *devh,
29767 enum vxge_hw_event type, u64 ext_data);
29768 -};
29769 +} __no_const;
29770
29771 /*
29772 * struct __vxge_hw_blockpool_entry - Block private data structure
29773 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
29774 --- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29775 +++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29776 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29777 struct sk_buff *completed[NR_SKB_COMPLETED];
29778 int more;
29779
29780 + pax_track_stack();
29781 +
29782 do {
29783 more = 0;
29784 skb_ptr = completed;
29785 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29786 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29787 int index;
29788
29789 + pax_track_stack();
29790 +
29791 /*
29792 * Filling
29793 * - itable with bucket numbers
29794 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
29795 --- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29796 +++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29797 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29798 struct vxge_hw_mempool_dma *dma_object,
29799 u32 index,
29800 u32 is_last);
29801 -};
29802 +} __no_const;
29803
29804 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29805 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29806 diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
29807 --- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29808 +++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29809 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29810 unsigned char hex[1024],
29811 * phex = hex;
29812
29813 + pax_track_stack();
29814 +
29815 if (len >= (sizeof(hex) / 2))
29816 len = (sizeof(hex) / 2) - 1;
29817
29818 diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
29819 --- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29820 +++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29821 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29822
29823 static int x25_open(struct net_device *dev)
29824 {
29825 - struct lapb_register_struct cb;
29826 + static struct lapb_register_struct cb = {
29827 + .connect_confirmation = x25_connected,
29828 + .connect_indication = x25_connected,
29829 + .disconnect_confirmation = x25_disconnected,
29830 + .disconnect_indication = x25_disconnected,
29831 + .data_indication = x25_data_indication,
29832 + .data_transmit = x25_data_transmit
29833 + };
29834 int result;
29835
29836 - cb.connect_confirmation = x25_connected;
29837 - cb.connect_indication = x25_connected;
29838 - cb.disconnect_confirmation = x25_disconnected;
29839 - cb.disconnect_indication = x25_disconnected;
29840 - cb.data_indication = x25_data_indication;
29841 - cb.data_transmit = x25_data_transmit;
29842 -
29843 result = lapb_register(dev, &cb);
29844 if (result != LAPB_OK)
29845 return result;
29846 diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
29847 --- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29848 +++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29849 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29850 int do_autopm = 1;
29851 DECLARE_COMPLETION_ONSTACK(notif_completion);
29852
29853 + pax_track_stack();
29854 +
29855 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29856 i2400m, ack, ack_size);
29857 BUG_ON(_ack == i2400m->bm_ack_buf);
29858 diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
29859 --- linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29860 +++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29861 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29862 BSSListElement * loop_net;
29863 BSSListElement * tmp_net;
29864
29865 + pax_track_stack();
29866 +
29867 /* Blow away current list of scan results */
29868 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29869 list_move_tail (&loop_net->list, &ai->network_free_list);
29870 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29871 WepKeyRid wkr;
29872 int rc;
29873
29874 + pax_track_stack();
29875 +
29876 memset( &mySsid, 0, sizeof( mySsid ) );
29877 kfree (ai->flash);
29878 ai->flash = NULL;
29879 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29880 __le32 *vals = stats.vals;
29881 int len;
29882
29883 + pax_track_stack();
29884 +
29885 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29886 return -ENOMEM;
29887 data = file->private_data;
29888 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29889 /* If doLoseSync is not 1, we won't do a Lose Sync */
29890 int doLoseSync = -1;
29891
29892 + pax_track_stack();
29893 +
29894 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29895 return -ENOMEM;
29896 data = file->private_data;
29897 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29898 int i;
29899 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29900
29901 + pax_track_stack();
29902 +
29903 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29904 if (!qual)
29905 return -ENOMEM;
29906 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29907 CapabilityRid cap_rid;
29908 __le32 *vals = stats_rid.vals;
29909
29910 + pax_track_stack();
29911 +
29912 /* Get stats out of the card */
29913 clear_bit(JOB_WSTATS, &local->jobs);
29914 if (local->power.event) {
29915 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
29916 --- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29917 +++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29918 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29919 unsigned int v;
29920 u64 tsf;
29921
29922 + pax_track_stack();
29923 +
29924 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29925 len += snprintf(buf+len, sizeof(buf)-len,
29926 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29927 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29928 unsigned int len = 0;
29929 unsigned int i;
29930
29931 + pax_track_stack();
29932 +
29933 len += snprintf(buf+len, sizeof(buf)-len,
29934 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29935
29936 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29937 unsigned int i;
29938 unsigned int v;
29939
29940 + pax_track_stack();
29941 +
29942 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29943 sc->ah->ah_ant_mode);
29944 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29945 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29946 unsigned int len = 0;
29947 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29948
29949 + pax_track_stack();
29950 +
29951 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29952 sc->bssidmask);
29953 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29954 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29955 unsigned int len = 0;
29956 int i;
29957
29958 + pax_track_stack();
29959 +
29960 len += snprintf(buf+len, sizeof(buf)-len,
29961 "RX\n---------------------\n");
29962 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29963 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29964 char buf[700];
29965 unsigned int len = 0;
29966
29967 + pax_track_stack();
29968 +
29969 len += snprintf(buf+len, sizeof(buf)-len,
29970 "HW has PHY error counters:\t%s\n",
29971 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29972 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29973 struct ath5k_buf *bf, *bf0;
29974 int i, n;
29975
29976 + pax_track_stack();
29977 +
29978 len += snprintf(buf+len, sizeof(buf)-len,
29979 "available txbuffers: %d\n", sc->txbuf_len);
29980
29981 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29982 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29983 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29984 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29985 int i, im, j;
29986 int nmeasurement;
29987
29988 + pax_track_stack();
29989 +
29990 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29991 if (ah->txchainmask & (1 << i))
29992 num_chains++;
29993 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
29994 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
29995 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
29996 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
29997 int theta_low_bin = 0;
29998 int i;
29999
30000 + pax_track_stack();
30001 +
30002 /* disregard any bin that contains <= 16 samples */
30003 thresh_accum_cnt = 16;
30004 scale_factor = 5;
30005 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
30006 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30007 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30008 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30009 char buf[512];
30010 unsigned int len = 0;
30011
30012 + pax_track_stack();
30013 +
30014 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30015 len += snprintf(buf + len, sizeof(buf) - len,
30016 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30017 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30018 u8 addr[ETH_ALEN];
30019 u32 tmp;
30020
30021 + pax_track_stack();
30022 +
30023 len += snprintf(buf + len, sizeof(buf) - len,
30024 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30025 wiphy_name(sc->hw->wiphy),
30026 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30027 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30028 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30029 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30030 unsigned int len = 0;
30031 int ret = 0;
30032
30033 + pax_track_stack();
30034 +
30035 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30036
30037 ath9k_htc_ps_wakeup(priv);
30038 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30039 unsigned int len = 0;
30040 int ret = 0;
30041
30042 + pax_track_stack();
30043 +
30044 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30045
30046 ath9k_htc_ps_wakeup(priv);
30047 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30048 unsigned int len = 0;
30049 int ret = 0;
30050
30051 + pax_track_stack();
30052 +
30053 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30054
30055 ath9k_htc_ps_wakeup(priv);
30056 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30057 char buf[512];
30058 unsigned int len = 0;
30059
30060 + pax_track_stack();
30061 +
30062 len += snprintf(buf + len, sizeof(buf) - len,
30063 "%20s : %10u\n", "Buffers queued",
30064 priv->debug.tx_stats.buf_queued);
30065 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30066 char buf[512];
30067 unsigned int len = 0;
30068
30069 + pax_track_stack();
30070 +
30071 spin_lock_bh(&priv->tx.tx_lock);
30072
30073 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30074 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30075 char buf[512];
30076 unsigned int len = 0;
30077
30078 + pax_track_stack();
30079 +
30080 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30081 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30082
30083 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
30084 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
30085 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30086 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30087
30088 /* ANI */
30089 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30090 -};
30091 +} __no_const;
30092
30093 /**
30094 * struct ath_hw_ops - callbacks used by hardware code and driver code
30095 @@ -637,7 +637,7 @@ struct ath_hw_ops {
30096 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30097 struct ath_hw_antcomb_conf *antconf);
30098
30099 -};
30100 +} __no_const;
30101
30102 struct ath_nf_limits {
30103 s16 max;
30104 @@ -650,7 +650,7 @@ struct ath_nf_limits {
30105 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30106
30107 struct ath_hw {
30108 - struct ath_ops reg_ops;
30109 + ath_ops_no_const reg_ops;
30110
30111 struct ieee80211_hw *hw;
30112 struct ath_common common;
30113 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
30114 --- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30115 +++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30116 @@ -121,6 +121,7 @@ struct ath_ops {
30117 void (*write_flush) (void *);
30118 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30119 };
30120 +typedef struct ath_ops __no_const ath_ops_no_const;
30121
30122 struct ath_common;
30123 struct ath_bus_ops;
30124 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
30125 --- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30126 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30127 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30128 int err;
30129 DECLARE_SSID_BUF(ssid);
30130
30131 + pax_track_stack();
30132 +
30133 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30134
30135 if (ssid_len)
30136 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30137 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30138 int err;
30139
30140 + pax_track_stack();
30141 +
30142 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30143 idx, keylen, len);
30144
30145 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30146 --- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30147 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30148 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30149 unsigned long flags;
30150 DECLARE_SSID_BUF(ssid);
30151
30152 + pax_track_stack();
30153 +
30154 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30155 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30156 print_ssid(ssid, info_element->data, info_element->len),
30157 diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30158 --- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30159 +++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30160 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30161 */
30162 if (iwl3945_mod_params.disable_hw_scan) {
30163 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30164 - iwl3945_hw_ops.hw_scan = NULL;
30165 + pax_open_kernel();
30166 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30167 + pax_close_kernel();
30168 }
30169
30170 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30171 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30172 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30173 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30174 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30175 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30176 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30177
30178 + pax_track_stack();
30179 +
30180 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30181
30182 /* Treat uninitialized rate scaling data same as non-existing. */
30183 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30184 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30185 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30186
30187 + pax_track_stack();
30188 +
30189 /* Override starting rate (index 0) if needed for debug purposes */
30190 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30191
30192 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30193 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30194 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30195 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30196 int pos = 0;
30197 const size_t bufsz = sizeof(buf);
30198
30199 + pax_track_stack();
30200 +
30201 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30202 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30203 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30204 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30205 char buf[256 * NUM_IWL_RXON_CTX];
30206 const size_t bufsz = sizeof(buf);
30207
30208 + pax_track_stack();
30209 +
30210 for_each_context(priv, ctx) {
30211 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30212 ctx->ctxid);
30213 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30214 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30215 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30216 @@ -68,8 +68,8 @@ do {
30217 } while (0)
30218
30219 #else
30220 -#define IWL_DEBUG(__priv, level, fmt, args...)
30221 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30222 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30223 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30224 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30225 const void *p, u32 len)
30226 {}
30227 diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30228 --- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30229 +++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30230 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30231 int buf_len = 512;
30232 size_t len = 0;
30233
30234 + pax_track_stack();
30235 +
30236 if (*ppos != 0)
30237 return 0;
30238 if (count < sizeof(buf))
30239 diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
30240 --- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30241 +++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30242 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30243 return -EINVAL;
30244
30245 if (fake_hw_scan) {
30246 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30247 - mac80211_hwsim_ops.sw_scan_start = NULL;
30248 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30249 + pax_open_kernel();
30250 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30251 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30252 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30253 + pax_close_kernel();
30254 }
30255
30256 spin_lock_init(&hwsim_radio_lock);
30257 diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
30258 --- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30259 +++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30260 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30261
30262 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30263
30264 - if (rts_threshold < 0 || rts_threshold > 2347)
30265 + if (rts_threshold > 2347)
30266 rts_threshold = 2347;
30267
30268 tmp = cpu_to_le32(rts_threshold);
30269 diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30270 --- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30271 +++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30272 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30273 u8 rfpath;
30274 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30275
30276 + pax_track_stack();
30277 +
30278 precommoncmdcnt = 0;
30279 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30280 MAX_PRECMD_CNT,
30281 diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
30282 --- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30283 +++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30284 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
30285 void (*reset)(struct wl1251 *wl);
30286 void (*enable_irq)(struct wl1251 *wl);
30287 void (*disable_irq)(struct wl1251 *wl);
30288 -};
30289 +} __no_const;
30290
30291 struct wl1251 {
30292 struct ieee80211_hw *hw;
30293 diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
30294 --- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30295 +++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30296 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30297 u32 chunk_len;
30298 int i;
30299
30300 + pax_track_stack();
30301 +
30302 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30303
30304 spi_message_init(&m);
30305 diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
30306 --- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30307 +++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30308 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30309 if (cookie == NO_COOKIE)
30310 offset = pc;
30311 if (cookie == INVALID_COOKIE) {
30312 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30313 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30314 offset = pc;
30315 }
30316 if (cookie != last_cookie) {
30317 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30318 /* add userspace sample */
30319
30320 if (!mm) {
30321 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30322 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30323 return 0;
30324 }
30325
30326 cookie = lookup_dcookie(mm, s->eip, &offset);
30327
30328 if (cookie == INVALID_COOKIE) {
30329 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30330 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30331 return 0;
30332 }
30333
30334 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30335 /* ignore backtraces if failed to add a sample */
30336 if (state == sb_bt_start) {
30337 state = sb_bt_ignore;
30338 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30339 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30340 }
30341 }
30342 release_mm(mm);
30343 diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
30344 --- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30345 +++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30346 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30347 }
30348
30349 if (buffer_pos == buffer_size) {
30350 - atomic_inc(&oprofile_stats.event_lost_overflow);
30351 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30352 return;
30353 }
30354
30355 diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
30356 --- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30357 +++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30358 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30359 if (oprofile_ops.switch_events())
30360 return;
30361
30362 - atomic_inc(&oprofile_stats.multiplex_counter);
30363 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30364 start_switch_worker();
30365 }
30366
30367 diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
30368 --- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30369 +++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30370 @@ -186,7 +186,7 @@ static const struct file_operations atom
30371
30372
30373 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30374 - char const *name, atomic_t *val)
30375 + char const *name, atomic_unchecked_t *val)
30376 {
30377 return __oprofilefs_create_file(sb, root, name,
30378 &atomic_ro_fops, 0444, val);
30379 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
30380 --- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30381 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30382 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30383 cpu_buf->sample_invalid_eip = 0;
30384 }
30385
30386 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30387 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30388 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30389 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30390 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30391 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30392 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30393 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30394 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30395 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30396 }
30397
30398
30399 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
30400 --- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30401 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30402 @@ -13,11 +13,11 @@
30403 #include <asm/atomic.h>
30404
30405 struct oprofile_stat_struct {
30406 - atomic_t sample_lost_no_mm;
30407 - atomic_t sample_lost_no_mapping;
30408 - atomic_t bt_lost_no_mapping;
30409 - atomic_t event_lost_overflow;
30410 - atomic_t multiplex_counter;
30411 + atomic_unchecked_t sample_lost_no_mm;
30412 + atomic_unchecked_t sample_lost_no_mapping;
30413 + atomic_unchecked_t bt_lost_no_mapping;
30414 + atomic_unchecked_t event_lost_overflow;
30415 + atomic_unchecked_t multiplex_counter;
30416 };
30417
30418 extern struct oprofile_stat_struct oprofile_stats;
30419 diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
30420 --- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30421 +++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30422 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30423
30424 *ppos += len;
30425
30426 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30427 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30428 }
30429
30430 #ifdef CONFIG_PARPORT_1284
30431 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30432
30433 *ppos += len;
30434
30435 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30436 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30437 }
30438 #endif /* IEEE1284.3 support. */
30439
30440 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
30441 --- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30442 +++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30443 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30444 int (*hardware_test) (struct slot* slot, u32 value);
30445 u8 (*get_power) (struct slot* slot);
30446 int (*set_power) (struct slot* slot, int value);
30447 -};
30448 +} __no_const;
30449
30450 struct cpci_hp_controller {
30451 unsigned int irq;
30452 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
30453 --- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30454 +++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30455 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30456
30457 void compaq_nvram_init (void __iomem *rom_start)
30458 {
30459 +
30460 +#ifndef CONFIG_PAX_KERNEXEC
30461 if (rom_start) {
30462 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30463 }
30464 +#endif
30465 +
30466 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30467
30468 /* initialize our int15 lock */
30469 diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
30470 --- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30471 +++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30472 @@ -27,9 +27,9 @@
30473 #define MODULE_PARAM_PREFIX "pcie_aspm."
30474
30475 /* Note: those are not register definitions */
30476 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30477 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30478 -#define ASPM_STATE_L1 (4) /* L1 state */
30479 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30480 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30481 +#define ASPM_STATE_L1 (4U) /* L1 state */
30482 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30483 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30484
30485 diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
30486 --- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30487 +++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30488 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30489 u32 l, sz, mask;
30490 u16 orig_cmd;
30491
30492 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30493 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30494
30495 if (!dev->mmio_always_on) {
30496 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30497 diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
30498 --- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30499 +++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30500 @@ -476,7 +476,16 @@ static const struct file_operations proc
30501 static int __init pci_proc_init(void)
30502 {
30503 struct pci_dev *dev = NULL;
30504 +
30505 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30506 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30507 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30508 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30509 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30510 +#endif
30511 +#else
30512 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30513 +#endif
30514 proc_create("devices", 0, proc_bus_pci_dir,
30515 &proc_bus_pci_dev_operations);
30516 proc_initialized = 1;
30517 diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
30518 --- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30519 +++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30520 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30521 struct pcifront_sd *sd = bus->sysdata;
30522 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30523
30524 + pax_track_stack();
30525 +
30526 if (verbose_request)
30527 dev_info(&pdev->xdev->dev,
30528 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30529 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30530 struct pcifront_sd *sd = bus->sysdata;
30531 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30532
30533 + pax_track_stack();
30534 +
30535 if (verbose_request)
30536 dev_info(&pdev->xdev->dev,
30537 "write dev=%04x:%02x:%02x.%01x - "
30538 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30539 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30540 struct msi_desc *entry;
30541
30542 + pax_track_stack();
30543 +
30544 if (nvec > SH_INFO_MAX_VEC) {
30545 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30546 " Increase SH_INFO_MAX_VEC.\n", nvec);
30547 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30548 struct pcifront_sd *sd = dev->bus->sysdata;
30549 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30550
30551 + pax_track_stack();
30552 +
30553 err = do_pci_op(pdev, &op);
30554
30555 /* What should do for error ? */
30556 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30557 struct pcifront_sd *sd = dev->bus->sysdata;
30558 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30559
30560 + pax_track_stack();
30561 +
30562 err = do_pci_op(pdev, &op);
30563 if (likely(!err)) {
30564 vector[0] = op.value;
30565 diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
30566 --- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30567 +++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30568 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30569 return 0;
30570 }
30571
30572 -void static hotkey_mask_warn_incomplete_mask(void)
30573 +static void hotkey_mask_warn_incomplete_mask(void)
30574 {
30575 /* log only what the user can fix... */
30576 const u32 wantedmask = hotkey_driver_mask &
30577 diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
30578 --- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30579 +++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30580 @@ -59,7 +59,7 @@ do { \
30581 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30582 } while(0)
30583
30584 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30585 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30586 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30587
30588 /*
30589 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30590
30591 cpu = get_cpu();
30592 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30593 +
30594 + pax_open_kernel();
30595 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30596 + pax_close_kernel();
30597
30598 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30599 spin_lock_irqsave(&pnp_bios_lock, flags);
30600 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30601 :"memory");
30602 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30603
30604 + pax_open_kernel();
30605 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30606 + pax_close_kernel();
30607 +
30608 put_cpu();
30609
30610 /* If we get here and this is set then the PnP BIOS faulted on us. */
30611 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30612 return status;
30613 }
30614
30615 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30616 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30617 {
30618 int i;
30619
30620 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30621 pnp_bios_callpoint.offset = header->fields.pm16offset;
30622 pnp_bios_callpoint.segment = PNP_CS16;
30623
30624 + pax_open_kernel();
30625 +
30626 for_each_possible_cpu(i) {
30627 struct desc_struct *gdt = get_cpu_gdt_table(i);
30628 if (!gdt)
30629 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30630 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30631 (unsigned long)__va(header->fields.pm16dseg));
30632 }
30633 +
30634 + pax_close_kernel();
30635 }
30636 diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
30637 --- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30638 +++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30639 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30640 return 1;
30641
30642 /* check if the resource is valid */
30643 - if (*irq < 0 || *irq > 15)
30644 + if (*irq > 15)
30645 return 0;
30646
30647 /* check if the resource is reserved */
30648 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30649 return 1;
30650
30651 /* check if the resource is valid */
30652 - if (*dma < 0 || *dma == 4 || *dma > 7)
30653 + if (*dma == 4 || *dma > 7)
30654 return 0;
30655
30656 /* check if the resource is reserved */
30657 diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
30658 --- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30659 +++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30660 @@ -67,7 +67,7 @@
30661 struct bq27x00_device_info;
30662 struct bq27x00_access_methods {
30663 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30664 -};
30665 +} __no_const;
30666
30667 enum bq27x00_chip { BQ27000, BQ27500 };
30668
30669 diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
30670 --- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30671 +++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30672 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30673 max8660->shadow_regs[MAX8660_OVER1] = 5;
30674 } else {
30675 /* Otherwise devices can be toggled via software */
30676 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30677 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30678 + pax_open_kernel();
30679 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30680 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30681 + pax_close_kernel();
30682 }
30683
30684 /*
30685 diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
30686 --- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30687 +++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30688 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30689 }
30690 mc13xxx_unlock(mc13892);
30691
30692 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30693 + pax_open_kernel();
30694 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30695 = mc13892_vcam_set_mode;
30696 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30697 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30698 = mc13892_vcam_get_mode;
30699 + pax_close_kernel();
30700 for (i = 0; i < pdata->num_regulators; i++) {
30701 init_data = &pdata->regulators[i];
30702 priv->regulators[i] = regulator_register(
30703 diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
30704 --- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30705 +++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30706 @@ -14,6 +14,7 @@
30707 #include <linux/module.h>
30708 #include <linux/rtc.h>
30709 #include <linux/sched.h>
30710 +#include <linux/grsecurity.h>
30711 #include "rtc-core.h"
30712
30713 static dev_t rtc_devt;
30714 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30715 if (copy_from_user(&tm, uarg, sizeof(tm)))
30716 return -EFAULT;
30717
30718 + gr_log_timechange();
30719 +
30720 return rtc_set_time(rtc, &tm);
30721
30722 case RTC_PIE_ON:
30723 diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
30724 --- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30725 +++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30726 @@ -492,7 +492,7 @@ struct adapter_ops
30727 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30728 /* Administrative operations */
30729 int (*adapter_comm)(struct aac_dev * dev, int comm);
30730 -};
30731 +} __no_const;
30732
30733 /*
30734 * Define which interrupt handler needs to be installed
30735 diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
30736 --- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30737 +++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30738 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30739 u32 actual_fibsize64, actual_fibsize = 0;
30740 int i;
30741
30742 + pax_track_stack();
30743
30744 if (dev->in_reset) {
30745 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30746 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
30747 --- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30748 +++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30749 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30750 struct bfad_vport_s *vport, *vport_new;
30751 struct bfa_fcs_driver_info_s driver_info;
30752
30753 + pax_track_stack();
30754 +
30755 /* Fill the driver_info info to fcs*/
30756 memset(&driver_info, 0, sizeof(driver_info));
30757 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30758 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
30759 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30760 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30761 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30762 u16 len, count;
30763 u16 templen;
30764
30765 + pax_track_stack();
30766 +
30767 /*
30768 * get hba attributes
30769 */
30770 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30771 u8 count = 0;
30772 u16 templen;
30773
30774 + pax_track_stack();
30775 +
30776 /*
30777 * get port attributes
30778 */
30779 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
30780 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30781 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30782 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30783 struct fc_rpsc_speed_info_s speeds;
30784 struct bfa_port_attr_s pport_attr;
30785
30786 + pax_track_stack();
30787 +
30788 bfa_trc(port->fcs, rx_fchs->s_id);
30789 bfa_trc(port->fcs, rx_fchs->d_id);
30790
30791 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
30792 --- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30793 +++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30794 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30795 u32 *nvecs, u32 *maxvec);
30796 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30797 u32 *end);
30798 -};
30799 +} __no_const;
30800 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30801
30802 struct bfa_iocfc_s {
30803 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
30804 --- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30805 +++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30806 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30807 bfa_ioc_disable_cbfn_t disable_cbfn;
30808 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30809 bfa_ioc_reset_cbfn_t reset_cbfn;
30810 -};
30811 +} __no_const;
30812
30813 /*
30814 * Heartbeat failure notification queue element.
30815 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30816 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30817 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30818 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30819 -};
30820 +} __no_const;
30821
30822 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30823 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30824 diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
30825 --- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30826 +++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30827 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30828 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30829 *PrototypeHostAdapter)
30830 {
30831 + pax_track_stack();
30832 +
30833 /*
30834 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30835 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30836 diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
30837 --- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30838 +++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30839 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30840 dma_addr_t addr;
30841 ulong flags = 0;
30842
30843 + pax_track_stack();
30844 +
30845 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30846 // get user msg size in u32s
30847 if(get_user(size, &user_msg[0])){
30848 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30849 s32 rcode;
30850 dma_addr_t addr;
30851
30852 + pax_track_stack();
30853 +
30854 memset(msg, 0 , sizeof(msg));
30855 len = scsi_bufflen(cmd);
30856 direction = 0x00000000;
30857 diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
30858 --- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30859 +++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30860 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30861 struct hostdata *ha;
30862 char name[16];
30863
30864 + pax_track_stack();
30865 +
30866 sprintf(name, "%s%d", driver_name, j);
30867
30868 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30869 diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
30870 --- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30871 +++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30872 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30873 } buf;
30874 int rc;
30875
30876 + pax_track_stack();
30877 +
30878 fiph = (struct fip_header *)skb->data;
30879 sub = fiph->fip_subcode;
30880
30881 diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
30882 --- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30883 +++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30884 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30885 unsigned long flags;
30886 gdth_ha_str *ha;
30887
30888 + pax_track_stack();
30889 +
30890 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30891 return -EFAULT;
30892 ha = gdth_find_ha(ldrv.ionode);
30893 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30894 gdth_ha_str *ha;
30895 int rval;
30896
30897 + pax_track_stack();
30898 +
30899 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30900 res.number >= MAX_HDRIVES)
30901 return -EFAULT;
30902 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30903 gdth_ha_str *ha;
30904 int rval;
30905
30906 + pax_track_stack();
30907 +
30908 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30909 return -EFAULT;
30910 ha = gdth_find_ha(gen.ionode);
30911 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30912 int i;
30913 gdth_cmd_str gdtcmd;
30914 char cmnd[MAX_COMMAND_SIZE];
30915 +
30916 + pax_track_stack();
30917 +
30918 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30919
30920 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30921 diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
30922 --- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30923 +++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30924 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30925 u64 paddr;
30926
30927 char cmnd[MAX_COMMAND_SIZE];
30928 +
30929 + pax_track_stack();
30930 +
30931 memset(cmnd, 0xff, 12);
30932 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30933
30934 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30935 gdth_hget_str *phg;
30936 char cmnd[MAX_COMMAND_SIZE];
30937
30938 + pax_track_stack();
30939 +
30940 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30941 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30942 if (!gdtcmd || !estr)
30943 diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
30944 --- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30945 +++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30946 @@ -42,7 +42,7 @@
30947 #include "scsi_logging.h"
30948
30949
30950 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
30951 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30952
30953
30954 static void scsi_host_cls_release(struct device *dev)
30955 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30956 * subtract one because we increment first then return, but we need to
30957 * know what the next host number was before increment
30958 */
30959 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30960 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30961 shost->dma_channel = 0xff;
30962
30963 /* These three are default values which can be overridden */
30964 diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
30965 --- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30966 +++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30967 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30968 u32 a;
30969
30970 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30971 - return h->access.command_completed(h);
30972 + return h->access->command_completed(h);
30973
30974 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30975 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30976 @@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30977 while (!list_empty(&h->reqQ)) {
30978 c = list_entry(h->reqQ.next, struct CommandList, list);
30979 /* can't do anything if fifo is full */
30980 - if ((h->access.fifo_full(h))) {
30981 + if ((h->access->fifo_full(h))) {
30982 dev_warn(&h->pdev->dev, "fifo full\n");
30983 break;
30984 }
30985 @@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30986 h->Qdepth--;
30987
30988 /* Tell the controller execute command */
30989 - h->access.submit_command(h, c);
30990 + h->access->submit_command(h, c);
30991
30992 /* Put job onto the completed Q */
30993 addQ(&h->cmpQ, c);
30994 @@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
30995
30996 static inline unsigned long get_next_completion(struct ctlr_info *h)
30997 {
30998 - return h->access.command_completed(h);
30999 + return h->access->command_completed(h);
31000 }
31001
31002 static inline bool interrupt_pending(struct ctlr_info *h)
31003 {
31004 - return h->access.intr_pending(h);
31005 + return h->access->intr_pending(h);
31006 }
31007
31008 static inline long interrupt_not_for_us(struct ctlr_info *h)
31009 {
31010 - return (h->access.intr_pending(h) == 0) ||
31011 + return (h->access->intr_pending(h) == 0) ||
31012 (h->interrupts_enabled == 0);
31013 }
31014
31015 @@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31016 if (prod_index < 0)
31017 return -ENODEV;
31018 h->product_name = products[prod_index].product_name;
31019 - h->access = *(products[prod_index].access);
31020 + h->access = products[prod_index].access;
31021
31022 if (hpsa_board_disabled(h->pdev)) {
31023 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31024 @@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31025 }
31026
31027 /* make sure the board interrupts are off */
31028 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31029 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31030
31031 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31032 goto clean2;
31033 @@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31034 * fake ones to scoop up any residual completions.
31035 */
31036 spin_lock_irqsave(&h->lock, flags);
31037 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31038 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31039 spin_unlock_irqrestore(&h->lock, flags);
31040 free_irq(h->intr[h->intr_mode], h);
31041 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31042 @@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31043 dev_info(&h->pdev->dev, "Board READY.\n");
31044 dev_info(&h->pdev->dev,
31045 "Waiting for stale completions to drain.\n");
31046 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31047 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31048 msleep(10000);
31049 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31050 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31051
31052 rc = controller_reset_failed(h->cfgtable);
31053 if (rc)
31054 @@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31055 }
31056
31057 /* Turn the interrupts on so we can service requests */
31058 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31059 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31060
31061 hpsa_hba_inquiry(h);
31062 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31063 @@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31064 * To write all data in the battery backed cache to disks
31065 */
31066 hpsa_flush_cache(h);
31067 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31068 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31069 free_irq(h->intr[h->intr_mode], h);
31070 #ifdef CONFIG_PCI_MSI
31071 if (h->msix_vector)
31072 @@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31073 return;
31074 }
31075 /* Change the access methods to the performant access methods */
31076 - h->access = SA5_performant_access;
31077 + h->access = &SA5_performant_access;
31078 h->transMethod = CFGTBL_Trans_Performant;
31079 }
31080
31081 diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
31082 --- linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
31083 +++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31084 @@ -73,7 +73,7 @@ struct ctlr_info {
31085 unsigned int msix_vector;
31086 unsigned int msi_vector;
31087 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31088 - struct access_method access;
31089 + struct access_method *access;
31090
31091 /* queue and queue Info */
31092 struct list_head reqQ;
31093 diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
31094 --- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31095 +++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31096 @@ -1027,7 +1027,7 @@ typedef struct {
31097 int (*intr)(struct ips_ha *);
31098 void (*enableint)(struct ips_ha *);
31099 uint32_t (*statupd)(struct ips_ha *);
31100 -} ips_hw_func_t;
31101 +} __no_const ips_hw_func_t;
31102
31103 typedef struct ips_ha {
31104 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31105 diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
31106 --- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31107 +++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31108 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31109 * all together if not used XXX
31110 */
31111 struct {
31112 - atomic_t no_free_exch;
31113 - atomic_t no_free_exch_xid;
31114 - atomic_t xid_not_found;
31115 - atomic_t xid_busy;
31116 - atomic_t seq_not_found;
31117 - atomic_t non_bls_resp;
31118 + atomic_unchecked_t no_free_exch;
31119 + atomic_unchecked_t no_free_exch_xid;
31120 + atomic_unchecked_t xid_not_found;
31121 + atomic_unchecked_t xid_busy;
31122 + atomic_unchecked_t seq_not_found;
31123 + atomic_unchecked_t non_bls_resp;
31124 } stats;
31125 };
31126
31127 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31128 /* allocate memory for exchange */
31129 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31130 if (!ep) {
31131 - atomic_inc(&mp->stats.no_free_exch);
31132 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31133 goto out;
31134 }
31135 memset(ep, 0, sizeof(*ep));
31136 @@ -761,7 +761,7 @@ out:
31137 return ep;
31138 err:
31139 spin_unlock_bh(&pool->lock);
31140 - atomic_inc(&mp->stats.no_free_exch_xid);
31141 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31142 mempool_free(ep, mp->ep_pool);
31143 return NULL;
31144 }
31145 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31146 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31147 ep = fc_exch_find(mp, xid);
31148 if (!ep) {
31149 - atomic_inc(&mp->stats.xid_not_found);
31150 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31151 reject = FC_RJT_OX_ID;
31152 goto out;
31153 }
31154 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31155 ep = fc_exch_find(mp, xid);
31156 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31157 if (ep) {
31158 - atomic_inc(&mp->stats.xid_busy);
31159 + atomic_inc_unchecked(&mp->stats.xid_busy);
31160 reject = FC_RJT_RX_ID;
31161 goto rel;
31162 }
31163 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31164 }
31165 xid = ep->xid; /* get our XID */
31166 } else if (!ep) {
31167 - atomic_inc(&mp->stats.xid_not_found);
31168 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31169 reject = FC_RJT_RX_ID; /* XID not found */
31170 goto out;
31171 }
31172 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31173 } else {
31174 sp = &ep->seq;
31175 if (sp->id != fh->fh_seq_id) {
31176 - atomic_inc(&mp->stats.seq_not_found);
31177 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31178 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31179 goto rel;
31180 }
31181 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31182
31183 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31184 if (!ep) {
31185 - atomic_inc(&mp->stats.xid_not_found);
31186 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31187 goto out;
31188 }
31189 if (ep->esb_stat & ESB_ST_COMPLETE) {
31190 - atomic_inc(&mp->stats.xid_not_found);
31191 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31192 goto rel;
31193 }
31194 if (ep->rxid == FC_XID_UNKNOWN)
31195 ep->rxid = ntohs(fh->fh_rx_id);
31196 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31197 - atomic_inc(&mp->stats.xid_not_found);
31198 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31199 goto rel;
31200 }
31201 if (ep->did != ntoh24(fh->fh_s_id) &&
31202 ep->did != FC_FID_FLOGI) {
31203 - atomic_inc(&mp->stats.xid_not_found);
31204 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31205 goto rel;
31206 }
31207 sof = fr_sof(fp);
31208 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31209 sp->ssb_stat |= SSB_ST_RESP;
31210 sp->id = fh->fh_seq_id;
31211 } else if (sp->id != fh->fh_seq_id) {
31212 - atomic_inc(&mp->stats.seq_not_found);
31213 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31214 goto rel;
31215 }
31216
31217 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31218 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31219
31220 if (!sp)
31221 - atomic_inc(&mp->stats.xid_not_found);
31222 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31223 else
31224 - atomic_inc(&mp->stats.non_bls_resp);
31225 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31226
31227 fc_frame_free(fp);
31228 }
31229 diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
31230 --- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31231 +++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31232 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31233 .postreset = ata_std_postreset,
31234 .error_handler = ata_std_error_handler,
31235 .post_internal_cmd = sas_ata_post_internal,
31236 - .qc_defer = ata_std_qc_defer,
31237 + .qc_defer = ata_std_qc_defer,
31238 .qc_prep = ata_noop_qc_prep,
31239 .qc_issue = sas_ata_qc_issue,
31240 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31241 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
31242 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31243 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31244 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31245
31246 #include <linux/debugfs.h>
31247
31248 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31249 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31250 static unsigned long lpfc_debugfs_start_time = 0L;
31251
31252 /* iDiag */
31253 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31254 lpfc_debugfs_enable = 0;
31255
31256 len = 0;
31257 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31258 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31259 (lpfc_debugfs_max_disc_trc - 1);
31260 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31261 dtp = vport->disc_trc + i;
31262 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31263 lpfc_debugfs_enable = 0;
31264
31265 len = 0;
31266 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31267 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31268 (lpfc_debugfs_max_slow_ring_trc - 1);
31269 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31270 dtp = phba->slow_ring_trc + i;
31271 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31272 uint32_t *ptr;
31273 char buffer[1024];
31274
31275 + pax_track_stack();
31276 +
31277 off = 0;
31278 spin_lock_irq(&phba->hbalock);
31279
31280 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31281 !vport || !vport->disc_trc)
31282 return;
31283
31284 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31285 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31286 (lpfc_debugfs_max_disc_trc - 1);
31287 dtp = vport->disc_trc + index;
31288 dtp->fmt = fmt;
31289 dtp->data1 = data1;
31290 dtp->data2 = data2;
31291 dtp->data3 = data3;
31292 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31293 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31294 dtp->jif = jiffies;
31295 #endif
31296 return;
31297 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31298 !phba || !phba->slow_ring_trc)
31299 return;
31300
31301 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31302 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31303 (lpfc_debugfs_max_slow_ring_trc - 1);
31304 dtp = phba->slow_ring_trc + index;
31305 dtp->fmt = fmt;
31306 dtp->data1 = data1;
31307 dtp->data2 = data2;
31308 dtp->data3 = data3;
31309 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31310 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31311 dtp->jif = jiffies;
31312 #endif
31313 return;
31314 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31315 "slow_ring buffer\n");
31316 goto debug_failed;
31317 }
31318 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31319 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31320 memset(phba->slow_ring_trc, 0,
31321 (sizeof(struct lpfc_debugfs_trc) *
31322 lpfc_debugfs_max_slow_ring_trc));
31323 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31324 "buffer\n");
31325 goto debug_failed;
31326 }
31327 - atomic_set(&vport->disc_trc_cnt, 0);
31328 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31329
31330 snprintf(name, sizeof(name), "discovery_trace");
31331 vport->debug_disc_trc =
31332 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
31333 --- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31334 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31335 @@ -420,7 +420,7 @@ struct lpfc_vport {
31336 struct dentry *debug_nodelist;
31337 struct dentry *vport_debugfs_root;
31338 struct lpfc_debugfs_trc *disc_trc;
31339 - atomic_t disc_trc_cnt;
31340 + atomic_unchecked_t disc_trc_cnt;
31341 #endif
31342 uint8_t stat_data_enabled;
31343 uint8_t stat_data_blocked;
31344 @@ -826,8 +826,8 @@ struct lpfc_hba {
31345 struct timer_list fabric_block_timer;
31346 unsigned long bit_flags;
31347 #define FABRIC_COMANDS_BLOCKED 0
31348 - atomic_t num_rsrc_err;
31349 - atomic_t num_cmd_success;
31350 + atomic_unchecked_t num_rsrc_err;
31351 + atomic_unchecked_t num_cmd_success;
31352 unsigned long last_rsrc_error_time;
31353 unsigned long last_ramp_down_time;
31354 unsigned long last_ramp_up_time;
31355 @@ -841,7 +841,7 @@ struct lpfc_hba {
31356 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31357 struct dentry *debug_slow_ring_trc;
31358 struct lpfc_debugfs_trc *slow_ring_trc;
31359 - atomic_t slow_ring_trc_cnt;
31360 + atomic_unchecked_t slow_ring_trc_cnt;
31361 /* iDiag debugfs sub-directory */
31362 struct dentry *idiag_root;
31363 struct dentry *idiag_pci_cfg;
31364 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
31365 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31366 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31367 @@ -9923,8 +9923,10 @@ lpfc_init(void)
31368 printk(LPFC_COPYRIGHT "\n");
31369
31370 if (lpfc_enable_npiv) {
31371 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31372 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31373 + pax_open_kernel();
31374 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31375 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31376 + pax_close_kernel();
31377 }
31378 lpfc_transport_template =
31379 fc_attach_transport(&lpfc_transport_functions);
31380 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
31381 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31382 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31383 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31384 uint32_t evt_posted;
31385
31386 spin_lock_irqsave(&phba->hbalock, flags);
31387 - atomic_inc(&phba->num_rsrc_err);
31388 + atomic_inc_unchecked(&phba->num_rsrc_err);
31389 phba->last_rsrc_error_time = jiffies;
31390
31391 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31392 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31393 unsigned long flags;
31394 struct lpfc_hba *phba = vport->phba;
31395 uint32_t evt_posted;
31396 - atomic_inc(&phba->num_cmd_success);
31397 + atomic_inc_unchecked(&phba->num_cmd_success);
31398
31399 if (vport->cfg_lun_queue_depth <= queue_depth)
31400 return;
31401 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31402 unsigned long num_rsrc_err, num_cmd_success;
31403 int i;
31404
31405 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31406 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31407 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31408 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31409
31410 vports = lpfc_create_vport_work_array(phba);
31411 if (vports != NULL)
31412 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31413 }
31414 }
31415 lpfc_destroy_vport_work_array(phba, vports);
31416 - atomic_set(&phba->num_rsrc_err, 0);
31417 - atomic_set(&phba->num_cmd_success, 0);
31418 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31419 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31420 }
31421
31422 /**
31423 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31424 }
31425 }
31426 lpfc_destroy_vport_work_array(phba, vports);
31427 - atomic_set(&phba->num_rsrc_err, 0);
31428 - atomic_set(&phba->num_cmd_success, 0);
31429 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31430 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31431 }
31432
31433 /**
31434 diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
31435 --- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31436 +++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31437 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31438 int rval;
31439 int i;
31440
31441 + pax_track_stack();
31442 +
31443 // Allocate memory for the base list of scb for management module.
31444 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31445
31446 diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
31447 --- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31448 +++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31449 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31450 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31451 int ret;
31452
31453 + pax_track_stack();
31454 +
31455 or = osd_start_request(od, GFP_KERNEL);
31456 if (!or)
31457 return -ENOMEM;
31458 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
31459 --- linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31460 +++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31461 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31462 res->scsi_dev = scsi_dev;
31463 scsi_dev->hostdata = res;
31464 res->change_detected = 0;
31465 - atomic_set(&res->read_failures, 0);
31466 - atomic_set(&res->write_failures, 0);
31467 + atomic_set_unchecked(&res->read_failures, 0);
31468 + atomic_set_unchecked(&res->write_failures, 0);
31469 rc = 0;
31470 }
31471 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31472 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31473
31474 /* If this was a SCSI read/write command keep count of errors */
31475 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31476 - atomic_inc(&res->read_failures);
31477 + atomic_inc_unchecked(&res->read_failures);
31478 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31479 - atomic_inc(&res->write_failures);
31480 + atomic_inc_unchecked(&res->write_failures);
31481
31482 if (!RES_IS_GSCSI(res->cfg_entry) &&
31483 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31484 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31485 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31486 * hrrq_id assigned here in queuecommand
31487 */
31488 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31489 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31490 pinstance->num_hrrq;
31491 cmd->cmd_done = pmcraid_io_done;
31492
31493 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31494 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31495 * hrrq_id assigned here in queuecommand
31496 */
31497 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31498 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31499 pinstance->num_hrrq;
31500
31501 if (request_size) {
31502 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31503
31504 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31505 /* add resources only after host is added into system */
31506 - if (!atomic_read(&pinstance->expose_resources))
31507 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31508 return;
31509
31510 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31511 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31512 init_waitqueue_head(&pinstance->reset_wait_q);
31513
31514 atomic_set(&pinstance->outstanding_cmds, 0);
31515 - atomic_set(&pinstance->last_message_id, 0);
31516 - atomic_set(&pinstance->expose_resources, 0);
31517 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31518 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31519
31520 INIT_LIST_HEAD(&pinstance->free_res_q);
31521 INIT_LIST_HEAD(&pinstance->used_res_q);
31522 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31523 /* Schedule worker thread to handle CCN and take care of adding and
31524 * removing devices to OS
31525 */
31526 - atomic_set(&pinstance->expose_resources, 1);
31527 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31528 schedule_work(&pinstance->worker_q);
31529 return rc;
31530
31531 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
31532 --- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31533 +++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31534 @@ -749,7 +749,7 @@ struct pmcraid_instance {
31535 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31536
31537 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31538 - atomic_t last_message_id;
31539 + atomic_unchecked_t last_message_id;
31540
31541 /* configuration table */
31542 struct pmcraid_config_table *cfg_table;
31543 @@ -778,7 +778,7 @@ struct pmcraid_instance {
31544 atomic_t outstanding_cmds;
31545
31546 /* should add/delete resources to mid-layer now ?*/
31547 - atomic_t expose_resources;
31548 + atomic_unchecked_t expose_resources;
31549
31550
31551
31552 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31553 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31554 };
31555 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31556 - atomic_t read_failures; /* count of failed READ commands */
31557 - atomic_t write_failures; /* count of failed WRITE commands */
31558 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31559 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31560
31561 /* To indicate add/delete/modify during CCN */
31562 u8 change_detected;
31563 diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
31564 --- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31565 +++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31566 @@ -2244,7 +2244,7 @@ struct isp_operations {
31567 int (*get_flash_version) (struct scsi_qla_host *, void *);
31568 int (*start_scsi) (srb_t *);
31569 int (*abort_isp) (struct scsi_qla_host *);
31570 -};
31571 +} __no_const;
31572
31573 /* MSI-X Support *************************************************************/
31574
31575 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
31576 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31577 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31578 @@ -256,7 +256,7 @@ struct ddb_entry {
31579 atomic_t retry_relogin_timer; /* Min Time between relogins
31580 * (4000 only) */
31581 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31582 - atomic_t relogin_retry_count; /* Num of times relogin has been
31583 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31584 * retried */
31585
31586 uint16_t port;
31587 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
31588 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31589 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31590 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31591 ddb_entry->fw_ddb_index = fw_ddb_index;
31592 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31593 atomic_set(&ddb_entry->relogin_timer, 0);
31594 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31595 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31596 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31597 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31598 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31599 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31600 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31601 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31602 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31603 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31604 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31605 atomic_set(&ddb_entry->relogin_timer, 0);
31606 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31607 iscsi_unblock_session(ddb_entry->sess);
31608 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
31609 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31610 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31611 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31612 ddb_entry->fw_ddb_device_state ==
31613 DDB_DS_SESSION_FAILED) {
31614 /* Reset retry relogin timer */
31615 - atomic_inc(&ddb_entry->relogin_retry_count);
31616 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31617 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31618 " timed out-retrying"
31619 " relogin (%d)\n",
31620 ha->host_no,
31621 ddb_entry->fw_ddb_index,
31622 - atomic_read(&ddb_entry->
31623 + atomic_read_unchecked(&ddb_entry->
31624 relogin_retry_count))
31625 );
31626 start_dpc++;
31627 diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
31628 --- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31629 +++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31630 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31631 unsigned long timeout;
31632 int rtn = 0;
31633
31634 - atomic_inc(&cmd->device->iorequest_cnt);
31635 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31636
31637 /* check if the device is still usable */
31638 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31639 diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
31640 --- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31641 +++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31642 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31643 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31644 unsigned char *cmd = (unsigned char *)scp->cmnd;
31645
31646 + pax_track_stack();
31647 +
31648 if ((errsts = check_readiness(scp, 1, devip)))
31649 return errsts;
31650 memset(arr, 0, sizeof(arr));
31651 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31652 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31653 unsigned char *cmd = (unsigned char *)scp->cmnd;
31654
31655 + pax_track_stack();
31656 +
31657 if ((errsts = check_readiness(scp, 1, devip)))
31658 return errsts;
31659 memset(arr, 0, sizeof(arr));
31660 diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
31661 --- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31662 +++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31663 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31664 shost = sdev->host;
31665 scsi_init_cmd_errh(cmd);
31666 cmd->result = DID_NO_CONNECT << 16;
31667 - atomic_inc(&cmd->device->iorequest_cnt);
31668 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31669
31670 /*
31671 * SCSI request completion path will do scsi_device_unbusy(),
31672 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31673
31674 INIT_LIST_HEAD(&cmd->eh_entry);
31675
31676 - atomic_inc(&cmd->device->iodone_cnt);
31677 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31678 if (cmd->result)
31679 - atomic_inc(&cmd->device->ioerr_cnt);
31680 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31681
31682 disposition = scsi_decide_disposition(cmd);
31683 if (disposition != SUCCESS &&
31684 diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
31685 --- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31686 +++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31687 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31688 char *buf) \
31689 { \
31690 struct scsi_device *sdev = to_scsi_device(dev); \
31691 - unsigned long long count = atomic_read(&sdev->field); \
31692 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31693 return snprintf(buf, 20, "0x%llx\n", count); \
31694 } \
31695 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31696 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
31697 --- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31698 +++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31699 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31700 * Netlink Infrastructure
31701 */
31702
31703 -static atomic_t fc_event_seq;
31704 +static atomic_unchecked_t fc_event_seq;
31705
31706 /**
31707 * fc_get_event_number - Obtain the next sequential FC event number
31708 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31709 u32
31710 fc_get_event_number(void)
31711 {
31712 - return atomic_add_return(1, &fc_event_seq);
31713 + return atomic_add_return_unchecked(1, &fc_event_seq);
31714 }
31715 EXPORT_SYMBOL(fc_get_event_number);
31716
31717 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31718 {
31719 int error;
31720
31721 - atomic_set(&fc_event_seq, 0);
31722 + atomic_set_unchecked(&fc_event_seq, 0);
31723
31724 error = transport_class_register(&fc_host_class);
31725 if (error)
31726 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31727 char *cp;
31728
31729 *val = simple_strtoul(buf, &cp, 0);
31730 - if ((*cp && (*cp != '\n')) || (*val < 0))
31731 + if (*cp && (*cp != '\n'))
31732 return -EINVAL;
31733 /*
31734 * Check for overflow; dev_loss_tmo is u32
31735 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
31736 --- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31737 +++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31738 @@ -83,7 +83,7 @@ struct iscsi_internal {
31739 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31740 };
31741
31742 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31743 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31744 static struct workqueue_struct *iscsi_eh_timer_workq;
31745
31746 /*
31747 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31748 int err;
31749
31750 ihost = shost->shost_data;
31751 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31752 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31753
31754 if (id == ISCSI_MAX_TARGET) {
31755 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31756 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31757 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31758 ISCSI_TRANSPORT_VERSION);
31759
31760 - atomic_set(&iscsi_session_nr, 0);
31761 + atomic_set_unchecked(&iscsi_session_nr, 0);
31762
31763 err = class_register(&iscsi_transport_class);
31764 if (err)
31765 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
31766 --- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31767 +++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31768 @@ -33,7 +33,7 @@
31769 #include "scsi_transport_srp_internal.h"
31770
31771 struct srp_host_attrs {
31772 - atomic_t next_port_id;
31773 + atomic_unchecked_t next_port_id;
31774 };
31775 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31776
31777 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31778 struct Scsi_Host *shost = dev_to_shost(dev);
31779 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31780
31781 - atomic_set(&srp_host->next_port_id, 0);
31782 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31783 return 0;
31784 }
31785
31786 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31787 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31788 rport->roles = ids->roles;
31789
31790 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31791 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31792 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31793
31794 transport_setup_device(&rport->dev);
31795 diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
31796 --- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31797 +++ linux-3.0.4/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31798 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31799 const struct file_operations * fops;
31800 };
31801
31802 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31803 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31804 {"allow_dio", &adio_fops},
31805 {"debug", &debug_fops},
31806 {"def_reserved_size", &dressz_fops},
31807 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31808 {
31809 int k, mask;
31810 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31811 - struct sg_proc_leaf * leaf;
31812 + const struct sg_proc_leaf * leaf;
31813
31814 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31815 if (!sg_proc_sgp)
31816 diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31817 --- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31818 +++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31819 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31820 int do_iounmap = 0;
31821 int do_disable_device = 1;
31822
31823 + pax_track_stack();
31824 +
31825 memset(&sym_dev, 0, sizeof(sym_dev));
31826 memset(&nvram, 0, sizeof(nvram));
31827 sym_dev.pdev = pdev;
31828 diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
31829 --- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31830 +++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31831 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31832 dma_addr_t base;
31833 unsigned i;
31834
31835 + pax_track_stack();
31836 +
31837 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31838 cmd.reqRingNumPages = adapter->req_pages;
31839 cmd.cmpRingNumPages = adapter->cmp_pages;
31840 diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
31841 --- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31842 +++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31843 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31844 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31845
31846 /* portable code must never pass more than 32 bytes */
31847 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31848 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31849
31850 static u8 *buf;
31851
31852 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31853 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31854 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31855 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31856 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31857
31858
31859 -static struct net_device_ops ar6000_netdev_ops = {
31860 +static net_device_ops_no_const ar6000_netdev_ops = {
31861 .ndo_init = NULL,
31862 .ndo_open = ar6000_open,
31863 .ndo_stop = ar6000_close,
31864 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31865 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31866 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31867 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31868 typedef struct ar6k_pal_config_s
31869 {
31870 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31871 -}ar6k_pal_config_t;
31872 +} __no_const ar6k_pal_config_t;
31873
31874 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31875 #endif /* _AR6K_PAL_H_ */
31876 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31877 --- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31878 +++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31879 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31880 free_netdev(ifp->net);
31881 }
31882 /* Allocate etherdev, including space for private structure */
31883 - ifp->net = alloc_etherdev(sizeof(dhd));
31884 + ifp->net = alloc_etherdev(sizeof(*dhd));
31885 if (!ifp->net) {
31886 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31887 ret = -ENOMEM;
31888 }
31889 if (ret == 0) {
31890 strcpy(ifp->net->name, ifp->name);
31891 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31892 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31893 err = dhd_net_attach(&dhd->pub, ifp->idx);
31894 if (err != 0) {
31895 DHD_ERROR(("%s: dhd_net_attach failed, "
31896 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31897 strcpy(nv_path, nvram_path);
31898
31899 /* Allocate etherdev, including space for private structure */
31900 - net = alloc_etherdev(sizeof(dhd));
31901 + net = alloc_etherdev(sizeof(*dhd));
31902 if (!net) {
31903 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31904 goto fail;
31905 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31906 /*
31907 * Save the dhd_info into the priv
31908 */
31909 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31910 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31911
31912 /* Set network interface name if it was provided as module parameter */
31913 if (iface_name[0]) {
31914 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31915 /*
31916 * Save the dhd_info into the priv
31917 */
31918 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31919 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31920
31921 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31922 g_bus = bus;
31923 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31924 --- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31925 +++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31926 @@ -593,7 +593,7 @@ struct phy_func_ptr {
31927 initfn_t carrsuppr;
31928 rxsigpwrfn_t rxsigpwr;
31929 detachfn_t detach;
31930 -};
31931 +} __no_const;
31932 typedef struct phy_func_ptr phy_func_ptr_t;
31933
31934 struct phy_info {
31935 diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
31936 --- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31937 +++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31938 @@ -185,7 +185,7 @@ typedef struct {
31939 u16 func, uint bustype, void *regsva, void *param);
31940 /* detach from device */
31941 void (*detach) (void *ch);
31942 -} bcmsdh_driver_t;
31943 +} __no_const bcmsdh_driver_t;
31944
31945 /* platform specific/high level functions */
31946 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31947 diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
31948 --- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31949 +++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31950 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31951 struct net_device_stats *stats = &etdev->net_stats;
31952
31953 if (tcb->flags & fMP_DEST_BROAD)
31954 - atomic_inc(&etdev->Stats.brdcstxmt);
31955 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31956 else if (tcb->flags & fMP_DEST_MULTI)
31957 - atomic_inc(&etdev->Stats.multixmt);
31958 + atomic_inc_unchecked(&etdev->Stats.multixmt);
31959 else
31960 - atomic_inc(&etdev->Stats.unixmt);
31961 + atomic_inc_unchecked(&etdev->Stats.unixmt);
31962
31963 if (tcb->skb) {
31964 stats->tx_bytes += tcb->skb->len;
31965 diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
31966 --- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31967 +++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31968 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31969 * operations
31970 */
31971 u32 unircv; /* # multicast packets received */
31972 - atomic_t unixmt; /* # multicast packets for Tx */
31973 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31974 u32 multircv; /* # multicast packets received */
31975 - atomic_t multixmt; /* # multicast packets for Tx */
31976 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31977 u32 brdcstrcv; /* # broadcast packets received */
31978 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
31979 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31980 u32 norcvbuf; /* # Rx packets discarded */
31981 u32 noxmtbuf; /* # Tx packets discarded */
31982
31983 diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
31984 --- linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
31985 +++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31986 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31987 int ret = 0;
31988 int t;
31989
31990 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31991 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31992 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31993 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31994
31995 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31996 if (ret)
31997 diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
31998 --- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
31999 +++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32000 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32001 u64 output_address = (output) ? virt_to_phys(output) : 0;
32002 u32 output_address_hi = output_address >> 32;
32003 u32 output_address_lo = output_address & 0xFFFFFFFF;
32004 - volatile void *hypercall_page = hv_context.hypercall_page;
32005 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32006
32007 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32008 "=a"(hv_status_lo) : "d" (control_hi),
32009 diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
32010 --- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32011 +++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32012 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32013 if (hid_dev) {
32014 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32015
32016 - hid_dev->ll_driver->open = mousevsc_hid_open;
32017 - hid_dev->ll_driver->close = mousevsc_hid_close;
32018 + pax_open_kernel();
32019 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32020 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32021 + pax_close_kernel();
32022
32023 hid_dev->bus = BUS_VIRTUAL;
32024 hid_dev->vendor = input_device_ctx->device_info.vendor;
32025 diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
32026 --- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32027 +++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32028 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
32029 struct vmbus_connection {
32030 enum vmbus_connect_state conn_state;
32031
32032 - atomic_t next_gpadl_handle;
32033 + atomic_unchecked_t next_gpadl_handle;
32034
32035 /*
32036 * Represents channel interrupts. Each bit position represents a
32037 diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
32038 --- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
32039 +++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32040 @@ -43,7 +43,7 @@ struct rndis_device {
32041
32042 enum rndis_device_state state;
32043 u32 link_stat;
32044 - atomic_t new_req_id;
32045 + atomic_unchecked_t new_req_id;
32046
32047 spinlock_t request_lock;
32048 struct list_head req_list;
32049 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32050 * template
32051 */
32052 set = &rndis_msg->msg.set_req;
32053 - set->req_id = atomic_inc_return(&dev->new_req_id);
32054 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32055
32056 /* Add to the request list */
32057 spin_lock_irqsave(&dev->request_lock, flags);
32058 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32059
32060 /* Setup the rndis set */
32061 halt = &request->request_msg.msg.halt_req;
32062 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32063 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32064
32065 /* Ignore return since this msg is optional. */
32066 rndis_filter_send_request(dev, request);
32067 diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
32068 --- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32069 +++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32070 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32071 {
32072 int ret = 0;
32073
32074 - static atomic_t device_num = ATOMIC_INIT(0);
32075 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32076
32077 /* Set the device name. Otherwise, device_register() will fail. */
32078 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32079 - atomic_inc_return(&device_num));
32080 + atomic_inc_return_unchecked(&device_num));
32081
32082 /* The new device belongs to this bus */
32083 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32084 diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
32085 --- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32086 +++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32087 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32088
32089 int (*is_enabled)(struct iio_ring_buffer *ring);
32090 int (*enable)(struct iio_ring_buffer *ring);
32091 -};
32092 +} __no_const;
32093
32094 struct iio_ring_setup_ops {
32095 int (*preenable)(struct iio_dev *);
32096 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
32097 --- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32098 +++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32099 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32100 * since the RX tasklet also increments it.
32101 */
32102 #ifdef CONFIG_64BIT
32103 - atomic64_add(rx_status.dropped_packets,
32104 - (atomic64_t *)&priv->stats.rx_dropped);
32105 + atomic64_add_unchecked(rx_status.dropped_packets,
32106 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32107 #else
32108 - atomic_add(rx_status.dropped_packets,
32109 - (atomic_t *)&priv->stats.rx_dropped);
32110 + atomic_add_unchecked(rx_status.dropped_packets,
32111 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32112 #endif
32113 }
32114
32115 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
32116 --- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32117 +++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32118 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32119 /* Increment RX stats for virtual ports */
32120 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32121 #ifdef CONFIG_64BIT
32122 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32123 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32124 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32125 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32126 #else
32127 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32128 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32129 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32130 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32131 #endif
32132 }
32133 netif_receive_skb(skb);
32134 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32135 dev->name);
32136 */
32137 #ifdef CONFIG_64BIT
32138 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32139 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32140 #else
32141 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32142 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32143 #endif
32144 dev_kfree_skb_irq(skb);
32145 }
32146 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
32147 --- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32148 +++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32149 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32150 mutex_init(&psb->mcache_lock);
32151 psb->mcache_root = RB_ROOT;
32152 psb->mcache_timeout = msecs_to_jiffies(5000);
32153 - atomic_long_set(&psb->mcache_gen, 0);
32154 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32155
32156 psb->trans_max_pages = 100;
32157
32158 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32159 INIT_LIST_HEAD(&psb->crypto_ready_list);
32160 INIT_LIST_HEAD(&psb->crypto_active_list);
32161
32162 - atomic_set(&psb->trans_gen, 1);
32163 + atomic_set_unchecked(&psb->trans_gen, 1);
32164 atomic_long_set(&psb->total_inodes, 0);
32165
32166 mutex_init(&psb->state_lock);
32167 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
32168 --- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32169 +++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32170 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32171 m->data = data;
32172 m->start = start;
32173 m->size = size;
32174 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32175 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32176
32177 mutex_lock(&psb->mcache_lock);
32178 err = pohmelfs_mcache_insert(psb, m);
32179 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
32180 --- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32181 +++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32182 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32183 struct pohmelfs_sb {
32184 struct rb_root mcache_root;
32185 struct mutex mcache_lock;
32186 - atomic_long_t mcache_gen;
32187 + atomic_long_unchecked_t mcache_gen;
32188 unsigned long mcache_timeout;
32189
32190 unsigned int idx;
32191
32192 unsigned int trans_retries;
32193
32194 - atomic_t trans_gen;
32195 + atomic_unchecked_t trans_gen;
32196
32197 unsigned int crypto_attached_size;
32198 unsigned int crypto_align_size;
32199 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
32200 --- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32201 +++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32202 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32203 int err;
32204 struct netfs_cmd *cmd = t->iovec.iov_base;
32205
32206 - t->gen = atomic_inc_return(&psb->trans_gen);
32207 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32208
32209 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32210 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32211 diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
32212 --- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32213 +++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32214 @@ -83,7 +83,7 @@ struct _io_ops {
32215 u8 *pmem);
32216 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32217 u8 *pmem);
32218 -};
32219 +} __no_const;
32220
32221 struct io_req {
32222 struct list_head list;
32223 diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
32224 --- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32225 +++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32226 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32227 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32228
32229 if (rlen)
32230 - if (copy_to_user(data, &resp, rlen))
32231 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32232 return -EFAULT;
32233
32234 return 0;
32235 diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
32236 --- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32237 +++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32238 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32239 struct stlport stl_dummyport;
32240 struct stlport *portp;
32241
32242 + pax_track_stack();
32243 +
32244 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32245 return -EFAULT;
32246 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32247 diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
32248 --- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32249 +++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32250 @@ -315,7 +315,7 @@ struct usbip_device {
32251 void (*shutdown)(struct usbip_device *);
32252 void (*reset)(struct usbip_device *);
32253 void (*unusable)(struct usbip_device *);
32254 - } eh_ops;
32255 + } __no_const eh_ops;
32256 };
32257
32258 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32259 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
32260 --- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32261 +++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32262 @@ -94,7 +94,7 @@ struct vhci_hcd {
32263 unsigned resuming:1;
32264 unsigned long re_timeout;
32265
32266 - atomic_t seqnum;
32267 + atomic_unchecked_t seqnum;
32268
32269 /*
32270 * NOTE:
32271 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
32272 --- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32273 +++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32274 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32275 return;
32276 }
32277
32278 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32279 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32280 if (priv->seqnum == 0xffff)
32281 dev_info(&urb->dev->dev, "seqnum max\n");
32282
32283 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32284 return -ENOMEM;
32285 }
32286
32287 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32288 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32289 if (unlink->seqnum == 0xffff)
32290 pr_info("seqnum max\n");
32291
32292 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32293 vdev->rhport = rhport;
32294 }
32295
32296 - atomic_set(&vhci->seqnum, 0);
32297 + atomic_set_unchecked(&vhci->seqnum, 0);
32298 spin_lock_init(&vhci->lock);
32299
32300 hcd->power_budget = 0; /* no limit */
32301 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
32302 --- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32303 +++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32304 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32305 if (!urb) {
32306 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32307 pr_info("max seqnum %d\n",
32308 - atomic_read(&the_controller->seqnum));
32309 + atomic_read_unchecked(&the_controller->seqnum));
32310 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32311 return;
32312 }
32313 diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
32314 --- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32315 +++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32316 @@ -79,14 +79,13 @@ static int msglevel
32317 *
32318 */
32319
32320 +static net_device_ops_no_const apdev_netdev_ops;
32321 +
32322 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32323 {
32324 PSDevice apdev_priv;
32325 struct net_device *dev = pDevice->dev;
32326 int ret;
32327 - const struct net_device_ops apdev_netdev_ops = {
32328 - .ndo_start_xmit = pDevice->tx_80211,
32329 - };
32330
32331 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32332
32333 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32334 *apdev_priv = *pDevice;
32335 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32336
32337 + /* only half broken now */
32338 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32339 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32340
32341 pDevice->apdev->type = ARPHRD_IEEE80211;
32342 diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
32343 --- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32344 +++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32345 @@ -80,14 +80,13 @@ static int msglevel
32346 *
32347 */
32348
32349 +static net_device_ops_no_const apdev_netdev_ops;
32350 +
32351 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32352 {
32353 PSDevice apdev_priv;
32354 struct net_device *dev = pDevice->dev;
32355 int ret;
32356 - const struct net_device_ops apdev_netdev_ops = {
32357 - .ndo_start_xmit = pDevice->tx_80211,
32358 - };
32359
32360 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32361
32362 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32363 *apdev_priv = *pDevice;
32364 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32365
32366 + /* only half broken now */
32367 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32368 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32369
32370 pDevice->apdev->type = ARPHRD_IEEE80211;
32371 diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
32372 --- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32373 +++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32374 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32375
32376 struct usbctlx_completor {
32377 int (*complete) (struct usbctlx_completor *);
32378 -};
32379 +} __no_const;
32380
32381 static int
32382 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32383 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
32384 --- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32385 +++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32386 @@ -39,7 +39,7 @@
32387 * A tmem host implementation must use this function to register callbacks
32388 * for memory allocation.
32389 */
32390 -static struct tmem_hostops tmem_hostops;
32391 +static tmem_hostops_no_const tmem_hostops;
32392
32393 static void tmem_objnode_tree_init(void);
32394
32395 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32396 * A tmem host implementation must use this function to register
32397 * callbacks for a page-accessible memory (PAM) implementation
32398 */
32399 -static struct tmem_pamops tmem_pamops;
32400 +static tmem_pamops_no_const tmem_pamops;
32401
32402 void tmem_register_pamops(struct tmem_pamops *m)
32403 {
32404 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
32405 --- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32406 +++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32407 @@ -171,6 +171,7 @@ struct tmem_pamops {
32408 int (*get_data)(struct page *, void *, struct tmem_pool *);
32409 void (*free)(void *, struct tmem_pool *);
32410 };
32411 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32412 extern void tmem_register_pamops(struct tmem_pamops *m);
32413
32414 /* memory allocation methods provided by the host implementation */
32415 @@ -180,6 +181,7 @@ struct tmem_hostops {
32416 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32417 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32418 };
32419 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32420 extern void tmem_register_hostops(struct tmem_hostops *m);
32421
32422 /* core tmem accessor functions */
32423 diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
32424 --- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32425 +++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32426 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32427 char path[ALUA_METADATA_PATH_LEN];
32428 int len;
32429
32430 + pax_track_stack();
32431 +
32432 memset(path, 0, ALUA_METADATA_PATH_LEN);
32433
32434 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32435 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32436 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32437 int len;
32438
32439 + pax_track_stack();
32440 +
32441 memset(path, 0, ALUA_METADATA_PATH_LEN);
32442 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32443
32444 diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
32445 --- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32446 +++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32447 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32448 int length = 0;
32449 unsigned char buf[SE_MODE_PAGE_BUF];
32450
32451 + pax_track_stack();
32452 +
32453 memset(buf, 0, SE_MODE_PAGE_BUF);
32454
32455 switch (cdb[2] & 0x3f) {
32456 diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
32457 --- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32458 +++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32459 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32460 ssize_t len = 0;
32461 int reg_count = 0, prf_isid;
32462
32463 + pax_track_stack();
32464 +
32465 if (!(su_dev->se_dev_ptr))
32466 return -ENODEV;
32467
32468 diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
32469 --- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32470 +++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32471 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32472 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32473 u16 tpgt;
32474
32475 + pax_track_stack();
32476 +
32477 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32478 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32479 /*
32480 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32481 ssize_t len = 0;
32482 int reg_count = 0;
32483
32484 + pax_track_stack();
32485 +
32486 memset(buf, 0, pr_aptpl_buf_len);
32487 /*
32488 * Called to clear metadata once APTPL has been deactivated.
32489 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32490 char path[512];
32491 int ret;
32492
32493 + pax_track_stack();
32494 +
32495 memset(iov, 0, sizeof(struct iovec));
32496 memset(path, 0, 512);
32497
32498 diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
32499 --- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32500 +++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32501 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32502 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32503 T_TASK(cmd)->t_task_cdbs,
32504 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32505 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32506 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32507 atomic_read(&T_TASK(cmd)->t_transport_active),
32508 atomic_read(&T_TASK(cmd)->t_transport_stop),
32509 atomic_read(&T_TASK(cmd)->t_transport_sent));
32510 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32511 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32512 " task: %p, t_fe_count: %d dev: %p\n", task,
32513 fe_count, dev);
32514 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32515 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32516 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32517 flags);
32518 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32519 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32520 }
32521 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32522 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32523 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32524 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32525 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32526 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32527
32528 diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
32529 --- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32530 +++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32531 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32532
32533 dev->queue_depth = dev_limits->queue_depth;
32534 atomic_set(&dev->depth_left, dev->queue_depth);
32535 - atomic_set(&dev->dev_ordered_id, 0);
32536 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32537
32538 se_dev_set_default_attribs(dev, dev_limits);
32539
32540 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32541 * Used to determine when ORDERED commands should go from
32542 * Dormant to Active status.
32543 */
32544 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32545 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32546 smp_mb__after_atomic_inc();
32547 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32548 cmd->se_ordered_id, cmd->sam_task_attr,
32549 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32550 " t_transport_active: %d t_transport_stop: %d"
32551 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32552 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32553 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32554 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32555 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32556 atomic_read(&T_TASK(cmd)->t_transport_active),
32557 atomic_read(&T_TASK(cmd)->t_transport_stop),
32558 @@ -2673,9 +2673,9 @@ check_depth:
32559 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32560 atomic_set(&task->task_active, 1);
32561 atomic_set(&task->task_sent, 1);
32562 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32563 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32564
32565 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32566 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32567 T_TASK(cmd)->t_task_cdbs)
32568 atomic_set(&cmd->transport_sent, 1);
32569
32570 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32571 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32572 }
32573 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32574 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32575 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32576 goto remove;
32577
32578 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32579 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32580 {
32581 int ret = 0;
32582
32583 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32584 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32585 if (!(send_status) ||
32586 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32587 return 1;
32588 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32589 */
32590 if (cmd->data_direction == DMA_TO_DEVICE) {
32591 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32592 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32593 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32594 smp_mb__after_atomic_inc();
32595 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32596 transport_new_cmd_failure(cmd);
32597 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32598 CMD_TFO(cmd)->get_task_tag(cmd),
32599 T_TASK(cmd)->t_task_cdbs,
32600 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32601 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32602 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32603 atomic_read(&T_TASK(cmd)->t_transport_active),
32604 atomic_read(&T_TASK(cmd)->t_transport_stop),
32605 atomic_read(&T_TASK(cmd)->t_transport_sent));
32606 diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
32607 --- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32608 +++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32609 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32610 bool mContinue;
32611 char *pIn, *pOut;
32612
32613 + pax_track_stack();
32614 +
32615 if (!SCI_Prepare(j))
32616 return 0;
32617
32618 diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
32619 --- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32620 +++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32621 @@ -83,6 +83,7 @@
32622 #include <asm/hvcserver.h>
32623 #include <asm/uaccess.h>
32624 #include <asm/vio.h>
32625 +#include <asm/local.h>
32626
32627 /*
32628 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32629 @@ -270,7 +271,7 @@ struct hvcs_struct {
32630 unsigned int index;
32631
32632 struct tty_struct *tty;
32633 - int open_count;
32634 + local_t open_count;
32635
32636 /*
32637 * Used to tell the driver kernel_thread what operations need to take
32638 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32639
32640 spin_lock_irqsave(&hvcsd->lock, flags);
32641
32642 - if (hvcsd->open_count > 0) {
32643 + if (local_read(&hvcsd->open_count) > 0) {
32644 spin_unlock_irqrestore(&hvcsd->lock, flags);
32645 printk(KERN_INFO "HVCS: vterm state unchanged. "
32646 "The hvcs device node is still in use.\n");
32647 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32648 if ((retval = hvcs_partner_connect(hvcsd)))
32649 goto error_release;
32650
32651 - hvcsd->open_count = 1;
32652 + local_set(&hvcsd->open_count, 1);
32653 hvcsd->tty = tty;
32654 tty->driver_data = hvcsd;
32655
32656 @@ -1179,7 +1180,7 @@ fast_open:
32657
32658 spin_lock_irqsave(&hvcsd->lock, flags);
32659 kref_get(&hvcsd->kref);
32660 - hvcsd->open_count++;
32661 + local_inc(&hvcsd->open_count);
32662 hvcsd->todo_mask |= HVCS_SCHED_READ;
32663 spin_unlock_irqrestore(&hvcsd->lock, flags);
32664
32665 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32666 hvcsd = tty->driver_data;
32667
32668 spin_lock_irqsave(&hvcsd->lock, flags);
32669 - if (--hvcsd->open_count == 0) {
32670 + if (local_dec_and_test(&hvcsd->open_count)) {
32671
32672 vio_disable_interrupts(hvcsd->vdev);
32673
32674 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32675 free_irq(irq, hvcsd);
32676 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32677 return;
32678 - } else if (hvcsd->open_count < 0) {
32679 + } else if (local_read(&hvcsd->open_count) < 0) {
32680 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32681 " is missmanaged.\n",
32682 - hvcsd->vdev->unit_address, hvcsd->open_count);
32683 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32684 }
32685
32686 spin_unlock_irqrestore(&hvcsd->lock, flags);
32687 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32688
32689 spin_lock_irqsave(&hvcsd->lock, flags);
32690 /* Preserve this so that we know how many kref refs to put */
32691 - temp_open_count = hvcsd->open_count;
32692 + temp_open_count = local_read(&hvcsd->open_count);
32693
32694 /*
32695 * Don't kref put inside the spinlock because the destruction
32696 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32697 hvcsd->tty->driver_data = NULL;
32698 hvcsd->tty = NULL;
32699
32700 - hvcsd->open_count = 0;
32701 + local_set(&hvcsd->open_count, 0);
32702
32703 /* This will drop any buffered data on the floor which is OK in a hangup
32704 * scenario. */
32705 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32706 * the middle of a write operation? This is a crummy place to do this
32707 * but we want to keep it all in the spinlock.
32708 */
32709 - if (hvcsd->open_count <= 0) {
32710 + if (local_read(&hvcsd->open_count) <= 0) {
32711 spin_unlock_irqrestore(&hvcsd->lock, flags);
32712 return -ENODEV;
32713 }
32714 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32715 {
32716 struct hvcs_struct *hvcsd = tty->driver_data;
32717
32718 - if (!hvcsd || hvcsd->open_count <= 0)
32719 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32720 return 0;
32721
32722 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32723 diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
32724 --- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32725 +++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32726 @@ -29,6 +29,7 @@
32727 #include <linux/tty_driver.h>
32728 #include <linux/tty_flip.h>
32729 #include <linux/uaccess.h>
32730 +#include <asm/local.h>
32731
32732 #include "tty.h"
32733 #include "network.h"
32734 @@ -51,7 +52,7 @@ struct ipw_tty {
32735 int tty_type;
32736 struct ipw_network *network;
32737 struct tty_struct *linux_tty;
32738 - int open_count;
32739 + local_t open_count;
32740 unsigned int control_lines;
32741 struct mutex ipw_tty_mutex;
32742 int tx_bytes_queued;
32743 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32744 mutex_unlock(&tty->ipw_tty_mutex);
32745 return -ENODEV;
32746 }
32747 - if (tty->open_count == 0)
32748 + if (local_read(&tty->open_count) == 0)
32749 tty->tx_bytes_queued = 0;
32750
32751 - tty->open_count++;
32752 + local_inc(&tty->open_count);
32753
32754 tty->linux_tty = linux_tty;
32755 linux_tty->driver_data = tty;
32756 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32757
32758 static void do_ipw_close(struct ipw_tty *tty)
32759 {
32760 - tty->open_count--;
32761 -
32762 - if (tty->open_count == 0) {
32763 + if (local_dec_return(&tty->open_count) == 0) {
32764 struct tty_struct *linux_tty = tty->linux_tty;
32765
32766 if (linux_tty != NULL) {
32767 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32768 return;
32769
32770 mutex_lock(&tty->ipw_tty_mutex);
32771 - if (tty->open_count == 0) {
32772 + if (local_read(&tty->open_count) == 0) {
32773 mutex_unlock(&tty->ipw_tty_mutex);
32774 return;
32775 }
32776 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32777 return;
32778 }
32779
32780 - if (!tty->open_count) {
32781 + if (!local_read(&tty->open_count)) {
32782 mutex_unlock(&tty->ipw_tty_mutex);
32783 return;
32784 }
32785 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32786 return -ENODEV;
32787
32788 mutex_lock(&tty->ipw_tty_mutex);
32789 - if (!tty->open_count) {
32790 + if (!local_read(&tty->open_count)) {
32791 mutex_unlock(&tty->ipw_tty_mutex);
32792 return -EINVAL;
32793 }
32794 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32795 if (!tty)
32796 return -ENODEV;
32797
32798 - if (!tty->open_count)
32799 + if (!local_read(&tty->open_count))
32800 return -EINVAL;
32801
32802 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32803 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32804 if (!tty)
32805 return 0;
32806
32807 - if (!tty->open_count)
32808 + if (!local_read(&tty->open_count))
32809 return 0;
32810
32811 return tty->tx_bytes_queued;
32812 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32813 if (!tty)
32814 return -ENODEV;
32815
32816 - if (!tty->open_count)
32817 + if (!local_read(&tty->open_count))
32818 return -EINVAL;
32819
32820 return get_control_lines(tty);
32821 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32822 if (!tty)
32823 return -ENODEV;
32824
32825 - if (!tty->open_count)
32826 + if (!local_read(&tty->open_count))
32827 return -EINVAL;
32828
32829 return set_control_lines(tty, set, clear);
32830 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32831 if (!tty)
32832 return -ENODEV;
32833
32834 - if (!tty->open_count)
32835 + if (!local_read(&tty->open_count))
32836 return -EINVAL;
32837
32838 /* FIXME: Exactly how is the tty object locked here .. */
32839 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32840 against a parallel ioctl etc */
32841 mutex_lock(&ttyj->ipw_tty_mutex);
32842 }
32843 - while (ttyj->open_count)
32844 + while (local_read(&ttyj->open_count))
32845 do_ipw_close(ttyj);
32846 ipwireless_disassociate_network_ttys(network,
32847 ttyj->channel_idx);
32848 diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
32849 --- linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32850 +++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32851 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32852 return NULL;
32853 spin_lock_init(&dlci->lock);
32854 dlci->fifo = &dlci->_fifo;
32855 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32856 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32857 kfree(dlci);
32858 return NULL;
32859 }
32860 diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
32861 --- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32862 +++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32863 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32864 {
32865 *ops = tty_ldisc_N_TTY;
32866 ops->owner = NULL;
32867 - ops->refcount = ops->flags = 0;
32868 + atomic_set(&ops->refcount, 0);
32869 + ops->flags = 0;
32870 }
32871 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32872 diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
32873 --- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32874 +++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32875 @@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32876 register_sysctl_table(pty_root_table);
32877
32878 /* Now create the /dev/ptmx special device */
32879 + pax_open_kernel();
32880 tty_default_fops(&ptmx_fops);
32881 - ptmx_fops.open = ptmx_open;
32882 + *(void **)&ptmx_fops.open = ptmx_open;
32883 + pax_close_kernel();
32884
32885 cdev_init(&ptmx_cdev, &ptmx_fops);
32886 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32887 diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
32888 --- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32889 +++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32890 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32891 struct rocket_ports tmp;
32892 int board;
32893
32894 + pax_track_stack();
32895 +
32896 if (!retports)
32897 return -EFAULT;
32898 memset(&tmp, 0, sizeof (tmp));
32899 diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
32900 --- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32901 +++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32902 @@ -23,8 +23,9 @@
32903 #define MAX_CONFIG_LEN 40
32904
32905 static struct kgdb_io kgdboc_io_ops;
32906 +static struct kgdb_io kgdboc_io_ops_console;
32907
32908 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32909 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32910 static int configured = -1;
32911
32912 static char config[MAX_CONFIG_LEN];
32913 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32914 kgdboc_unregister_kbd();
32915 if (configured == 1)
32916 kgdb_unregister_io_module(&kgdboc_io_ops);
32917 + else if (configured == 2)
32918 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
32919 }
32920
32921 static int configure_kgdboc(void)
32922 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32923 int err;
32924 char *cptr = config;
32925 struct console *cons;
32926 + int is_console = 0;
32927
32928 err = kgdboc_option_setup(config);
32929 if (err || !strlen(config) || isspace(config[0]))
32930 goto noconfig;
32931
32932 err = -ENODEV;
32933 - kgdboc_io_ops.is_console = 0;
32934 kgdb_tty_driver = NULL;
32935
32936 kgdboc_use_kms = 0;
32937 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32938 int idx;
32939 if (cons->device && cons->device(cons, &idx) == p &&
32940 idx == tty_line) {
32941 - kgdboc_io_ops.is_console = 1;
32942 + is_console = 1;
32943 break;
32944 }
32945 cons = cons->next;
32946 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32947 kgdb_tty_line = tty_line;
32948
32949 do_register:
32950 - err = kgdb_register_io_module(&kgdboc_io_ops);
32951 + if (is_console) {
32952 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
32953 + configured = 2;
32954 + } else {
32955 + err = kgdb_register_io_module(&kgdboc_io_ops);
32956 + configured = 1;
32957 + }
32958 if (err)
32959 goto noconfig;
32960
32961 - configured = 1;
32962 -
32963 return 0;
32964
32965 noconfig:
32966 @@ -212,7 +219,7 @@ noconfig:
32967 static int __init init_kgdboc(void)
32968 {
32969 /* Already configured? */
32970 - if (configured == 1)
32971 + if (configured >= 1)
32972 return 0;
32973
32974 return configure_kgdboc();
32975 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32976 if (config[len - 1] == '\n')
32977 config[len - 1] = '\0';
32978
32979 - if (configured == 1)
32980 + if (configured >= 1)
32981 cleanup_kgdboc();
32982
32983 /* Go and configure with the new params. */
32984 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32985 .post_exception = kgdboc_post_exp_handler,
32986 };
32987
32988 +static struct kgdb_io kgdboc_io_ops_console = {
32989 + .name = "kgdboc",
32990 + .read_char = kgdboc_get_char,
32991 + .write_char = kgdboc_put_char,
32992 + .pre_exception = kgdboc_pre_exp_handler,
32993 + .post_exception = kgdboc_post_exp_handler,
32994 + .is_console = 1
32995 +};
32996 +
32997 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32998 /* This is only available if kgdboc is a built in for early debugging */
32999 static int __init kgdboc_early_init(char *opt)
33000 diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
33001 --- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
33002 +++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
33003 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33004 int loop = 1, num, total = 0;
33005 u8 recv_buf[512], *pbuf;
33006
33007 + pax_track_stack();
33008 +
33009 pbuf = recv_buf;
33010 do {
33011 num = max3110_read_multi(max, pbuf);
33012 diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
33013 --- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33014 +++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33015 @@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33016
33017 void tty_default_fops(struct file_operations *fops)
33018 {
33019 - *fops = tty_fops;
33020 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33021 }
33022
33023 /*
33024 diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
33025 --- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33026 +++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33027 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33028 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33029 struct tty_ldisc_ops *ldo = ld->ops;
33030
33031 - ldo->refcount--;
33032 + atomic_dec(&ldo->refcount);
33033 module_put(ldo->owner);
33034 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33035
33036 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33037 spin_lock_irqsave(&tty_ldisc_lock, flags);
33038 tty_ldiscs[disc] = new_ldisc;
33039 new_ldisc->num = disc;
33040 - new_ldisc->refcount = 0;
33041 + atomic_set(&new_ldisc->refcount, 0);
33042 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33043
33044 return ret;
33045 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33046 return -EINVAL;
33047
33048 spin_lock_irqsave(&tty_ldisc_lock, flags);
33049 - if (tty_ldiscs[disc]->refcount)
33050 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33051 ret = -EBUSY;
33052 else
33053 tty_ldiscs[disc] = NULL;
33054 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33055 if (ldops) {
33056 ret = ERR_PTR(-EAGAIN);
33057 if (try_module_get(ldops->owner)) {
33058 - ldops->refcount++;
33059 + atomic_inc(&ldops->refcount);
33060 ret = ldops;
33061 }
33062 }
33063 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33064 unsigned long flags;
33065
33066 spin_lock_irqsave(&tty_ldisc_lock, flags);
33067 - ldops->refcount--;
33068 + atomic_dec(&ldops->refcount);
33069 module_put(ldops->owner);
33070 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33071 }
33072 diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
33073 --- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33074 +++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33075 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33076 kbd->kbdmode == VC_OFF) &&
33077 value != KVAL(K_SAK))
33078 return; /* SAK is allowed even in raw mode */
33079 +
33080 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33081 + {
33082 + void *func = fn_handler[value];
33083 + if (func == fn_show_state || func == fn_show_ptregs ||
33084 + func == fn_show_mem)
33085 + return;
33086 + }
33087 +#endif
33088 +
33089 fn_handler[value](vc);
33090 }
33091
33092 diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
33093 --- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33094 +++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33095 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33096
33097 static void notify_write(struct vc_data *vc, unsigned int unicode)
33098 {
33099 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33100 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33101 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33102 }
33103
33104 diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
33105 --- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33106 +++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33107 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33108 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33109 return -EFAULT;
33110
33111 - if (!capable(CAP_SYS_TTY_CONFIG))
33112 - perm = 0;
33113 -
33114 switch (cmd) {
33115 case KDGKBENT:
33116 key_map = key_maps[s];
33117 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33118 val = (i ? K_HOLE : K_NOSUCHMAP);
33119 return put_user(val, &user_kbe->kb_value);
33120 case KDSKBENT:
33121 + if (!capable(CAP_SYS_TTY_CONFIG))
33122 + perm = 0;
33123 +
33124 if (!perm)
33125 return -EPERM;
33126 if (!i && v == K_NOSUCHMAP) {
33127 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33128 int i, j, k;
33129 int ret;
33130
33131 - if (!capable(CAP_SYS_TTY_CONFIG))
33132 - perm = 0;
33133 -
33134 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33135 if (!kbs) {
33136 ret = -ENOMEM;
33137 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33138 kfree(kbs);
33139 return ((p && *p) ? -EOVERFLOW : 0);
33140 case KDSKBSENT:
33141 + if (!capable(CAP_SYS_TTY_CONFIG))
33142 + perm = 0;
33143 +
33144 if (!perm) {
33145 ret = -EPERM;
33146 goto reterr;
33147 diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
33148 --- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33149 +++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33150 @@ -25,6 +25,7 @@
33151 #include <linux/kobject.h>
33152 #include <linux/cdev.h>
33153 #include <linux/uio_driver.h>
33154 +#include <asm/local.h>
33155
33156 #define UIO_MAX_DEVICES (1U << MINORBITS)
33157
33158 @@ -32,10 +33,10 @@ struct uio_device {
33159 struct module *owner;
33160 struct device *dev;
33161 int minor;
33162 - atomic_t event;
33163 + atomic_unchecked_t event;
33164 struct fasync_struct *async_queue;
33165 wait_queue_head_t wait;
33166 - int vma_count;
33167 + local_t vma_count;
33168 struct uio_info *info;
33169 struct kobject *map_dir;
33170 struct kobject *portio_dir;
33171 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33172 struct device_attribute *attr, char *buf)
33173 {
33174 struct uio_device *idev = dev_get_drvdata(dev);
33175 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33176 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33177 }
33178
33179 static struct device_attribute uio_class_attributes[] = {
33180 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33181 {
33182 struct uio_device *idev = info->uio_dev;
33183
33184 - atomic_inc(&idev->event);
33185 + atomic_inc_unchecked(&idev->event);
33186 wake_up_interruptible(&idev->wait);
33187 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33188 }
33189 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33190 }
33191
33192 listener->dev = idev;
33193 - listener->event_count = atomic_read(&idev->event);
33194 + listener->event_count = atomic_read_unchecked(&idev->event);
33195 filep->private_data = listener;
33196
33197 if (idev->info->open) {
33198 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33199 return -EIO;
33200
33201 poll_wait(filep, &idev->wait, wait);
33202 - if (listener->event_count != atomic_read(&idev->event))
33203 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33204 return POLLIN | POLLRDNORM;
33205 return 0;
33206 }
33207 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33208 do {
33209 set_current_state(TASK_INTERRUPTIBLE);
33210
33211 - event_count = atomic_read(&idev->event);
33212 + event_count = atomic_read_unchecked(&idev->event);
33213 if (event_count != listener->event_count) {
33214 if (copy_to_user(buf, &event_count, count))
33215 retval = -EFAULT;
33216 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33217 static void uio_vma_open(struct vm_area_struct *vma)
33218 {
33219 struct uio_device *idev = vma->vm_private_data;
33220 - idev->vma_count++;
33221 + local_inc(&idev->vma_count);
33222 }
33223
33224 static void uio_vma_close(struct vm_area_struct *vma)
33225 {
33226 struct uio_device *idev = vma->vm_private_data;
33227 - idev->vma_count--;
33228 + local_dec(&idev->vma_count);
33229 }
33230
33231 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33232 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
33233 idev->owner = owner;
33234 idev->info = info;
33235 init_waitqueue_head(&idev->wait);
33236 - atomic_set(&idev->event, 0);
33237 + atomic_set_unchecked(&idev->event, 0);
33238
33239 ret = uio_get_minor(idev);
33240 if (ret)
33241 diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
33242 --- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33243 +++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33244 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33245 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33246 if (ret < 2)
33247 return -EINVAL;
33248 - if (index < 0 || index > 0x7f)
33249 + if (index > 0x7f)
33250 return -EINVAL;
33251 pos += tmp;
33252
33253 diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
33254 --- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33255 +++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33256 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33257 if (printk_ratelimit())
33258 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33259 __func__, vpi, vci);
33260 - atomic_inc(&vcc->stats->rx_err);
33261 + atomic_inc_unchecked(&vcc->stats->rx_err);
33262 return;
33263 }
33264
33265 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33266 if (length > ATM_MAX_AAL5_PDU) {
33267 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33268 __func__, length, vcc);
33269 - atomic_inc(&vcc->stats->rx_err);
33270 + atomic_inc_unchecked(&vcc->stats->rx_err);
33271 goto out;
33272 }
33273
33274 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33275 if (sarb->len < pdu_length) {
33276 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33277 __func__, pdu_length, sarb->len, vcc);
33278 - atomic_inc(&vcc->stats->rx_err);
33279 + atomic_inc_unchecked(&vcc->stats->rx_err);
33280 goto out;
33281 }
33282
33283 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33284 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33285 __func__, vcc);
33286 - atomic_inc(&vcc->stats->rx_err);
33287 + atomic_inc_unchecked(&vcc->stats->rx_err);
33288 goto out;
33289 }
33290
33291 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33292 if (printk_ratelimit())
33293 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33294 __func__, length);
33295 - atomic_inc(&vcc->stats->rx_drop);
33296 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33297 goto out;
33298 }
33299
33300 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33301
33302 vcc->push(vcc, skb);
33303
33304 - atomic_inc(&vcc->stats->rx);
33305 + atomic_inc_unchecked(&vcc->stats->rx);
33306 out:
33307 skb_trim(sarb, 0);
33308 }
33309 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33310 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33311
33312 usbatm_pop(vcc, skb);
33313 - atomic_inc(&vcc->stats->tx);
33314 + atomic_inc_unchecked(&vcc->stats->tx);
33315
33316 skb = skb_dequeue(&instance->sndqueue);
33317 }
33318 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33319 if (!left--)
33320 return sprintf(page,
33321 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33322 - atomic_read(&atm_dev->stats.aal5.tx),
33323 - atomic_read(&atm_dev->stats.aal5.tx_err),
33324 - atomic_read(&atm_dev->stats.aal5.rx),
33325 - atomic_read(&atm_dev->stats.aal5.rx_err),
33326 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33327 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33328 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33329 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33330 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33331 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33332
33333 if (!left--) {
33334 if (instance->disconnected)
33335 diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
33336 --- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33337 +++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33338 @@ -126,7 +126,7 @@ static const char format_endpt[] =
33339 * time it gets called.
33340 */
33341 static struct device_connect_event {
33342 - atomic_t count;
33343 + atomic_unchecked_t count;
33344 wait_queue_head_t wait;
33345 } device_event = {
33346 .count = ATOMIC_INIT(1),
33347 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33348
33349 void usbfs_conn_disc_event(void)
33350 {
33351 - atomic_add(2, &device_event.count);
33352 + atomic_add_unchecked(2, &device_event.count);
33353 wake_up(&device_event.wait);
33354 }
33355
33356 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33357
33358 poll_wait(file, &device_event.wait, wait);
33359
33360 - event_count = atomic_read(&device_event.count);
33361 + event_count = atomic_read_unchecked(&device_event.count);
33362 if (file->f_version != event_count) {
33363 file->f_version = event_count;
33364 return POLLIN | POLLRDNORM;
33365 diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
33366 --- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33367 +++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33368 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33369 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33370 if (buf) {
33371 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33372 - if (len > 0) {
33373 - smallbuf = kmalloc(++len, GFP_NOIO);
33374 + if (len++ > 0) {
33375 + smallbuf = kmalloc(len, GFP_NOIO);
33376 if (!smallbuf)
33377 return buf;
33378 memcpy(smallbuf, buf, len);
33379 diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
33380 --- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33381 +++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33382 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33383
33384 #ifdef CONFIG_KGDB
33385 static struct kgdb_io kgdbdbgp_io_ops;
33386 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33387 +static struct kgdb_io kgdbdbgp_io_ops_console;
33388 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33389 #else
33390 #define dbgp_kgdb_mode (0)
33391 #endif
33392 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33393 .write_char = kgdbdbgp_write_char,
33394 };
33395
33396 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33397 + .name = "kgdbdbgp",
33398 + .read_char = kgdbdbgp_read_char,
33399 + .write_char = kgdbdbgp_write_char,
33400 + .is_console = 1
33401 +};
33402 +
33403 static int kgdbdbgp_wait_time;
33404
33405 static int __init kgdbdbgp_parse_config(char *str)
33406 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33407 ptr++;
33408 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33409 }
33410 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33411 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33412 + if (early_dbgp_console.index != -1)
33413 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33414 + else
33415 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33416
33417 return 0;
33418 }
33419 diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
33420 --- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33421 +++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33422 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33423 unsigned int num_tests;
33424 int i, ret;
33425
33426 + pax_track_stack();
33427 +
33428 num_tests = ARRAY_SIZE(simple_test_vector);
33429 for (i = 0; i < num_tests; i++) {
33430 ret = xhci_test_trb_in_td(xhci,
33431 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
33432 --- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33433 +++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33434 @@ -192,7 +192,7 @@ struct wahc {
33435 struct list_head xfer_delayed_list;
33436 spinlock_t xfer_list_lock;
33437 struct work_struct xfer_work;
33438 - atomic_t xfer_id_count;
33439 + atomic_unchecked_t xfer_id_count;
33440 };
33441
33442
33443 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33444 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33445 spin_lock_init(&wa->xfer_list_lock);
33446 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33447 - atomic_set(&wa->xfer_id_count, 1);
33448 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33449 }
33450
33451 /**
33452 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
33453 --- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33454 +++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33455 @@ -294,7 +294,7 @@ out:
33456 */
33457 static void wa_xfer_id_init(struct wa_xfer *xfer)
33458 {
33459 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33460 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33461 }
33462
33463 /*
33464 diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
33465 --- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33466 +++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33467 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33468 return get_user(vq->last_used_idx, &used->idx);
33469 }
33470
33471 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33472 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33473 {
33474 struct file *eventfp, *filep = NULL,
33475 *pollstart = NULL, *pollstop = NULL;
33476 diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
33477 --- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33478 +++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33479 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33480 rc = -ENODEV;
33481 goto out;
33482 }
33483 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33484 - !info->fbops->fb_setcmap)) {
33485 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33486 rc = -EINVAL;
33487 goto out1;
33488 }
33489 diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
33490 --- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33491 +++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33492 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33493 image->dx += image->width + 8;
33494 }
33495 } else if (rotate == FB_ROTATE_UD) {
33496 - for (x = 0; x < num && image->dx >= 0; x++) {
33497 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33498 info->fbops->fb_imageblit(info, image);
33499 image->dx -= image->width + 8;
33500 }
33501 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33502 image->dy += image->height + 8;
33503 }
33504 } else if (rotate == FB_ROTATE_CCW) {
33505 - for (x = 0; x < num && image->dy >= 0; x++) {
33506 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33507 info->fbops->fb_imageblit(info, image);
33508 image->dy -= image->height + 8;
33509 }
33510 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33511 int flags = info->flags;
33512 int ret = 0;
33513
33514 + pax_track_stack();
33515 +
33516 if (var->activate & FB_ACTIVATE_INV_MODE) {
33517 struct fb_videomode mode1, mode2;
33518
33519 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33520 void __user *argp = (void __user *)arg;
33521 long ret = 0;
33522
33523 + pax_track_stack();
33524 +
33525 switch (cmd) {
33526 case FBIOGET_VSCREENINFO:
33527 if (!lock_fb_info(info))
33528 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33529 return -EFAULT;
33530 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33531 return -EINVAL;
33532 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33533 + if (con2fb.framebuffer >= FB_MAX)
33534 return -EINVAL;
33535 if (!registered_fb[con2fb.framebuffer])
33536 request_module("fb%d", con2fb.framebuffer);
33537 diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
33538 --- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33539 +++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33540 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33541 }
33542 }
33543 printk("ringbuffer lockup!!!\n");
33544 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33545 i810_report_error(mmio);
33546 par->dev_flags |= LOCKUP;
33547 info->pixmap.scan_align = 1;
33548 diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
33549 --- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
33550 +++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
33551 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
33552 dlfb_urb_completion(urb);
33553
33554 error:
33555 - atomic_add(bytes_sent, &dev->bytes_sent);
33556 - atomic_add(bytes_identical, &dev->bytes_identical);
33557 - atomic_add(width*height*2, &dev->bytes_rendered);
33558 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33559 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33560 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33561 end_cycles = get_cycles();
33562 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33563 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33564 >> 10)), /* Kcycles */
33565 &dev->cpu_kcycles_used);
33566
33567 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
33568 dlfb_urb_completion(urb);
33569
33570 error:
33571 - atomic_add(bytes_sent, &dev->bytes_sent);
33572 - atomic_add(bytes_identical, &dev->bytes_identical);
33573 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33574 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33575 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33576 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33577 end_cycles = get_cycles();
33578 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33579 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33580 >> 10)), /* Kcycles */
33581 &dev->cpu_kcycles_used);
33582 }
33583 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
33584 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33585 struct dlfb_data *dev = fb_info->par;
33586 return snprintf(buf, PAGE_SIZE, "%u\n",
33587 - atomic_read(&dev->bytes_rendered));
33588 + atomic_read_unchecked(&dev->bytes_rendered));
33589 }
33590
33591 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33592 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
33593 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33594 struct dlfb_data *dev = fb_info->par;
33595 return snprintf(buf, PAGE_SIZE, "%u\n",
33596 - atomic_read(&dev->bytes_identical));
33597 + atomic_read_unchecked(&dev->bytes_identical));
33598 }
33599
33600 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33601 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
33602 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33603 struct dlfb_data *dev = fb_info->par;
33604 return snprintf(buf, PAGE_SIZE, "%u\n",
33605 - atomic_read(&dev->bytes_sent));
33606 + atomic_read_unchecked(&dev->bytes_sent));
33607 }
33608
33609 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33610 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
33611 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33612 struct dlfb_data *dev = fb_info->par;
33613 return snprintf(buf, PAGE_SIZE, "%u\n",
33614 - atomic_read(&dev->cpu_kcycles_used));
33615 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33616 }
33617
33618 static ssize_t edid_show(
33619 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
33620 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33621 struct dlfb_data *dev = fb_info->par;
33622
33623 - atomic_set(&dev->bytes_rendered, 0);
33624 - atomic_set(&dev->bytes_identical, 0);
33625 - atomic_set(&dev->bytes_sent, 0);
33626 - atomic_set(&dev->cpu_kcycles_used, 0);
33627 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33628 + atomic_set_unchecked(&dev->bytes_identical, 0);
33629 + atomic_set_unchecked(&dev->bytes_sent, 0);
33630 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33631
33632 return count;
33633 }
33634 diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
33635 --- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
33636 +++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
33637 @@ -19,6 +19,7 @@
33638 #include <linux/io.h>
33639 #include <linux/mutex.h>
33640 #include <linux/slab.h>
33641 +#include <linux/moduleloader.h>
33642 #include <video/edid.h>
33643 #include <video/uvesafb.h>
33644 #ifdef CONFIG_X86
33645 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33646 NULL,
33647 };
33648
33649 - return call_usermodehelper(v86d_path, argv, envp, 1);
33650 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33651 }
33652
33653 /*
33654 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33655 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33656 par->pmi_setpal = par->ypan = 0;
33657 } else {
33658 +
33659 +#ifdef CONFIG_PAX_KERNEXEC
33660 +#ifdef CONFIG_MODULES
33661 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33662 +#endif
33663 + if (!par->pmi_code) {
33664 + par->pmi_setpal = par->ypan = 0;
33665 + return 0;
33666 + }
33667 +#endif
33668 +
33669 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33670 + task->t.regs.edi);
33671 +
33672 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33673 + pax_open_kernel();
33674 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33675 + pax_close_kernel();
33676 +
33677 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33678 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33679 +#else
33680 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33681 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33682 +#endif
33683 +
33684 printk(KERN_INFO "uvesafb: protected mode interface info at "
33685 "%04x:%04x\n",
33686 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33687 @@ -1821,6 +1844,11 @@ out:
33688 if (par->vbe_modes)
33689 kfree(par->vbe_modes);
33690
33691 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33692 + if (par->pmi_code)
33693 + module_free_exec(NULL, par->pmi_code);
33694 +#endif
33695 +
33696 framebuffer_release(info);
33697 return err;
33698 }
33699 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33700 kfree(par->vbe_state_orig);
33701 if (par->vbe_state_saved)
33702 kfree(par->vbe_state_saved);
33703 +
33704 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33705 + if (par->pmi_code)
33706 + module_free_exec(NULL, par->pmi_code);
33707 +#endif
33708 +
33709 }
33710
33711 framebuffer_release(info);
33712 diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
33713 --- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
33714 +++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
33715 @@ -9,6 +9,7 @@
33716 */
33717
33718 #include <linux/module.h>
33719 +#include <linux/moduleloader.h>
33720 #include <linux/kernel.h>
33721 #include <linux/errno.h>
33722 #include <linux/string.h>
33723 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33724 static int vram_total __initdata; /* Set total amount of memory */
33725 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33726 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33727 -static void (*pmi_start)(void) __read_mostly;
33728 -static void (*pmi_pal) (void) __read_mostly;
33729 +static void (*pmi_start)(void) __read_only;
33730 +static void (*pmi_pal) (void) __read_only;
33731 static int depth __read_mostly;
33732 static int vga_compat __read_mostly;
33733 /* --------------------------------------------------------------------- */
33734 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
33735 unsigned int size_vmode;
33736 unsigned int size_remap;
33737 unsigned int size_total;
33738 + void *pmi_code = NULL;
33739
33740 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33741 return -ENODEV;
33742 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
33743 size_remap = size_total;
33744 vesafb_fix.smem_len = size_remap;
33745
33746 -#ifndef __i386__
33747 - screen_info.vesapm_seg = 0;
33748 -#endif
33749 -
33750 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33751 printk(KERN_WARNING
33752 "vesafb: cannot reserve video memory at 0x%lx\n",
33753 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
33754 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33755 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33756
33757 +#ifdef __i386__
33758 +
33759 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33760 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33761 + if (!pmi_code)
33762 +#elif !defined(CONFIG_PAX_KERNEXEC)
33763 + if (0)
33764 +#endif
33765 +
33766 +#endif
33767 + screen_info.vesapm_seg = 0;
33768 +
33769 if (screen_info.vesapm_seg) {
33770 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33771 - screen_info.vesapm_seg,screen_info.vesapm_off);
33772 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33773 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33774 }
33775
33776 if (screen_info.vesapm_seg < 0xc000)
33777 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
33778
33779 if (ypan || pmi_setpal) {
33780 unsigned short *pmi_base;
33781 +
33782 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33783 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33784 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33785 +
33786 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33787 + pax_open_kernel();
33788 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33789 +#else
33790 + pmi_code = pmi_base;
33791 +#endif
33792 +
33793 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33794 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33795 +
33796 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33797 + pmi_start = ktva_ktla(pmi_start);
33798 + pmi_pal = ktva_ktla(pmi_pal);
33799 + pax_close_kernel();
33800 +#endif
33801 +
33802 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33803 if (pmi_base[3]) {
33804 printk(KERN_INFO "vesafb: pmi: ports = ");
33805 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
33806 info->node, info->fix.id);
33807 return 0;
33808 err:
33809 +
33810 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33811 + module_free_exec(NULL, pmi_code);
33812 +#endif
33813 +
33814 if (info->screen_base)
33815 iounmap(info->screen_base);
33816 framebuffer_release(info);
33817 diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
33818 --- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
33819 +++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
33820 @@ -56,7 +56,7 @@ struct via_clock {
33821
33822 void (*set_engine_pll_state)(u8 state);
33823 void (*set_engine_pll)(struct via_pll_config config);
33824 -};
33825 +} __no_const;
33826
33827
33828 static inline u32 get_pll_internal_frequency(u32 ref_freq,
33829 diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
33830 --- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
33831 +++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
33832 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
33833 struct sysinfo i;
33834 int idx = 0;
33835
33836 + pax_track_stack();
33837 +
33838 all_vm_events(events);
33839 si_meminfo(&i);
33840
33841 diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
33842 --- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
33843 +++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
33844 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33845 void
33846 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33847 {
33848 - char *s = nd_get_link(nd);
33849 + const char *s = nd_get_link(nd);
33850
33851 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33852 IS_ERR(s) ? "<error>" : s);
33853 diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
33854 --- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
33855 +++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
33856 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33857 size += sizeof(struct io_event) * nr_events;
33858 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33859
33860 - if (nr_pages < 0)
33861 + if (nr_pages <= 0)
33862 return -EINVAL;
33863
33864 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33865 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33866 struct aio_timeout to;
33867 int retry = 0;
33868
33869 + pax_track_stack();
33870 +
33871 /* needed to zero any padding within an entry (there shouldn't be
33872 * any, but C is fun!
33873 */
33874 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33875 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33876 {
33877 ssize_t ret;
33878 + struct iovec iovstack;
33879
33880 #ifdef CONFIG_COMPAT
33881 if (compat)
33882 ret = compat_rw_copy_check_uvector(type,
33883 (struct compat_iovec __user *)kiocb->ki_buf,
33884 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33885 + kiocb->ki_nbytes, 1, &iovstack,
33886 &kiocb->ki_iovec);
33887 else
33888 #endif
33889 ret = rw_copy_check_uvector(type,
33890 (struct iovec __user *)kiocb->ki_buf,
33891 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33892 + kiocb->ki_nbytes, 1, &iovstack,
33893 &kiocb->ki_iovec);
33894 if (ret < 0)
33895 goto out;
33896
33897 + if (kiocb->ki_iovec == &iovstack) {
33898 + kiocb->ki_inline_vec = iovstack;
33899 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
33900 + }
33901 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33902 kiocb->ki_cur_seg = 0;
33903 /* ki_nbytes/left now reflect bytes instead of segs */
33904 diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
33905 --- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
33906 +++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
33907 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33908 unsigned long limit;
33909
33910 limit = rlimit(RLIMIT_FSIZE);
33911 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33912 if (limit != RLIM_INFINITY && offset > limit)
33913 goto out_sig;
33914 if (offset > inode->i_sb->s_maxbytes)
33915 diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
33916 --- linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:13.000000000 -0400
33917 +++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
33918 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
33919 {
33920 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33921 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33922 - char *link = nd_get_link(nd);
33923 + const char *link = nd_get_link(nd);
33924 if (!IS_ERR(link))
33925 kfree(link);
33926 }
33927 diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
33928 --- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
33929 +++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
33930 @@ -16,6 +16,7 @@
33931 #include <linux/string.h>
33932 #include <linux/fs.h>
33933 #include <linux/file.h>
33934 +#include <linux/security.h>
33935 #include <linux/stat.h>
33936 #include <linux/fcntl.h>
33937 #include <linux/ptrace.h>
33938 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33939 #endif
33940 # define START_STACK(u) ((void __user *)u.start_stack)
33941
33942 + memset(&dump, 0, sizeof(dump));
33943 +
33944 fs = get_fs();
33945 set_fs(KERNEL_DS);
33946 has_dumped = 1;
33947 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33948
33949 /* If the size of the dump file exceeds the rlimit, then see what would happen
33950 if we wrote the stack, but not the data area. */
33951 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33952 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33953 dump.u_dsize = 0;
33954
33955 /* Make sure we have enough room to write the stack and data areas. */
33956 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33957 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
33958 dump.u_ssize = 0;
33959
33960 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
33961 rlim = rlimit(RLIMIT_DATA);
33962 if (rlim >= RLIM_INFINITY)
33963 rlim = ~0;
33964 +
33965 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
33966 if (ex.a_data + ex.a_bss > rlim)
33967 return -ENOMEM;
33968
33969 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
33970 install_exec_creds(bprm);
33971 current->flags &= ~PF_FORKNOEXEC;
33972
33973 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
33974 + current->mm->pax_flags = 0UL;
33975 +#endif
33976 +
33977 +#ifdef CONFIG_PAX_PAGEEXEC
33978 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
33979 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
33980 +
33981 +#ifdef CONFIG_PAX_EMUTRAMP
33982 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
33983 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
33984 +#endif
33985 +
33986 +#ifdef CONFIG_PAX_MPROTECT
33987 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
33988 + current->mm->pax_flags |= MF_PAX_MPROTECT;
33989 +#endif
33990 +
33991 + }
33992 +#endif
33993 +
33994 if (N_MAGIC(ex) == OMAGIC) {
33995 unsigned long text_addr, map_size;
33996 loff_t pos;
33997 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
33998
33999 down_write(&current->mm->mmap_sem);
34000 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34001 - PROT_READ | PROT_WRITE | PROT_EXEC,
34002 + PROT_READ | PROT_WRITE,
34003 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34004 fd_offset + ex.a_text);
34005 up_write(&current->mm->mmap_sem);
34006 diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
34007 --- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
34008 +++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
34009 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34010 #define elf_core_dump NULL
34011 #endif
34012
34013 +#ifdef CONFIG_PAX_MPROTECT
34014 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34015 +#endif
34016 +
34017 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34018 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34019 #else
34020 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34021 .load_binary = load_elf_binary,
34022 .load_shlib = load_elf_library,
34023 .core_dump = elf_core_dump,
34024 +
34025 +#ifdef CONFIG_PAX_MPROTECT
34026 + .handle_mprotect= elf_handle_mprotect,
34027 +#endif
34028 +
34029 .min_coredump = ELF_EXEC_PAGESIZE,
34030 };
34031
34032 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34033
34034 static int set_brk(unsigned long start, unsigned long end)
34035 {
34036 + unsigned long e = end;
34037 +
34038 start = ELF_PAGEALIGN(start);
34039 end = ELF_PAGEALIGN(end);
34040 if (end > start) {
34041 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34042 if (BAD_ADDR(addr))
34043 return addr;
34044 }
34045 - current->mm->start_brk = current->mm->brk = end;
34046 + current->mm->start_brk = current->mm->brk = e;
34047 return 0;
34048 }
34049
34050 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34051 elf_addr_t __user *u_rand_bytes;
34052 const char *k_platform = ELF_PLATFORM;
34053 const char *k_base_platform = ELF_BASE_PLATFORM;
34054 - unsigned char k_rand_bytes[16];
34055 + u32 k_rand_bytes[4];
34056 int items;
34057 elf_addr_t *elf_info;
34058 int ei_index = 0;
34059 const struct cred *cred = current_cred();
34060 struct vm_area_struct *vma;
34061 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34062 +
34063 + pax_track_stack();
34064
34065 /*
34066 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34067 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34068 * Generate 16 random bytes for userspace PRNG seeding.
34069 */
34070 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34071 - u_rand_bytes = (elf_addr_t __user *)
34072 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34073 + srandom32(k_rand_bytes[0] ^ random32());
34074 + srandom32(k_rand_bytes[1] ^ random32());
34075 + srandom32(k_rand_bytes[2] ^ random32());
34076 + srandom32(k_rand_bytes[3] ^ random32());
34077 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34078 + u_rand_bytes = (elf_addr_t __user *) p;
34079 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34080 return -EFAULT;
34081
34082 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34083 return -EFAULT;
34084 current->mm->env_end = p;
34085
34086 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34087 +
34088 /* Put the elf_info on the stack in the right place. */
34089 sp = (elf_addr_t __user *)envp + 1;
34090 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34091 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34092 return -EFAULT;
34093 return 0;
34094 }
34095 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34096 {
34097 struct elf_phdr *elf_phdata;
34098 struct elf_phdr *eppnt;
34099 - unsigned long load_addr = 0;
34100 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34101 int load_addr_set = 0;
34102 unsigned long last_bss = 0, elf_bss = 0;
34103 - unsigned long error = ~0UL;
34104 + unsigned long error = -EINVAL;
34105 unsigned long total_size;
34106 int retval, i, size;
34107
34108 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34109 goto out_close;
34110 }
34111
34112 +#ifdef CONFIG_PAX_SEGMEXEC
34113 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34114 + pax_task_size = SEGMEXEC_TASK_SIZE;
34115 +#endif
34116 +
34117 eppnt = elf_phdata;
34118 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34119 if (eppnt->p_type == PT_LOAD) {
34120 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34121 k = load_addr + eppnt->p_vaddr;
34122 if (BAD_ADDR(k) ||
34123 eppnt->p_filesz > eppnt->p_memsz ||
34124 - eppnt->p_memsz > TASK_SIZE ||
34125 - TASK_SIZE - eppnt->p_memsz < k) {
34126 + eppnt->p_memsz > pax_task_size ||
34127 + pax_task_size - eppnt->p_memsz < k) {
34128 error = -ENOMEM;
34129 goto out_close;
34130 }
34131 @@ -528,6 +553,193 @@ out:
34132 return error;
34133 }
34134
34135 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34136 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34137 +{
34138 + unsigned long pax_flags = 0UL;
34139 +
34140 +#ifdef CONFIG_PAX_PAGEEXEC
34141 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34142 + pax_flags |= MF_PAX_PAGEEXEC;
34143 +#endif
34144 +
34145 +#ifdef CONFIG_PAX_SEGMEXEC
34146 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34147 + pax_flags |= MF_PAX_SEGMEXEC;
34148 +#endif
34149 +
34150 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34151 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34152 + if ((__supported_pte_mask & _PAGE_NX))
34153 + pax_flags &= ~MF_PAX_SEGMEXEC;
34154 + else
34155 + pax_flags &= ~MF_PAX_PAGEEXEC;
34156 + }
34157 +#endif
34158 +
34159 +#ifdef CONFIG_PAX_EMUTRAMP
34160 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34161 + pax_flags |= MF_PAX_EMUTRAMP;
34162 +#endif
34163 +
34164 +#ifdef CONFIG_PAX_MPROTECT
34165 + if (elf_phdata->p_flags & PF_MPROTECT)
34166 + pax_flags |= MF_PAX_MPROTECT;
34167 +#endif
34168 +
34169 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34170 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34171 + pax_flags |= MF_PAX_RANDMMAP;
34172 +#endif
34173 +
34174 + return pax_flags;
34175 +}
34176 +#endif
34177 +
34178 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34179 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34180 +{
34181 + unsigned long pax_flags = 0UL;
34182 +
34183 +#ifdef CONFIG_PAX_PAGEEXEC
34184 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34185 + pax_flags |= MF_PAX_PAGEEXEC;
34186 +#endif
34187 +
34188 +#ifdef CONFIG_PAX_SEGMEXEC
34189 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34190 + pax_flags |= MF_PAX_SEGMEXEC;
34191 +#endif
34192 +
34193 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34194 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34195 + if ((__supported_pte_mask & _PAGE_NX))
34196 + pax_flags &= ~MF_PAX_SEGMEXEC;
34197 + else
34198 + pax_flags &= ~MF_PAX_PAGEEXEC;
34199 + }
34200 +#endif
34201 +
34202 +#ifdef CONFIG_PAX_EMUTRAMP
34203 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34204 + pax_flags |= MF_PAX_EMUTRAMP;
34205 +#endif
34206 +
34207 +#ifdef CONFIG_PAX_MPROTECT
34208 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34209 + pax_flags |= MF_PAX_MPROTECT;
34210 +#endif
34211 +
34212 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34213 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34214 + pax_flags |= MF_PAX_RANDMMAP;
34215 +#endif
34216 +
34217 + return pax_flags;
34218 +}
34219 +#endif
34220 +
34221 +#ifdef CONFIG_PAX_EI_PAX
34222 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34223 +{
34224 + unsigned long pax_flags = 0UL;
34225 +
34226 +#ifdef CONFIG_PAX_PAGEEXEC
34227 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34228 + pax_flags |= MF_PAX_PAGEEXEC;
34229 +#endif
34230 +
34231 +#ifdef CONFIG_PAX_SEGMEXEC
34232 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34233 + pax_flags |= MF_PAX_SEGMEXEC;
34234 +#endif
34235 +
34236 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34237 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34238 + if ((__supported_pte_mask & _PAGE_NX))
34239 + pax_flags &= ~MF_PAX_SEGMEXEC;
34240 + else
34241 + pax_flags &= ~MF_PAX_PAGEEXEC;
34242 + }
34243 +#endif
34244 +
34245 +#ifdef CONFIG_PAX_EMUTRAMP
34246 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34247 + pax_flags |= MF_PAX_EMUTRAMP;
34248 +#endif
34249 +
34250 +#ifdef CONFIG_PAX_MPROTECT
34251 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34252 + pax_flags |= MF_PAX_MPROTECT;
34253 +#endif
34254 +
34255 +#ifdef CONFIG_PAX_ASLR
34256 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34257 + pax_flags |= MF_PAX_RANDMMAP;
34258 +#endif
34259 +
34260 + return pax_flags;
34261 +}
34262 +#endif
34263 +
34264 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34265 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34266 +{
34267 + unsigned long pax_flags = 0UL;
34268 +
34269 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34270 + unsigned long i;
34271 + int found_flags = 0;
34272 +#endif
34273 +
34274 +#ifdef CONFIG_PAX_EI_PAX
34275 + pax_flags = pax_parse_ei_pax(elf_ex);
34276 +#endif
34277 +
34278 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34279 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34280 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34281 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34282 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34283 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34284 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34285 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34286 + return -EINVAL;
34287 +
34288 +#ifdef CONFIG_PAX_SOFTMODE
34289 + if (pax_softmode)
34290 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34291 + else
34292 +#endif
34293 +
34294 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34295 + found_flags = 1;
34296 + break;
34297 + }
34298 +#endif
34299 +
34300 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34301 + if (found_flags == 0) {
34302 + struct elf_phdr phdr;
34303 + memset(&phdr, 0, sizeof(phdr));
34304 + phdr.p_flags = PF_NOEMUTRAMP;
34305 +#ifdef CONFIG_PAX_SOFTMODE
34306 + if (pax_softmode)
34307 + pax_flags = pax_parse_softmode(&phdr);
34308 + else
34309 +#endif
34310 + pax_flags = pax_parse_hardmode(&phdr);
34311 + }
34312 +#endif
34313 +
34314 + if (0 > pax_check_flags(&pax_flags))
34315 + return -EINVAL;
34316 +
34317 + current->mm->pax_flags = pax_flags;
34318 + return 0;
34319 +}
34320 +#endif
34321 +
34322 /*
34323 * These are the functions used to load ELF style executables and shared
34324 * libraries. There is no binary dependent code anywhere else.
34325 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34326 {
34327 unsigned int random_variable = 0;
34328
34329 +#ifdef CONFIG_PAX_RANDUSTACK
34330 + if (randomize_va_space)
34331 + return stack_top - current->mm->delta_stack;
34332 +#endif
34333 +
34334 if ((current->flags & PF_RANDOMIZE) &&
34335 !(current->personality & ADDR_NO_RANDOMIZE)) {
34336 random_variable = get_random_int() & STACK_RND_MASK;
34337 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34338 unsigned long load_addr = 0, load_bias = 0;
34339 int load_addr_set = 0;
34340 char * elf_interpreter = NULL;
34341 - unsigned long error;
34342 + unsigned long error = 0;
34343 struct elf_phdr *elf_ppnt, *elf_phdata;
34344 unsigned long elf_bss, elf_brk;
34345 int retval, i;
34346 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34347 unsigned long start_code, end_code, start_data, end_data;
34348 unsigned long reloc_func_desc __maybe_unused = 0;
34349 int executable_stack = EXSTACK_DEFAULT;
34350 - unsigned long def_flags = 0;
34351 struct {
34352 struct elfhdr elf_ex;
34353 struct elfhdr interp_elf_ex;
34354 } *loc;
34355 + unsigned long pax_task_size = TASK_SIZE;
34356
34357 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34358 if (!loc) {
34359 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34360
34361 /* OK, This is the point of no return */
34362 current->flags &= ~PF_FORKNOEXEC;
34363 - current->mm->def_flags = def_flags;
34364 +
34365 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34366 + current->mm->pax_flags = 0UL;
34367 +#endif
34368 +
34369 +#ifdef CONFIG_PAX_DLRESOLVE
34370 + current->mm->call_dl_resolve = 0UL;
34371 +#endif
34372 +
34373 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34374 + current->mm->call_syscall = 0UL;
34375 +#endif
34376 +
34377 +#ifdef CONFIG_PAX_ASLR
34378 + current->mm->delta_mmap = 0UL;
34379 + current->mm->delta_stack = 0UL;
34380 +#endif
34381 +
34382 + current->mm->def_flags = 0;
34383 +
34384 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34385 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34386 + send_sig(SIGKILL, current, 0);
34387 + goto out_free_dentry;
34388 + }
34389 +#endif
34390 +
34391 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34392 + pax_set_initial_flags(bprm);
34393 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34394 + if (pax_set_initial_flags_func)
34395 + (pax_set_initial_flags_func)(bprm);
34396 +#endif
34397 +
34398 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34399 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34400 + current->mm->context.user_cs_limit = PAGE_SIZE;
34401 + current->mm->def_flags |= VM_PAGEEXEC;
34402 + }
34403 +#endif
34404 +
34405 +#ifdef CONFIG_PAX_SEGMEXEC
34406 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34407 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34408 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34409 + pax_task_size = SEGMEXEC_TASK_SIZE;
34410 + current->mm->def_flags |= VM_NOHUGEPAGE;
34411 + }
34412 +#endif
34413 +
34414 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34415 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34416 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34417 + put_cpu();
34418 + }
34419 +#endif
34420
34421 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34422 may depend on the personality. */
34423 SET_PERSONALITY(loc->elf_ex);
34424 +
34425 +#ifdef CONFIG_PAX_ASLR
34426 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34427 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34428 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34429 + }
34430 +#endif
34431 +
34432 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34433 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34434 + executable_stack = EXSTACK_DISABLE_X;
34435 + current->personality &= ~READ_IMPLIES_EXEC;
34436 + } else
34437 +#endif
34438 +
34439 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34440 current->personality |= READ_IMPLIES_EXEC;
34441
34442 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34443 #else
34444 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34445 #endif
34446 +
34447 +#ifdef CONFIG_PAX_RANDMMAP
34448 + /* PaX: randomize base address at the default exe base if requested */
34449 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34450 +#ifdef CONFIG_SPARC64
34451 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34452 +#else
34453 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34454 +#endif
34455 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34456 + elf_flags |= MAP_FIXED;
34457 + }
34458 +#endif
34459 +
34460 }
34461
34462 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34463 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34464 * allowed task size. Note that p_filesz must always be
34465 * <= p_memsz so it is only necessary to check p_memsz.
34466 */
34467 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34468 - elf_ppnt->p_memsz > TASK_SIZE ||
34469 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34470 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34471 + elf_ppnt->p_memsz > pax_task_size ||
34472 + pax_task_size - elf_ppnt->p_memsz < k) {
34473 /* set_brk can never work. Avoid overflows. */
34474 send_sig(SIGKILL, current, 0);
34475 retval = -EINVAL;
34476 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34477 start_data += load_bias;
34478 end_data += load_bias;
34479
34480 +#ifdef CONFIG_PAX_RANDMMAP
34481 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34482 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34483 +#endif
34484 +
34485 /* Calling set_brk effectively mmaps the pages that we need
34486 * for the bss and break sections. We must do this before
34487 * mapping in the interpreter, to make sure it doesn't wind
34488 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34489 goto out_free_dentry;
34490 }
34491 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34492 - send_sig(SIGSEGV, current, 0);
34493 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34494 - goto out_free_dentry;
34495 + /*
34496 + * This bss-zeroing can fail if the ELF
34497 + * file specifies odd protections. So
34498 + * we don't check the return value
34499 + */
34500 }
34501
34502 if (elf_interpreter) {
34503 @@ -1090,7 +1398,7 @@ out:
34504 * Decide what to dump of a segment, part, all or none.
34505 */
34506 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34507 - unsigned long mm_flags)
34508 + unsigned long mm_flags, long signr)
34509 {
34510 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34511
34512 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34513 if (vma->vm_file == NULL)
34514 return 0;
34515
34516 - if (FILTER(MAPPED_PRIVATE))
34517 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34518 goto whole;
34519
34520 /*
34521 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34522 {
34523 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34524 int i = 0;
34525 - do
34526 + do {
34527 i += 2;
34528 - while (auxv[i - 2] != AT_NULL);
34529 + } while (auxv[i - 2] != AT_NULL);
34530 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34531 }
34532
34533 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34534 }
34535
34536 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34537 - unsigned long mm_flags)
34538 + struct coredump_params *cprm)
34539 {
34540 struct vm_area_struct *vma;
34541 size_t size = 0;
34542
34543 for (vma = first_vma(current, gate_vma); vma != NULL;
34544 vma = next_vma(vma, gate_vma))
34545 - size += vma_dump_size(vma, mm_flags);
34546 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34547 return size;
34548 }
34549
34550 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34551
34552 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34553
34554 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34555 + offset += elf_core_vma_data_size(gate_vma, cprm);
34556 offset += elf_core_extra_data_size();
34557 e_shoff = offset;
34558
34559 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34560 offset = dataoff;
34561
34562 size += sizeof(*elf);
34563 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34564 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34565 goto end_coredump;
34566
34567 size += sizeof(*phdr4note);
34568 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34569 if (size > cprm->limit
34570 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34571 goto end_coredump;
34572 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34573 phdr.p_offset = offset;
34574 phdr.p_vaddr = vma->vm_start;
34575 phdr.p_paddr = 0;
34576 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34577 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34578 phdr.p_memsz = vma->vm_end - vma->vm_start;
34579 offset += phdr.p_filesz;
34580 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34581 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34582 phdr.p_align = ELF_EXEC_PAGESIZE;
34583
34584 size += sizeof(phdr);
34585 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34586 if (size > cprm->limit
34587 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34588 goto end_coredump;
34589 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34590 unsigned long addr;
34591 unsigned long end;
34592
34593 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34594 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34595
34596 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34597 struct page *page;
34598 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34599 page = get_dump_page(addr);
34600 if (page) {
34601 void *kaddr = kmap(page);
34602 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34603 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34604 !dump_write(cprm->file, kaddr,
34605 PAGE_SIZE);
34606 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34607
34608 if (e_phnum == PN_XNUM) {
34609 size += sizeof(*shdr4extnum);
34610 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34611 if (size > cprm->limit
34612 || !dump_write(cprm->file, shdr4extnum,
34613 sizeof(*shdr4extnum)))
34614 @@ -2067,6 +2380,97 @@ out:
34615
34616 #endif /* CONFIG_ELF_CORE */
34617
34618 +#ifdef CONFIG_PAX_MPROTECT
34619 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34620 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34621 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34622 + *
34623 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34624 + * basis because we want to allow the common case and not the special ones.
34625 + */
34626 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34627 +{
34628 + struct elfhdr elf_h;
34629 + struct elf_phdr elf_p;
34630 + unsigned long i;
34631 + unsigned long oldflags;
34632 + bool is_textrel_rw, is_textrel_rx, is_relro;
34633 +
34634 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34635 + return;
34636 +
34637 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34638 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34639 +
34640 +#ifdef CONFIG_PAX_ELFRELOCS
34641 + /* possible TEXTREL */
34642 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34643 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34644 +#else
34645 + is_textrel_rw = false;
34646 + is_textrel_rx = false;
34647 +#endif
34648 +
34649 + /* possible RELRO */
34650 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34651 +
34652 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34653 + return;
34654 +
34655 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34656 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34657 +
34658 +#ifdef CONFIG_PAX_ETEXECRELOCS
34659 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34660 +#else
34661 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34662 +#endif
34663 +
34664 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34665 + !elf_check_arch(&elf_h) ||
34666 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34667 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34668 + return;
34669 +
34670 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34671 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34672 + return;
34673 + switch (elf_p.p_type) {
34674 + case PT_DYNAMIC:
34675 + if (!is_textrel_rw && !is_textrel_rx)
34676 + continue;
34677 + i = 0UL;
34678 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34679 + elf_dyn dyn;
34680 +
34681 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34682 + return;
34683 + if (dyn.d_tag == DT_NULL)
34684 + return;
34685 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34686 + gr_log_textrel(vma);
34687 + if (is_textrel_rw)
34688 + vma->vm_flags |= VM_MAYWRITE;
34689 + else
34690 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34691 + vma->vm_flags &= ~VM_MAYWRITE;
34692 + return;
34693 + }
34694 + i++;
34695 + }
34696 + return;
34697 +
34698 + case PT_GNU_RELRO:
34699 + if (!is_relro)
34700 + continue;
34701 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34702 + vma->vm_flags &= ~VM_MAYWRITE;
34703 + return;
34704 + }
34705 + }
34706 +}
34707 +#endif
34708 +
34709 static int __init init_elf_binfmt(void)
34710 {
34711 return register_binfmt(&elf_format);
34712 diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
34713 --- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
34714 +++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
34715 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34716 realdatastart = (unsigned long) -ENOMEM;
34717 printk("Unable to allocate RAM for process data, errno %d\n",
34718 (int)-realdatastart);
34719 + down_write(&current->mm->mmap_sem);
34720 do_munmap(current->mm, textpos, text_len);
34721 + up_write(&current->mm->mmap_sem);
34722 ret = realdatastart;
34723 goto err;
34724 }
34725 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34726 }
34727 if (IS_ERR_VALUE(result)) {
34728 printk("Unable to read data+bss, errno %d\n", (int)-result);
34729 + down_write(&current->mm->mmap_sem);
34730 do_munmap(current->mm, textpos, text_len);
34731 do_munmap(current->mm, realdatastart, len);
34732 + up_write(&current->mm->mmap_sem);
34733 ret = result;
34734 goto err;
34735 }
34736 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34737 }
34738 if (IS_ERR_VALUE(result)) {
34739 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34740 + down_write(&current->mm->mmap_sem);
34741 do_munmap(current->mm, textpos, text_len + data_len + extra +
34742 MAX_SHARED_LIBS * sizeof(unsigned long));
34743 + up_write(&current->mm->mmap_sem);
34744 ret = result;
34745 goto err;
34746 }
34747 diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
34748 --- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
34749 +++ linux-3.0.4/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
34750 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34751 const int read = bio_data_dir(bio) == READ;
34752 struct bio_map_data *bmd = bio->bi_private;
34753 int i;
34754 - char *p = bmd->sgvecs[0].iov_base;
34755 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34756
34757 __bio_for_each_segment(bvec, bio, i, 0) {
34758 char *addr = page_address(bvec->bv_page);
34759 diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
34760 --- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
34761 +++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
34762 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34763 else if (bdev->bd_contains == bdev)
34764 return true; /* is a whole device which isn't held */
34765
34766 - else if (whole->bd_holder == bd_may_claim)
34767 + else if (whole->bd_holder == (void *)bd_may_claim)
34768 return true; /* is a partition of a device that is being partitioned */
34769 else if (whole->bd_holder != NULL)
34770 return false; /* is a partition of a held device */
34771 diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
34772 --- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
34773 +++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
34774 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
34775 free_extent_buffer(buf);
34776 add_root_to_dirty_list(root);
34777 } else {
34778 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34779 - parent_start = parent->start;
34780 - else
34781 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34782 + if (parent)
34783 + parent_start = parent->start;
34784 + else
34785 + parent_start = 0;
34786 + } else
34787 parent_start = 0;
34788
34789 WARN_ON(trans->transid != btrfs_header_generation(parent));
34790 diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
34791 --- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34792 +++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
34793 @@ -6895,7 +6895,7 @@ fail:
34794 return -ENOMEM;
34795 }
34796
34797 -static int btrfs_getattr(struct vfsmount *mnt,
34798 +int btrfs_getattr(struct vfsmount *mnt,
34799 struct dentry *dentry, struct kstat *stat)
34800 {
34801 struct inode *inode = dentry->d_inode;
34802 @@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
34803 return 0;
34804 }
34805
34806 +EXPORT_SYMBOL(btrfs_getattr);
34807 +
34808 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34809 +{
34810 + return BTRFS_I(inode)->root->anon_super.s_dev;
34811 +}
34812 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34813 +
34814 /*
34815 * If a file is moved, it will inherit the cow and compression flags of the new
34816 * directory.
34817 diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
34818 --- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
34819 +++ linux-3.0.4/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
34820 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
34821 for (i = 0; i < num_types; i++) {
34822 struct btrfs_space_info *tmp;
34823
34824 + /* Don't copy in more than we allocated */
34825 if (!slot_count)
34826 break;
34827
34828 + slot_count--;
34829 +
34830 info = NULL;
34831 rcu_read_lock();
34832 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34833 @@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
34834 memcpy(dest, &space, sizeof(space));
34835 dest++;
34836 space_args.total_spaces++;
34837 - slot_count--;
34838 }
34839 - if (!slot_count)
34840 - break;
34841 }
34842 up_read(&info->groups_sem);
34843 }
34844 diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
34845 --- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
34846 +++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
34847 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
34848 }
34849 spin_unlock(&rc->reloc_root_tree.lock);
34850
34851 - BUG_ON((struct btrfs_root *)node->data != root);
34852 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34853
34854 if (!del) {
34855 spin_lock(&rc->reloc_root_tree.lock);
34856 diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
34857 --- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
34858 +++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
34859 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34860 args);
34861
34862 /* start by checking things over */
34863 - ASSERT(cache->fstop_percent >= 0 &&
34864 - cache->fstop_percent < cache->fcull_percent &&
34865 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
34866 cache->fcull_percent < cache->frun_percent &&
34867 cache->frun_percent < 100);
34868
34869 - ASSERT(cache->bstop_percent >= 0 &&
34870 - cache->bstop_percent < cache->bcull_percent &&
34871 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
34872 cache->bcull_percent < cache->brun_percent &&
34873 cache->brun_percent < 100);
34874
34875 diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
34876 --- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
34877 +++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
34878 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34879 if (n > buflen)
34880 return -EMSGSIZE;
34881
34882 - if (copy_to_user(_buffer, buffer, n) != 0)
34883 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34884 return -EFAULT;
34885
34886 return n;
34887 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34888 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34889 return -EIO;
34890
34891 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
34892 + if (datalen > PAGE_SIZE - 1)
34893 return -EOPNOTSUPP;
34894
34895 /* drag the command string into the kernel so we can parse it */
34896 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34897 if (args[0] != '%' || args[1] != '\0')
34898 return -EINVAL;
34899
34900 - if (fstop < 0 || fstop >= cache->fcull_percent)
34901 + if (fstop >= cache->fcull_percent)
34902 return cachefiles_daemon_range_error(cache, args);
34903
34904 cache->fstop_percent = fstop;
34905 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34906 if (args[0] != '%' || args[1] != '\0')
34907 return -EINVAL;
34908
34909 - if (bstop < 0 || bstop >= cache->bcull_percent)
34910 + if (bstop >= cache->bcull_percent)
34911 return cachefiles_daemon_range_error(cache, args);
34912
34913 cache->bstop_percent = bstop;
34914 diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
34915 --- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
34916 +++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
34917 @@ -57,7 +57,7 @@ struct cachefiles_cache {
34918 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34919 struct rb_root active_nodes; /* active nodes (can't be culled) */
34920 rwlock_t active_lock; /* lock for active_nodes */
34921 - atomic_t gravecounter; /* graveyard uniquifier */
34922 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34923 unsigned frun_percent; /* when to stop culling (% files) */
34924 unsigned fcull_percent; /* when to start culling (% files) */
34925 unsigned fstop_percent; /* when to stop allocating (% files) */
34926 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34927 * proc.c
34928 */
34929 #ifdef CONFIG_CACHEFILES_HISTOGRAM
34930 -extern atomic_t cachefiles_lookup_histogram[HZ];
34931 -extern atomic_t cachefiles_mkdir_histogram[HZ];
34932 -extern atomic_t cachefiles_create_histogram[HZ];
34933 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34934 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34935 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
34936
34937 extern int __init cachefiles_proc_init(void);
34938 extern void cachefiles_proc_cleanup(void);
34939 static inline
34940 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
34941 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
34942 {
34943 unsigned long jif = jiffies - start_jif;
34944 if (jif >= HZ)
34945 jif = HZ - 1;
34946 - atomic_inc(&histogram[jif]);
34947 + atomic_inc_unchecked(&histogram[jif]);
34948 }
34949
34950 #else
34951 diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
34952 --- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
34953 +++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
34954 @@ -318,7 +318,7 @@ try_again:
34955 /* first step is to make up a grave dentry in the graveyard */
34956 sprintf(nbuffer, "%08x%08x",
34957 (uint32_t) get_seconds(),
34958 - (uint32_t) atomic_inc_return(&cache->gravecounter));
34959 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
34960
34961 /* do the multiway lock magic */
34962 trap = lock_rename(cache->graveyard, dir);
34963 diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
34964 --- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
34965 +++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
34966 @@ -14,9 +14,9 @@
34967 #include <linux/seq_file.h>
34968 #include "internal.h"
34969
34970 -atomic_t cachefiles_lookup_histogram[HZ];
34971 -atomic_t cachefiles_mkdir_histogram[HZ];
34972 -atomic_t cachefiles_create_histogram[HZ];
34973 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34974 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34975 +atomic_unchecked_t cachefiles_create_histogram[HZ];
34976
34977 /*
34978 * display the latency histogram
34979 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
34980 return 0;
34981 default:
34982 index = (unsigned long) v - 3;
34983 - x = atomic_read(&cachefiles_lookup_histogram[index]);
34984 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
34985 - z = atomic_read(&cachefiles_create_histogram[index]);
34986 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
34987 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
34988 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
34989 if (x == 0 && y == 0 && z == 0)
34990 return 0;
34991
34992 diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
34993 --- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
34994 +++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
34995 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
34996 old_fs = get_fs();
34997 set_fs(KERNEL_DS);
34998 ret = file->f_op->write(
34999 - file, (const void __user *) data, len, &pos);
35000 + file, (__force const void __user *) data, len, &pos);
35001 set_fs(old_fs);
35002 kunmap(page);
35003 if (ret != len)
35004 diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
35005 --- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
35006 +++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
35007 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35008 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35009 struct ceph_mds_client *mdsc = fsc->mdsc;
35010 unsigned frag = fpos_frag(filp->f_pos);
35011 - int off = fpos_off(filp->f_pos);
35012 + unsigned int off = fpos_off(filp->f_pos);
35013 int err;
35014 u32 ftype;
35015 struct ceph_mds_reply_info_parsed *rinfo;
35016 diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
35017 --- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
35018 +++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
35019 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
35020
35021 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
35022 #ifdef CONFIG_CIFS_STATS2
35023 - atomic_set(&totBufAllocCount, 0);
35024 - atomic_set(&totSmBufAllocCount, 0);
35025 + atomic_set_unchecked(&totBufAllocCount, 0);
35026 + atomic_set_unchecked(&totSmBufAllocCount, 0);
35027 #endif /* CONFIG_CIFS_STATS2 */
35028 spin_lock(&cifs_tcp_ses_lock);
35029 list_for_each(tmp1, &cifs_tcp_ses_list) {
35030 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35031 tcon = list_entry(tmp3,
35032 struct cifs_tcon,
35033 tcon_list);
35034 - atomic_set(&tcon->num_smbs_sent, 0);
35035 - atomic_set(&tcon->num_writes, 0);
35036 - atomic_set(&tcon->num_reads, 0);
35037 - atomic_set(&tcon->num_oplock_brks, 0);
35038 - atomic_set(&tcon->num_opens, 0);
35039 - atomic_set(&tcon->num_posixopens, 0);
35040 - atomic_set(&tcon->num_posixmkdirs, 0);
35041 - atomic_set(&tcon->num_closes, 0);
35042 - atomic_set(&tcon->num_deletes, 0);
35043 - atomic_set(&tcon->num_mkdirs, 0);
35044 - atomic_set(&tcon->num_rmdirs, 0);
35045 - atomic_set(&tcon->num_renames, 0);
35046 - atomic_set(&tcon->num_t2renames, 0);
35047 - atomic_set(&tcon->num_ffirst, 0);
35048 - atomic_set(&tcon->num_fnext, 0);
35049 - atomic_set(&tcon->num_fclose, 0);
35050 - atomic_set(&tcon->num_hardlinks, 0);
35051 - atomic_set(&tcon->num_symlinks, 0);
35052 - atomic_set(&tcon->num_locks, 0);
35053 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35054 + atomic_set_unchecked(&tcon->num_writes, 0);
35055 + atomic_set_unchecked(&tcon->num_reads, 0);
35056 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35057 + atomic_set_unchecked(&tcon->num_opens, 0);
35058 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35059 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35060 + atomic_set_unchecked(&tcon->num_closes, 0);
35061 + atomic_set_unchecked(&tcon->num_deletes, 0);
35062 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35063 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35064 + atomic_set_unchecked(&tcon->num_renames, 0);
35065 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35066 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35067 + atomic_set_unchecked(&tcon->num_fnext, 0);
35068 + atomic_set_unchecked(&tcon->num_fclose, 0);
35069 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35070 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35071 + atomic_set_unchecked(&tcon->num_locks, 0);
35072 }
35073 }
35074 }
35075 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
35076 smBufAllocCount.counter, cifs_min_small);
35077 #ifdef CONFIG_CIFS_STATS2
35078 seq_printf(m, "Total Large %d Small %d Allocations\n",
35079 - atomic_read(&totBufAllocCount),
35080 - atomic_read(&totSmBufAllocCount));
35081 + atomic_read_unchecked(&totBufAllocCount),
35082 + atomic_read_unchecked(&totSmBufAllocCount));
35083 #endif /* CONFIG_CIFS_STATS2 */
35084
35085 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
35086 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35087 if (tcon->need_reconnect)
35088 seq_puts(m, "\tDISCONNECTED ");
35089 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35090 - atomic_read(&tcon->num_smbs_sent),
35091 - atomic_read(&tcon->num_oplock_brks));
35092 + atomic_read_unchecked(&tcon->num_smbs_sent),
35093 + atomic_read_unchecked(&tcon->num_oplock_brks));
35094 seq_printf(m, "\nReads: %d Bytes: %lld",
35095 - atomic_read(&tcon->num_reads),
35096 + atomic_read_unchecked(&tcon->num_reads),
35097 (long long)(tcon->bytes_read));
35098 seq_printf(m, "\nWrites: %d Bytes: %lld",
35099 - atomic_read(&tcon->num_writes),
35100 + atomic_read_unchecked(&tcon->num_writes),
35101 (long long)(tcon->bytes_written));
35102 seq_printf(m, "\nFlushes: %d",
35103 - atomic_read(&tcon->num_flushes));
35104 + atomic_read_unchecked(&tcon->num_flushes));
35105 seq_printf(m, "\nLocks: %d HardLinks: %d "
35106 "Symlinks: %d",
35107 - atomic_read(&tcon->num_locks),
35108 - atomic_read(&tcon->num_hardlinks),
35109 - atomic_read(&tcon->num_symlinks));
35110 + atomic_read_unchecked(&tcon->num_locks),
35111 + atomic_read_unchecked(&tcon->num_hardlinks),
35112 + atomic_read_unchecked(&tcon->num_symlinks));
35113 seq_printf(m, "\nOpens: %d Closes: %d "
35114 "Deletes: %d",
35115 - atomic_read(&tcon->num_opens),
35116 - atomic_read(&tcon->num_closes),
35117 - atomic_read(&tcon->num_deletes));
35118 + atomic_read_unchecked(&tcon->num_opens),
35119 + atomic_read_unchecked(&tcon->num_closes),
35120 + atomic_read_unchecked(&tcon->num_deletes));
35121 seq_printf(m, "\nPosix Opens: %d "
35122 "Posix Mkdirs: %d",
35123 - atomic_read(&tcon->num_posixopens),
35124 - atomic_read(&tcon->num_posixmkdirs));
35125 + atomic_read_unchecked(&tcon->num_posixopens),
35126 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35127 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35128 - atomic_read(&tcon->num_mkdirs),
35129 - atomic_read(&tcon->num_rmdirs));
35130 + atomic_read_unchecked(&tcon->num_mkdirs),
35131 + atomic_read_unchecked(&tcon->num_rmdirs));
35132 seq_printf(m, "\nRenames: %d T2 Renames %d",
35133 - atomic_read(&tcon->num_renames),
35134 - atomic_read(&tcon->num_t2renames));
35135 + atomic_read_unchecked(&tcon->num_renames),
35136 + atomic_read_unchecked(&tcon->num_t2renames));
35137 seq_printf(m, "\nFindFirst: %d FNext %d "
35138 "FClose %d",
35139 - atomic_read(&tcon->num_ffirst),
35140 - atomic_read(&tcon->num_fnext),
35141 - atomic_read(&tcon->num_fclose));
35142 + atomic_read_unchecked(&tcon->num_ffirst),
35143 + atomic_read_unchecked(&tcon->num_fnext),
35144 + atomic_read_unchecked(&tcon->num_fclose));
35145 }
35146 }
35147 }
35148 diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
35149 --- linux-3.0.4/fs/cifs/cifsfs.c 2011-08-23 21:44:40.000000000 -0400
35150 +++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
35151 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
35152 cifs_req_cachep = kmem_cache_create("cifs_request",
35153 CIFSMaxBufSize +
35154 MAX_CIFS_HDR_SIZE, 0,
35155 - SLAB_HWCACHE_ALIGN, NULL);
35156 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
35157 if (cifs_req_cachep == NULL)
35158 return -ENOMEM;
35159
35160 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
35161 efficient to alloc 1 per page off the slab compared to 17K (5page)
35162 alloc of large cifs buffers even when page debugging is on */
35163 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
35164 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
35165 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
35166 NULL);
35167 if (cifs_sm_req_cachep == NULL) {
35168 mempool_destroy(cifs_req_poolp);
35169 @@ -1106,8 +1106,8 @@ init_cifs(void)
35170 atomic_set(&bufAllocCount, 0);
35171 atomic_set(&smBufAllocCount, 0);
35172 #ifdef CONFIG_CIFS_STATS2
35173 - atomic_set(&totBufAllocCount, 0);
35174 - atomic_set(&totSmBufAllocCount, 0);
35175 + atomic_set_unchecked(&totBufAllocCount, 0);
35176 + atomic_set_unchecked(&totSmBufAllocCount, 0);
35177 #endif /* CONFIG_CIFS_STATS2 */
35178
35179 atomic_set(&midCount, 0);
35180 diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
35181 --- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
35182 +++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
35183 @@ -381,28 +381,28 @@ struct cifs_tcon {
35184 __u16 Flags; /* optional support bits */
35185 enum statusEnum tidStatus;
35186 #ifdef CONFIG_CIFS_STATS
35187 - atomic_t num_smbs_sent;
35188 - atomic_t num_writes;
35189 - atomic_t num_reads;
35190 - atomic_t num_flushes;
35191 - atomic_t num_oplock_brks;
35192 - atomic_t num_opens;
35193 - atomic_t num_closes;
35194 - atomic_t num_deletes;
35195 - atomic_t num_mkdirs;
35196 - atomic_t num_posixopens;
35197 - atomic_t num_posixmkdirs;
35198 - atomic_t num_rmdirs;
35199 - atomic_t num_renames;
35200 - atomic_t num_t2renames;
35201 - atomic_t num_ffirst;
35202 - atomic_t num_fnext;
35203 - atomic_t num_fclose;
35204 - atomic_t num_hardlinks;
35205 - atomic_t num_symlinks;
35206 - atomic_t num_locks;
35207 - atomic_t num_acl_get;
35208 - atomic_t num_acl_set;
35209 + atomic_unchecked_t num_smbs_sent;
35210 + atomic_unchecked_t num_writes;
35211 + atomic_unchecked_t num_reads;
35212 + atomic_unchecked_t num_flushes;
35213 + atomic_unchecked_t num_oplock_brks;
35214 + atomic_unchecked_t num_opens;
35215 + atomic_unchecked_t num_closes;
35216 + atomic_unchecked_t num_deletes;
35217 + atomic_unchecked_t num_mkdirs;
35218 + atomic_unchecked_t num_posixopens;
35219 + atomic_unchecked_t num_posixmkdirs;
35220 + atomic_unchecked_t num_rmdirs;
35221 + atomic_unchecked_t num_renames;
35222 + atomic_unchecked_t num_t2renames;
35223 + atomic_unchecked_t num_ffirst;
35224 + atomic_unchecked_t num_fnext;
35225 + atomic_unchecked_t num_fclose;
35226 + atomic_unchecked_t num_hardlinks;
35227 + atomic_unchecked_t num_symlinks;
35228 + atomic_unchecked_t num_locks;
35229 + atomic_unchecked_t num_acl_get;
35230 + atomic_unchecked_t num_acl_set;
35231 #ifdef CONFIG_CIFS_STATS2
35232 unsigned long long time_writes;
35233 unsigned long long time_reads;
35234 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
35235 }
35236
35237 #ifdef CONFIG_CIFS_STATS
35238 -#define cifs_stats_inc atomic_inc
35239 +#define cifs_stats_inc atomic_inc_unchecked
35240
35241 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
35242 unsigned int bytes)
35243 @@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
35244 /* Various Debug counters */
35245 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
35246 #ifdef CONFIG_CIFS_STATS2
35247 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
35248 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
35249 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
35250 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
35251 #endif
35252 GLOBAL_EXTERN atomic_t smBufAllocCount;
35253 GLOBAL_EXTERN atomic_t midCount;
35254 diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
35255 --- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
35256 +++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
35257 @@ -587,7 +587,7 @@ symlink_exit:
35258
35259 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35260 {
35261 - char *p = nd_get_link(nd);
35262 + const char *p = nd_get_link(nd);
35263 if (!IS_ERR(p))
35264 kfree(p);
35265 }
35266 diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
35267 --- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
35268 +++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
35269 @@ -156,7 +156,7 @@ cifs_buf_get(void)
35270 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
35271 atomic_inc(&bufAllocCount);
35272 #ifdef CONFIG_CIFS_STATS2
35273 - atomic_inc(&totBufAllocCount);
35274 + atomic_inc_unchecked(&totBufAllocCount);
35275 #endif /* CONFIG_CIFS_STATS2 */
35276 }
35277
35278 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
35279 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
35280 atomic_inc(&smBufAllocCount);
35281 #ifdef CONFIG_CIFS_STATS2
35282 - atomic_inc(&totSmBufAllocCount);
35283 + atomic_inc_unchecked(&totSmBufAllocCount);
35284 #endif /* CONFIG_CIFS_STATS2 */
35285
35286 }
35287 diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
35288 --- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
35289 +++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
35290 @@ -24,7 +24,7 @@
35291 #include "coda_linux.h"
35292 #include "coda_cache.h"
35293
35294 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35295 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35296
35297 /* replace or extend an acl cache hit */
35298 void coda_cache_enter(struct inode *inode, int mask)
35299 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35300 struct coda_inode_info *cii = ITOC(inode);
35301
35302 spin_lock(&cii->c_lock);
35303 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35304 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35305 if (cii->c_uid != current_fsuid()) {
35306 cii->c_uid = current_fsuid();
35307 cii->c_cached_perm = mask;
35308 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35309 {
35310 struct coda_inode_info *cii = ITOC(inode);
35311 spin_lock(&cii->c_lock);
35312 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35313 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35314 spin_unlock(&cii->c_lock);
35315 }
35316
35317 /* remove all acl caches */
35318 void coda_cache_clear_all(struct super_block *sb)
35319 {
35320 - atomic_inc(&permission_epoch);
35321 + atomic_inc_unchecked(&permission_epoch);
35322 }
35323
35324
35325 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35326 spin_lock(&cii->c_lock);
35327 hit = (mask & cii->c_cached_perm) == mask &&
35328 cii->c_uid == current_fsuid() &&
35329 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35330 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35331 spin_unlock(&cii->c_lock);
35332
35333 return hit;
35334 diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
35335 --- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
35336 +++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
35337 @@ -30,11 +30,13 @@
35338 #undef elf_phdr
35339 #undef elf_shdr
35340 #undef elf_note
35341 +#undef elf_dyn
35342 #undef elf_addr_t
35343 #define elfhdr elf32_hdr
35344 #define elf_phdr elf32_phdr
35345 #define elf_shdr elf32_shdr
35346 #define elf_note elf32_note
35347 +#define elf_dyn Elf32_Dyn
35348 #define elf_addr_t Elf32_Addr
35349
35350 /*
35351 diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
35352 --- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
35353 +++ linux-3.0.4/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
35354 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35355 goto out;
35356
35357 ret = -EINVAL;
35358 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35359 + if (nr_segs > UIO_MAXIOV)
35360 goto out;
35361 if (nr_segs > fast_segs) {
35362 ret = -ENOMEM;
35363 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35364
35365 struct compat_readdir_callback {
35366 struct compat_old_linux_dirent __user *dirent;
35367 + struct file * file;
35368 int result;
35369 };
35370
35371 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35372 buf->result = -EOVERFLOW;
35373 return -EOVERFLOW;
35374 }
35375 +
35376 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35377 + return 0;
35378 +
35379 buf->result++;
35380 dirent = buf->dirent;
35381 if (!access_ok(VERIFY_WRITE, dirent,
35382 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35383
35384 buf.result = 0;
35385 buf.dirent = dirent;
35386 + buf.file = file;
35387
35388 error = vfs_readdir(file, compat_fillonedir, &buf);
35389 if (buf.result)
35390 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35391 struct compat_getdents_callback {
35392 struct compat_linux_dirent __user *current_dir;
35393 struct compat_linux_dirent __user *previous;
35394 + struct file * file;
35395 int count;
35396 int error;
35397 };
35398 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35399 buf->error = -EOVERFLOW;
35400 return -EOVERFLOW;
35401 }
35402 +
35403 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35404 + return 0;
35405 +
35406 dirent = buf->previous;
35407 if (dirent) {
35408 if (__put_user(offset, &dirent->d_off))
35409 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35410 buf.previous = NULL;
35411 buf.count = count;
35412 buf.error = 0;
35413 + buf.file = file;
35414
35415 error = vfs_readdir(file, compat_filldir, &buf);
35416 if (error >= 0)
35417 @@ -1006,6 +1018,7 @@ out:
35418 struct compat_getdents_callback64 {
35419 struct linux_dirent64 __user *current_dir;
35420 struct linux_dirent64 __user *previous;
35421 + struct file * file;
35422 int count;
35423 int error;
35424 };
35425 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35426 buf->error = -EINVAL; /* only used if we fail.. */
35427 if (reclen > buf->count)
35428 return -EINVAL;
35429 +
35430 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35431 + return 0;
35432 +
35433 dirent = buf->previous;
35434
35435 if (dirent) {
35436 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35437 buf.previous = NULL;
35438 buf.count = count;
35439 buf.error = 0;
35440 + buf.file = file;
35441
35442 error = vfs_readdir(file, compat_filldir64, &buf);
35443 if (error >= 0)
35444 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
35445 struct fdtable *fdt;
35446 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35447
35448 + pax_track_stack();
35449 +
35450 if (n < 0)
35451 goto out_nofds;
35452
35453 diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
35454 --- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35455 +++ linux-3.0.4/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
35456 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35457
35458 err = get_user(palp, &up->palette);
35459 err |= get_user(length, &up->length);
35460 + if (err)
35461 + return -EFAULT;
35462
35463 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35464 err = put_user(compat_ptr(palp), &up_native->palette);
35465 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35466 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35467 {
35468 unsigned int a, b;
35469 - a = *(unsigned int *)p;
35470 - b = *(unsigned int *)q;
35471 + a = *(const unsigned int *)p;
35472 + b = *(const unsigned int *)q;
35473 if (a > b)
35474 return 1;
35475 if (a < b)
35476 diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
35477 --- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
35478 +++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
35479 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35480 }
35481 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35482 struct configfs_dirent *next;
35483 - const char * name;
35484 + const unsigned char * name;
35485 + char d_name[sizeof(next->s_dentry->d_iname)];
35486 int len;
35487 struct inode *inode = NULL;
35488
35489 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35490 continue;
35491
35492 name = configfs_get_name(next);
35493 - len = strlen(name);
35494 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35495 + len = next->s_dentry->d_name.len;
35496 + memcpy(d_name, name, len);
35497 + name = d_name;
35498 + } else
35499 + len = strlen(name);
35500
35501 /*
35502 * We'll have a dentry and an inode for
35503 diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
35504 --- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
35505 +++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
35506 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
35507 mempages -= reserve;
35508
35509 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35510 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35511 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35512
35513 dcache_init();
35514 inode_init();
35515 diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
35516 --- linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
35517 +++ linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
35518 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
35519 old_fs = get_fs();
35520 set_fs(get_ds());
35521 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35522 - (char __user *)lower_buf,
35523 + (__force char __user *)lower_buf,
35524 lower_bufsiz);
35525 set_fs(old_fs);
35526 if (rc < 0)
35527 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
35528 }
35529 old_fs = get_fs();
35530 set_fs(get_ds());
35531 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35532 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35533 set_fs(old_fs);
35534 if (rc < 0) {
35535 kfree(buf);
35536 @@ -765,7 +765,7 @@ out:
35537 static void
35538 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35539 {
35540 - char *buf = nd_get_link(nd);
35541 + const char *buf = nd_get_link(nd);
35542 if (!IS_ERR(buf)) {
35543 /* Free the char* */
35544 kfree(buf);
35545 diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
35546 --- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
35547 +++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
35548 @@ -328,7 +328,7 @@ check_list:
35549 goto out_unlock_msg_ctx;
35550 i = 5;
35551 if (msg_ctx->msg) {
35552 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35553 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35554 goto out_unlock_msg_ctx;
35555 i += packet_length_size;
35556 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35557 diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
35558 --- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
35559 +++ linux-3.0.4/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
35560 @@ -55,12 +55,24 @@
35561 #include <linux/pipe_fs_i.h>
35562 #include <linux/oom.h>
35563 #include <linux/compat.h>
35564 +#include <linux/random.h>
35565 +#include <linux/seq_file.h>
35566 +
35567 +#ifdef CONFIG_PAX_REFCOUNT
35568 +#include <linux/kallsyms.h>
35569 +#include <linux/kdebug.h>
35570 +#endif
35571
35572 #include <asm/uaccess.h>
35573 #include <asm/mmu_context.h>
35574 #include <asm/tlb.h>
35575 #include "internal.h"
35576
35577 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35578 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35579 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35580 +#endif
35581 +
35582 int core_uses_pid;
35583 char core_pattern[CORENAME_MAX_SIZE] = "core";
35584 unsigned int core_pipe_limit;
35585 @@ -70,7 +82,7 @@ struct core_name {
35586 char *corename;
35587 int used, size;
35588 };
35589 -static atomic_t call_count = ATOMIC_INIT(1);
35590 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35591
35592 /* The maximal length of core_pattern is also specified in sysctl.c */
35593
35594 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35595 char *tmp = getname(library);
35596 int error = PTR_ERR(tmp);
35597 static const struct open_flags uselib_flags = {
35598 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35599 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35600 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35601 .intent = LOOKUP_OPEN
35602 };
35603 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
35604 int write)
35605 {
35606 struct page *page;
35607 - int ret;
35608
35609 -#ifdef CONFIG_STACK_GROWSUP
35610 - if (write) {
35611 - ret = expand_downwards(bprm->vma, pos);
35612 - if (ret < 0)
35613 - return NULL;
35614 - }
35615 -#endif
35616 - ret = get_user_pages(current, bprm->mm, pos,
35617 - 1, write, 1, &page, NULL);
35618 - if (ret <= 0)
35619 + if (0 > expand_downwards(bprm->vma, pos))
35620 + return NULL;
35621 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35622 return NULL;
35623
35624 if (write) {
35625 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
35626 vma->vm_end = STACK_TOP_MAX;
35627 vma->vm_start = vma->vm_end - PAGE_SIZE;
35628 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35629 +
35630 +#ifdef CONFIG_PAX_SEGMEXEC
35631 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35632 +#endif
35633 +
35634 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35635 INIT_LIST_HEAD(&vma->anon_vma_chain);
35636
35637 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
35638 mm->stack_vm = mm->total_vm = 1;
35639 up_write(&mm->mmap_sem);
35640 bprm->p = vma->vm_end - sizeof(void *);
35641 +
35642 +#ifdef CONFIG_PAX_RANDUSTACK
35643 + if (randomize_va_space)
35644 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35645 +#endif
35646 +
35647 return 0;
35648 err:
35649 up_write(&mm->mmap_sem);
35650 @@ -403,19 +418,7 @@ err:
35651 return err;
35652 }
35653
35654 -struct user_arg_ptr {
35655 -#ifdef CONFIG_COMPAT
35656 - bool is_compat;
35657 -#endif
35658 - union {
35659 - const char __user *const __user *native;
35660 -#ifdef CONFIG_COMPAT
35661 - compat_uptr_t __user *compat;
35662 -#endif
35663 - } ptr;
35664 -};
35665 -
35666 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35667 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35668 {
35669 const char __user *native;
35670
35671 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
35672 int r;
35673 mm_segment_t oldfs = get_fs();
35674 struct user_arg_ptr argv = {
35675 - .ptr.native = (const char __user *const __user *)__argv,
35676 + .ptr.native = (__force const char __user *const __user *)__argv,
35677 };
35678
35679 set_fs(KERNEL_DS);
35680 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
35681 unsigned long new_end = old_end - shift;
35682 struct mmu_gather tlb;
35683
35684 - BUG_ON(new_start > new_end);
35685 + if (new_start >= new_end || new_start < mmap_min_addr)
35686 + return -ENOMEM;
35687
35688 /*
35689 * ensure there are no vmas between where we want to go
35690 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
35691 if (vma != find_vma(mm, new_start))
35692 return -EFAULT;
35693
35694 +#ifdef CONFIG_PAX_SEGMEXEC
35695 + BUG_ON(pax_find_mirror_vma(vma));
35696 +#endif
35697 +
35698 /*
35699 * cover the whole range: [new_start, old_end)
35700 */
35701 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
35702 stack_top = arch_align_stack(stack_top);
35703 stack_top = PAGE_ALIGN(stack_top);
35704
35705 - if (unlikely(stack_top < mmap_min_addr) ||
35706 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35707 - return -ENOMEM;
35708 -
35709 stack_shift = vma->vm_end - stack_top;
35710
35711 bprm->p -= stack_shift;
35712 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
35713 bprm->exec -= stack_shift;
35714
35715 down_write(&mm->mmap_sem);
35716 +
35717 + /* Move stack pages down in memory. */
35718 + if (stack_shift) {
35719 + ret = shift_arg_pages(vma, stack_shift);
35720 + if (ret)
35721 + goto out_unlock;
35722 + }
35723 +
35724 vm_flags = VM_STACK_FLAGS;
35725
35726 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35727 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35728 + vm_flags &= ~VM_EXEC;
35729 +
35730 +#ifdef CONFIG_PAX_MPROTECT
35731 + if (mm->pax_flags & MF_PAX_MPROTECT)
35732 + vm_flags &= ~VM_MAYEXEC;
35733 +#endif
35734 +
35735 + }
35736 +#endif
35737 +
35738 /*
35739 * Adjust stack execute permissions; explicitly enable for
35740 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35741 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
35742 goto out_unlock;
35743 BUG_ON(prev != vma);
35744
35745 - /* Move stack pages down in memory. */
35746 - if (stack_shift) {
35747 - ret = shift_arg_pages(vma, stack_shift);
35748 - if (ret)
35749 - goto out_unlock;
35750 - }
35751 -
35752 /* mprotect_fixup is overkill to remove the temporary stack flags */
35753 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35754
35755 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
35756 struct file *file;
35757 int err;
35758 static const struct open_flags open_exec_flags = {
35759 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35760 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35761 .acc_mode = MAY_EXEC | MAY_OPEN,
35762 .intent = LOOKUP_OPEN
35763 };
35764 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
35765 old_fs = get_fs();
35766 set_fs(get_ds());
35767 /* The cast to a user pointer is valid due to the set_fs() */
35768 - result = vfs_read(file, (void __user *)addr, count, &pos);
35769 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35770 set_fs(old_fs);
35771 return result;
35772 }
35773 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
35774 }
35775 rcu_read_unlock();
35776
35777 - if (p->fs->users > n_fs) {
35778 + if (atomic_read(&p->fs->users) > n_fs) {
35779 bprm->unsafe |= LSM_UNSAFE_SHARE;
35780 } else {
35781 res = -EAGAIN;
35782 @@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
35783 struct user_arg_ptr envp,
35784 struct pt_regs *regs)
35785 {
35786 +#ifdef CONFIG_GRKERNSEC
35787 + struct file *old_exec_file;
35788 + struct acl_subject_label *old_acl;
35789 + struct rlimit old_rlim[RLIM_NLIMITS];
35790 +#endif
35791 struct linux_binprm *bprm;
35792 struct file *file;
35793 struct files_struct *displaced;
35794 bool clear_in_exec;
35795 int retval;
35796 + const struct cred *cred = current_cred();
35797 +
35798 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35799 +
35800 + /*
35801 + * We move the actual failure in case of RLIMIT_NPROC excess from
35802 + * set*uid() to execve() because too many poorly written programs
35803 + * don't check setuid() return code. Here we additionally recheck
35804 + * whether NPROC limit is still exceeded.
35805 + */
35806 + if ((current->flags & PF_NPROC_EXCEEDED) &&
35807 + atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
35808 + retval = -EAGAIN;
35809 + goto out_ret;
35810 + }
35811 +
35812 + /* We're below the limit (still or again), so we don't want to make
35813 + * further execve() calls fail. */
35814 + current->flags &= ~PF_NPROC_EXCEEDED;
35815
35816 retval = unshare_files(&displaced);
35817 if (retval)
35818 @@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
35819 bprm->filename = filename;
35820 bprm->interp = filename;
35821
35822 + if (gr_process_user_ban()) {
35823 + retval = -EPERM;
35824 + goto out_file;
35825 + }
35826 +
35827 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35828 + retval = -EACCES;
35829 + goto out_file;
35830 + }
35831 +
35832 retval = bprm_mm_init(bprm);
35833 if (retval)
35834 goto out_file;
35835 @@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
35836 if (retval < 0)
35837 goto out;
35838
35839 + if (!gr_tpe_allow(file)) {
35840 + retval = -EACCES;
35841 + goto out;
35842 + }
35843 +
35844 + if (gr_check_crash_exec(file)) {
35845 + retval = -EACCES;
35846 + goto out;
35847 + }
35848 +
35849 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35850 +
35851 + gr_handle_exec_args(bprm, argv);
35852 +
35853 +#ifdef CONFIG_GRKERNSEC
35854 + old_acl = current->acl;
35855 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35856 + old_exec_file = current->exec_file;
35857 + get_file(file);
35858 + current->exec_file = file;
35859 +#endif
35860 +
35861 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35862 + bprm->unsafe & LSM_UNSAFE_SHARE);
35863 + if (retval < 0)
35864 + goto out_fail;
35865 +
35866 retval = search_binary_handler(bprm,regs);
35867 if (retval < 0)
35868 - goto out;
35869 + goto out_fail;
35870 +#ifdef CONFIG_GRKERNSEC
35871 + if (old_exec_file)
35872 + fput(old_exec_file);
35873 +#endif
35874
35875 /* execve succeeded */
35876 current->fs->in_exec = 0;
35877 @@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
35878 put_files_struct(displaced);
35879 return retval;
35880
35881 +out_fail:
35882 +#ifdef CONFIG_GRKERNSEC
35883 + current->acl = old_acl;
35884 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35885 + fput(current->exec_file);
35886 + current->exec_file = old_exec_file;
35887 +#endif
35888 +
35889 out:
35890 if (bprm->mm) {
35891 acct_arg_size(bprm, 0);
35892 @@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
35893 {
35894 char *old_corename = cn->corename;
35895
35896 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35897 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35898 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35899
35900 if (!cn->corename) {
35901 @@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
35902 int pid_in_pattern = 0;
35903 int err = 0;
35904
35905 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35906 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35907 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35908 cn->used = 0;
35909
35910 @@ -1758,6 +1848,219 @@ out:
35911 return ispipe;
35912 }
35913
35914 +int pax_check_flags(unsigned long *flags)
35915 +{
35916 + int retval = 0;
35917 +
35918 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35919 + if (*flags & MF_PAX_SEGMEXEC)
35920 + {
35921 + *flags &= ~MF_PAX_SEGMEXEC;
35922 + retval = -EINVAL;
35923 + }
35924 +#endif
35925 +
35926 + if ((*flags & MF_PAX_PAGEEXEC)
35927 +
35928 +#ifdef CONFIG_PAX_PAGEEXEC
35929 + && (*flags & MF_PAX_SEGMEXEC)
35930 +#endif
35931 +
35932 + )
35933 + {
35934 + *flags &= ~MF_PAX_PAGEEXEC;
35935 + retval = -EINVAL;
35936 + }
35937 +
35938 + if ((*flags & MF_PAX_MPROTECT)
35939 +
35940 +#ifdef CONFIG_PAX_MPROTECT
35941 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35942 +#endif
35943 +
35944 + )
35945 + {
35946 + *flags &= ~MF_PAX_MPROTECT;
35947 + retval = -EINVAL;
35948 + }
35949 +
35950 + if ((*flags & MF_PAX_EMUTRAMP)
35951 +
35952 +#ifdef CONFIG_PAX_EMUTRAMP
35953 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35954 +#endif
35955 +
35956 + )
35957 + {
35958 + *flags &= ~MF_PAX_EMUTRAMP;
35959 + retval = -EINVAL;
35960 + }
35961 +
35962 + return retval;
35963 +}
35964 +
35965 +EXPORT_SYMBOL(pax_check_flags);
35966 +
35967 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35968 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
35969 +{
35970 + struct task_struct *tsk = current;
35971 + struct mm_struct *mm = current->mm;
35972 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
35973 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
35974 + char *path_exec = NULL;
35975 + char *path_fault = NULL;
35976 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
35977 +
35978 + if (buffer_exec && buffer_fault) {
35979 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
35980 +
35981 + down_read(&mm->mmap_sem);
35982 + vma = mm->mmap;
35983 + while (vma && (!vma_exec || !vma_fault)) {
35984 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
35985 + vma_exec = vma;
35986 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
35987 + vma_fault = vma;
35988 + vma = vma->vm_next;
35989 + }
35990 + if (vma_exec) {
35991 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
35992 + if (IS_ERR(path_exec))
35993 + path_exec = "<path too long>";
35994 + else {
35995 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
35996 + if (path_exec) {
35997 + *path_exec = 0;
35998 + path_exec = buffer_exec;
35999 + } else
36000 + path_exec = "<path too long>";
36001 + }
36002 + }
36003 + if (vma_fault) {
36004 + start = vma_fault->vm_start;
36005 + end = vma_fault->vm_end;
36006 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36007 + if (vma_fault->vm_file) {
36008 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36009 + if (IS_ERR(path_fault))
36010 + path_fault = "<path too long>";
36011 + else {
36012 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36013 + if (path_fault) {
36014 + *path_fault = 0;
36015 + path_fault = buffer_fault;
36016 + } else
36017 + path_fault = "<path too long>";
36018 + }
36019 + } else
36020 + path_fault = "<anonymous mapping>";
36021 + }
36022 + up_read(&mm->mmap_sem);
36023 + }
36024 + if (tsk->signal->curr_ip)
36025 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36026 + else
36027 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36028 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36029 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36030 + task_uid(tsk), task_euid(tsk), pc, sp);
36031 + free_page((unsigned long)buffer_exec);
36032 + free_page((unsigned long)buffer_fault);
36033 + pax_report_insns(pc, sp);
36034 + do_coredump(SIGKILL, SIGKILL, regs);
36035 +}
36036 +#endif
36037 +
36038 +#ifdef CONFIG_PAX_REFCOUNT
36039 +void pax_report_refcount_overflow(struct pt_regs *regs)
36040 +{
36041 + if (current->signal->curr_ip)
36042 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36043 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36044 + else
36045 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36046 + current->comm, task_pid_nr(current), current_uid(), current_euid());
36047 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36048 + show_regs(regs);
36049 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36050 +}
36051 +#endif
36052 +
36053 +#ifdef CONFIG_PAX_USERCOPY
36054 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36055 +int object_is_on_stack(const void *obj, unsigned long len)
36056 +{
36057 + const void * const stack = task_stack_page(current);
36058 + const void * const stackend = stack + THREAD_SIZE;
36059 +
36060 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36061 + const void *frame = NULL;
36062 + const void *oldframe;
36063 +#endif
36064 +
36065 + if (obj + len < obj)
36066 + return -1;
36067 +
36068 + if (obj + len <= stack || stackend <= obj)
36069 + return 0;
36070 +
36071 + if (obj < stack || stackend < obj + len)
36072 + return -1;
36073 +
36074 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36075 + oldframe = __builtin_frame_address(1);
36076 + if (oldframe)
36077 + frame = __builtin_frame_address(2);
36078 + /*
36079 + low ----------------------------------------------> high
36080 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
36081 + ^----------------^
36082 + allow copies only within here
36083 + */
36084 + while (stack <= frame && frame < stackend) {
36085 + /* if obj + len extends past the last frame, this
36086 + check won't pass and the next frame will be 0,
36087 + causing us to bail out and correctly report
36088 + the copy as invalid
36089 + */
36090 + if (obj + len <= frame)
36091 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36092 + oldframe = frame;
36093 + frame = *(const void * const *)frame;
36094 + }
36095 + return -1;
36096 +#else
36097 + return 1;
36098 +#endif
36099 +}
36100 +
36101 +
36102 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36103 +{
36104 + if (current->signal->curr_ip)
36105 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36106 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36107 + else
36108 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36109 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36110 + dump_stack();
36111 + gr_handle_kernel_exploit();
36112 + do_group_exit(SIGKILL);
36113 +}
36114 +#endif
36115 +
36116 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36117 +void pax_track_stack(void)
36118 +{
36119 + unsigned long sp = (unsigned long)&sp;
36120 + if (sp < current_thread_info()->lowest_stack &&
36121 + sp > (unsigned long)task_stack_page(current))
36122 + current_thread_info()->lowest_stack = sp;
36123 +}
36124 +EXPORT_SYMBOL(pax_track_stack);
36125 +#endif
36126 +
36127 static int zap_process(struct task_struct *start, int exit_code)
36128 {
36129 struct task_struct *t;
36130 @@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
36131 pipe = file->f_path.dentry->d_inode->i_pipe;
36132
36133 pipe_lock(pipe);
36134 - pipe->readers++;
36135 - pipe->writers--;
36136 + atomic_inc(&pipe->readers);
36137 + atomic_dec(&pipe->writers);
36138
36139 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36140 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36141 wake_up_interruptible_sync(&pipe->wait);
36142 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36143 pipe_wait(pipe);
36144 }
36145
36146 - pipe->readers--;
36147 - pipe->writers++;
36148 + atomic_dec(&pipe->readers);
36149 + atomic_inc(&pipe->writers);
36150 pipe_unlock(pipe);
36151
36152 }
36153 @@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
36154 int retval = 0;
36155 int flag = 0;
36156 int ispipe;
36157 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36158 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36159 struct coredump_params cprm = {
36160 .signr = signr,
36161 .regs = regs,
36162 @@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
36163
36164 audit_core_dumps(signr);
36165
36166 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36167 + gr_handle_brute_attach(current, cprm.mm_flags);
36168 +
36169 binfmt = mm->binfmt;
36170 if (!binfmt || !binfmt->core_dump)
36171 goto fail;
36172 @@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
36173 goto fail_corename;
36174 }
36175
36176 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36177 +
36178 if (ispipe) {
36179 int dump_count;
36180 char **helper_argv;
36181 @@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
36182 }
36183 cprm.limit = RLIM_INFINITY;
36184
36185 - dump_count = atomic_inc_return(&core_dump_count);
36186 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36187 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36188 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36189 task_tgid_vnr(current), current->comm);
36190 @@ -2192,7 +2500,7 @@ close_fail:
36191 filp_close(cprm.file, NULL);
36192 fail_dropcount:
36193 if (ispipe)
36194 - atomic_dec(&core_dump_count);
36195 + atomic_dec_unchecked(&core_dump_count);
36196 fail_unlock:
36197 kfree(cn.corename);
36198 fail_corename:
36199 diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
36200 --- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
36201 +++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
36202 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36203
36204 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36205 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36206 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36207 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36208 sbi->s_resuid != current_fsuid() &&
36209 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36210 return 0;
36211 diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
36212 --- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
36213 +++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
36214 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36215
36216 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36217 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36218 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36219 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36220 sbi->s_resuid != current_fsuid() &&
36221 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36222 return 0;
36223 diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
36224 --- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
36225 +++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
36226 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
36227 /* Hm, nope. Are (enough) root reserved blocks available? */
36228 if (sbi->s_resuid == current_fsuid() ||
36229 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36230 - capable(CAP_SYS_RESOURCE) ||
36231 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
36232 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
36233 + capable_nolog(CAP_SYS_RESOURCE)) {
36234
36235 if (free_blocks >= (nblocks + dirty_blocks))
36236 return 1;
36237 diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
36238 --- linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
36239 +++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
36240 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
36241 unsigned long s_mb_last_start;
36242
36243 /* stats for buddy allocator */
36244 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36245 - atomic_t s_bal_success; /* we found long enough chunks */
36246 - atomic_t s_bal_allocated; /* in blocks */
36247 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36248 - atomic_t s_bal_goals; /* goal hits */
36249 - atomic_t s_bal_breaks; /* too long searches */
36250 - atomic_t s_bal_2orders; /* 2^order hits */
36251 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36252 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36253 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36254 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36255 + atomic_unchecked_t s_bal_goals; /* goal hits */
36256 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36257 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36258 spinlock_t s_bal_lock;
36259 unsigned long s_mb_buddies_generated;
36260 unsigned long long s_mb_generation_time;
36261 - atomic_t s_mb_lost_chunks;
36262 - atomic_t s_mb_preallocated;
36263 - atomic_t s_mb_discarded;
36264 + atomic_unchecked_t s_mb_lost_chunks;
36265 + atomic_unchecked_t s_mb_preallocated;
36266 + atomic_unchecked_t s_mb_discarded;
36267 atomic_t s_lock_busy;
36268
36269 /* locality groups */
36270 diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
36271 --- linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
36272 +++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
36273 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
36274 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36275
36276 if (EXT4_SB(sb)->s_mb_stats)
36277 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36278 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36279
36280 break;
36281 }
36282 @@ -2087,7 +2087,7 @@ repeat:
36283 ac->ac_status = AC_STATUS_CONTINUE;
36284 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36285 cr = 3;
36286 - atomic_inc(&sbi->s_mb_lost_chunks);
36287 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36288 goto repeat;
36289 }
36290 }
36291 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
36292 ext4_grpblk_t counters[16];
36293 } sg;
36294
36295 + pax_track_stack();
36296 +
36297 group--;
36298 if (group == 0)
36299 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36300 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
36301 if (sbi->s_mb_stats) {
36302 printk(KERN_INFO
36303 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36304 - atomic_read(&sbi->s_bal_allocated),
36305 - atomic_read(&sbi->s_bal_reqs),
36306 - atomic_read(&sbi->s_bal_success));
36307 + atomic_read_unchecked(&sbi->s_bal_allocated),
36308 + atomic_read_unchecked(&sbi->s_bal_reqs),
36309 + atomic_read_unchecked(&sbi->s_bal_success));
36310 printk(KERN_INFO
36311 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36312 "%u 2^N hits, %u breaks, %u lost\n",
36313 - atomic_read(&sbi->s_bal_ex_scanned),
36314 - atomic_read(&sbi->s_bal_goals),
36315 - atomic_read(&sbi->s_bal_2orders),
36316 - atomic_read(&sbi->s_bal_breaks),
36317 - atomic_read(&sbi->s_mb_lost_chunks));
36318 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36319 + atomic_read_unchecked(&sbi->s_bal_goals),
36320 + atomic_read_unchecked(&sbi->s_bal_2orders),
36321 + atomic_read_unchecked(&sbi->s_bal_breaks),
36322 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36323 printk(KERN_INFO
36324 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36325 sbi->s_mb_buddies_generated++,
36326 sbi->s_mb_generation_time);
36327 printk(KERN_INFO
36328 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36329 - atomic_read(&sbi->s_mb_preallocated),
36330 - atomic_read(&sbi->s_mb_discarded));
36331 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36332 + atomic_read_unchecked(&sbi->s_mb_discarded));
36333 }
36334
36335 free_percpu(sbi->s_locality_groups);
36336 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
36337 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36338
36339 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36340 - atomic_inc(&sbi->s_bal_reqs);
36341 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36342 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36343 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36344 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36345 - atomic_inc(&sbi->s_bal_success);
36346 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36347 + atomic_inc_unchecked(&sbi->s_bal_success);
36348 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36349 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36350 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36351 - atomic_inc(&sbi->s_bal_goals);
36352 + atomic_inc_unchecked(&sbi->s_bal_goals);
36353 if (ac->ac_found > sbi->s_mb_max_to_scan)
36354 - atomic_inc(&sbi->s_bal_breaks);
36355 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36356 }
36357
36358 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36359 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36360 trace_ext4_mb_new_inode_pa(ac, pa);
36361
36362 ext4_mb_use_inode_pa(ac, pa);
36363 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36364 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36365
36366 ei = EXT4_I(ac->ac_inode);
36367 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36368 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36369 trace_ext4_mb_new_group_pa(ac, pa);
36370
36371 ext4_mb_use_group_pa(ac, pa);
36372 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36373 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36374
36375 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36376 lg = ac->ac_lg;
36377 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36378 * from the bitmap and continue.
36379 */
36380 }
36381 - atomic_add(free, &sbi->s_mb_discarded);
36382 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36383
36384 return err;
36385 }
36386 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36387 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36388 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36389 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36390 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36391 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36392 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36393
36394 return 0;
36395 diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
36396 --- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
36397 +++ linux-3.0.4/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
36398 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36399 if (err)
36400 return err;
36401
36402 + if (gr_handle_chroot_fowner(pid, type))
36403 + return -ENOENT;
36404 + if (gr_check_protected_task_fowner(pid, type))
36405 + return -EACCES;
36406 +
36407 f_modown(filp, pid, type, force);
36408 return 0;
36409 }
36410 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36411 switch (cmd) {
36412 case F_DUPFD:
36413 case F_DUPFD_CLOEXEC:
36414 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36415 if (arg >= rlimit(RLIMIT_NOFILE))
36416 break;
36417 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36418 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36419 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36420 * is defined as O_NONBLOCK on some platforms and not on others.
36421 */
36422 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36423 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36424 O_RDONLY | O_WRONLY | O_RDWR |
36425 O_CREAT | O_EXCL | O_NOCTTY |
36426 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36427 __O_SYNC | O_DSYNC | FASYNC |
36428 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36429 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36430 - __FMODE_EXEC | O_PATH
36431 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36432 ));
36433
36434 fasync_cache = kmem_cache_create("fasync_cache",
36435 diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
36436 --- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
36437 +++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
36438 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36439 */
36440 filp->f_op = &read_pipefifo_fops;
36441 pipe->r_counter++;
36442 - if (pipe->readers++ == 0)
36443 + if (atomic_inc_return(&pipe->readers) == 1)
36444 wake_up_partner(inode);
36445
36446 - if (!pipe->writers) {
36447 + if (!atomic_read(&pipe->writers)) {
36448 if ((filp->f_flags & O_NONBLOCK)) {
36449 /* suppress POLLHUP until we have
36450 * seen a writer */
36451 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36452 * errno=ENXIO when there is no process reading the FIFO.
36453 */
36454 ret = -ENXIO;
36455 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36456 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36457 goto err;
36458
36459 filp->f_op = &write_pipefifo_fops;
36460 pipe->w_counter++;
36461 - if (!pipe->writers++)
36462 + if (atomic_inc_return(&pipe->writers) == 1)
36463 wake_up_partner(inode);
36464
36465 - if (!pipe->readers) {
36466 + if (!atomic_read(&pipe->readers)) {
36467 wait_for_partner(inode, &pipe->r_counter);
36468 if (signal_pending(current))
36469 goto err_wr;
36470 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36471 */
36472 filp->f_op = &rdwr_pipefifo_fops;
36473
36474 - pipe->readers++;
36475 - pipe->writers++;
36476 + atomic_inc(&pipe->readers);
36477 + atomic_inc(&pipe->writers);
36478 pipe->r_counter++;
36479 pipe->w_counter++;
36480 - if (pipe->readers == 1 || pipe->writers == 1)
36481 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36482 wake_up_partner(inode);
36483 break;
36484
36485 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36486 return 0;
36487
36488 err_rd:
36489 - if (!--pipe->readers)
36490 + if (atomic_dec_and_test(&pipe->readers))
36491 wake_up_interruptible(&pipe->wait);
36492 ret = -ERESTARTSYS;
36493 goto err;
36494
36495 err_wr:
36496 - if (!--pipe->writers)
36497 + if (atomic_dec_and_test(&pipe->writers))
36498 wake_up_interruptible(&pipe->wait);
36499 ret = -ERESTARTSYS;
36500 goto err;
36501
36502 err:
36503 - if (!pipe->readers && !pipe->writers)
36504 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36505 free_pipe_info(inode);
36506
36507 err_nocleanup:
36508 diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
36509 --- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
36510 +++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
36511 @@ -15,6 +15,7 @@
36512 #include <linux/slab.h>
36513 #include <linux/vmalloc.h>
36514 #include <linux/file.h>
36515 +#include <linux/security.h>
36516 #include <linux/fdtable.h>
36517 #include <linux/bitops.h>
36518 #include <linux/interrupt.h>
36519 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36520 * N.B. For clone tasks sharing a files structure, this test
36521 * will limit the total number of files that can be opened.
36522 */
36523 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36524 if (nr >= rlimit(RLIMIT_NOFILE))
36525 return -EMFILE;
36526
36527 diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
36528 --- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
36529 +++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
36530 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36531 int len = dot ? dot - name : strlen(name);
36532
36533 fs = __get_fs_type(name, len);
36534 +
36535 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36536 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36537 +#else
36538 if (!fs && (request_module("%.*s", len, name) == 0))
36539 +#endif
36540 fs = __get_fs_type(name, len);
36541
36542 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36543 diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
36544 --- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
36545 +++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
36546 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36547 parent ? (char *) parent->def->name : "<no-parent>",
36548 def->name, netfs_data);
36549
36550 - fscache_stat(&fscache_n_acquires);
36551 + fscache_stat_unchecked(&fscache_n_acquires);
36552
36553 /* if there's no parent cookie, then we don't create one here either */
36554 if (!parent) {
36555 - fscache_stat(&fscache_n_acquires_null);
36556 + fscache_stat_unchecked(&fscache_n_acquires_null);
36557 _leave(" [no parent]");
36558 return NULL;
36559 }
36560 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36561 /* allocate and initialise a cookie */
36562 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36563 if (!cookie) {
36564 - fscache_stat(&fscache_n_acquires_oom);
36565 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36566 _leave(" [ENOMEM]");
36567 return NULL;
36568 }
36569 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36570
36571 switch (cookie->def->type) {
36572 case FSCACHE_COOKIE_TYPE_INDEX:
36573 - fscache_stat(&fscache_n_cookie_index);
36574 + fscache_stat_unchecked(&fscache_n_cookie_index);
36575 break;
36576 case FSCACHE_COOKIE_TYPE_DATAFILE:
36577 - fscache_stat(&fscache_n_cookie_data);
36578 + fscache_stat_unchecked(&fscache_n_cookie_data);
36579 break;
36580 default:
36581 - fscache_stat(&fscache_n_cookie_special);
36582 + fscache_stat_unchecked(&fscache_n_cookie_special);
36583 break;
36584 }
36585
36586 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36587 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36588 atomic_dec(&parent->n_children);
36589 __fscache_cookie_put(cookie);
36590 - fscache_stat(&fscache_n_acquires_nobufs);
36591 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36592 _leave(" = NULL");
36593 return NULL;
36594 }
36595 }
36596
36597 - fscache_stat(&fscache_n_acquires_ok);
36598 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36599 _leave(" = %p", cookie);
36600 return cookie;
36601 }
36602 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36603 cache = fscache_select_cache_for_object(cookie->parent);
36604 if (!cache) {
36605 up_read(&fscache_addremove_sem);
36606 - fscache_stat(&fscache_n_acquires_no_cache);
36607 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36608 _leave(" = -ENOMEDIUM [no cache]");
36609 return -ENOMEDIUM;
36610 }
36611 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36612 object = cache->ops->alloc_object(cache, cookie);
36613 fscache_stat_d(&fscache_n_cop_alloc_object);
36614 if (IS_ERR(object)) {
36615 - fscache_stat(&fscache_n_object_no_alloc);
36616 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36617 ret = PTR_ERR(object);
36618 goto error;
36619 }
36620
36621 - fscache_stat(&fscache_n_object_alloc);
36622 + fscache_stat_unchecked(&fscache_n_object_alloc);
36623
36624 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36625
36626 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36627 struct fscache_object *object;
36628 struct hlist_node *_p;
36629
36630 - fscache_stat(&fscache_n_updates);
36631 + fscache_stat_unchecked(&fscache_n_updates);
36632
36633 if (!cookie) {
36634 - fscache_stat(&fscache_n_updates_null);
36635 + fscache_stat_unchecked(&fscache_n_updates_null);
36636 _leave(" [no cookie]");
36637 return;
36638 }
36639 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36640 struct fscache_object *object;
36641 unsigned long event;
36642
36643 - fscache_stat(&fscache_n_relinquishes);
36644 + fscache_stat_unchecked(&fscache_n_relinquishes);
36645 if (retire)
36646 - fscache_stat(&fscache_n_relinquishes_retire);
36647 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36648
36649 if (!cookie) {
36650 - fscache_stat(&fscache_n_relinquishes_null);
36651 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36652 _leave(" [no cookie]");
36653 return;
36654 }
36655 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36656
36657 /* wait for the cookie to finish being instantiated (or to fail) */
36658 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36659 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36660 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36661 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36662 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36663 }
36664 diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
36665 --- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
36666 +++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
36667 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36668 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36669 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36670
36671 -extern atomic_t fscache_n_op_pend;
36672 -extern atomic_t fscache_n_op_run;
36673 -extern atomic_t fscache_n_op_enqueue;
36674 -extern atomic_t fscache_n_op_deferred_release;
36675 -extern atomic_t fscache_n_op_release;
36676 -extern atomic_t fscache_n_op_gc;
36677 -extern atomic_t fscache_n_op_cancelled;
36678 -extern atomic_t fscache_n_op_rejected;
36679 -
36680 -extern atomic_t fscache_n_attr_changed;
36681 -extern atomic_t fscache_n_attr_changed_ok;
36682 -extern atomic_t fscache_n_attr_changed_nobufs;
36683 -extern atomic_t fscache_n_attr_changed_nomem;
36684 -extern atomic_t fscache_n_attr_changed_calls;
36685 -
36686 -extern atomic_t fscache_n_allocs;
36687 -extern atomic_t fscache_n_allocs_ok;
36688 -extern atomic_t fscache_n_allocs_wait;
36689 -extern atomic_t fscache_n_allocs_nobufs;
36690 -extern atomic_t fscache_n_allocs_intr;
36691 -extern atomic_t fscache_n_allocs_object_dead;
36692 -extern atomic_t fscache_n_alloc_ops;
36693 -extern atomic_t fscache_n_alloc_op_waits;
36694 -
36695 -extern atomic_t fscache_n_retrievals;
36696 -extern atomic_t fscache_n_retrievals_ok;
36697 -extern atomic_t fscache_n_retrievals_wait;
36698 -extern atomic_t fscache_n_retrievals_nodata;
36699 -extern atomic_t fscache_n_retrievals_nobufs;
36700 -extern atomic_t fscache_n_retrievals_intr;
36701 -extern atomic_t fscache_n_retrievals_nomem;
36702 -extern atomic_t fscache_n_retrievals_object_dead;
36703 -extern atomic_t fscache_n_retrieval_ops;
36704 -extern atomic_t fscache_n_retrieval_op_waits;
36705 -
36706 -extern atomic_t fscache_n_stores;
36707 -extern atomic_t fscache_n_stores_ok;
36708 -extern atomic_t fscache_n_stores_again;
36709 -extern atomic_t fscache_n_stores_nobufs;
36710 -extern atomic_t fscache_n_stores_oom;
36711 -extern atomic_t fscache_n_store_ops;
36712 -extern atomic_t fscache_n_store_calls;
36713 -extern atomic_t fscache_n_store_pages;
36714 -extern atomic_t fscache_n_store_radix_deletes;
36715 -extern atomic_t fscache_n_store_pages_over_limit;
36716 -
36717 -extern atomic_t fscache_n_store_vmscan_not_storing;
36718 -extern atomic_t fscache_n_store_vmscan_gone;
36719 -extern atomic_t fscache_n_store_vmscan_busy;
36720 -extern atomic_t fscache_n_store_vmscan_cancelled;
36721 -
36722 -extern atomic_t fscache_n_marks;
36723 -extern atomic_t fscache_n_uncaches;
36724 -
36725 -extern atomic_t fscache_n_acquires;
36726 -extern atomic_t fscache_n_acquires_null;
36727 -extern atomic_t fscache_n_acquires_no_cache;
36728 -extern atomic_t fscache_n_acquires_ok;
36729 -extern atomic_t fscache_n_acquires_nobufs;
36730 -extern atomic_t fscache_n_acquires_oom;
36731 -
36732 -extern atomic_t fscache_n_updates;
36733 -extern atomic_t fscache_n_updates_null;
36734 -extern atomic_t fscache_n_updates_run;
36735 -
36736 -extern atomic_t fscache_n_relinquishes;
36737 -extern atomic_t fscache_n_relinquishes_null;
36738 -extern atomic_t fscache_n_relinquishes_waitcrt;
36739 -extern atomic_t fscache_n_relinquishes_retire;
36740 -
36741 -extern atomic_t fscache_n_cookie_index;
36742 -extern atomic_t fscache_n_cookie_data;
36743 -extern atomic_t fscache_n_cookie_special;
36744 -
36745 -extern atomic_t fscache_n_object_alloc;
36746 -extern atomic_t fscache_n_object_no_alloc;
36747 -extern atomic_t fscache_n_object_lookups;
36748 -extern atomic_t fscache_n_object_lookups_negative;
36749 -extern atomic_t fscache_n_object_lookups_positive;
36750 -extern atomic_t fscache_n_object_lookups_timed_out;
36751 -extern atomic_t fscache_n_object_created;
36752 -extern atomic_t fscache_n_object_avail;
36753 -extern atomic_t fscache_n_object_dead;
36754 -
36755 -extern atomic_t fscache_n_checkaux_none;
36756 -extern atomic_t fscache_n_checkaux_okay;
36757 -extern atomic_t fscache_n_checkaux_update;
36758 -extern atomic_t fscache_n_checkaux_obsolete;
36759 +extern atomic_unchecked_t fscache_n_op_pend;
36760 +extern atomic_unchecked_t fscache_n_op_run;
36761 +extern atomic_unchecked_t fscache_n_op_enqueue;
36762 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36763 +extern atomic_unchecked_t fscache_n_op_release;
36764 +extern atomic_unchecked_t fscache_n_op_gc;
36765 +extern atomic_unchecked_t fscache_n_op_cancelled;
36766 +extern atomic_unchecked_t fscache_n_op_rejected;
36767 +
36768 +extern atomic_unchecked_t fscache_n_attr_changed;
36769 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36770 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36771 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36772 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36773 +
36774 +extern atomic_unchecked_t fscache_n_allocs;
36775 +extern atomic_unchecked_t fscache_n_allocs_ok;
36776 +extern atomic_unchecked_t fscache_n_allocs_wait;
36777 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36778 +extern atomic_unchecked_t fscache_n_allocs_intr;
36779 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36780 +extern atomic_unchecked_t fscache_n_alloc_ops;
36781 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36782 +
36783 +extern atomic_unchecked_t fscache_n_retrievals;
36784 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36785 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36786 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36787 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36788 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36789 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36790 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36791 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36792 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36793 +
36794 +extern atomic_unchecked_t fscache_n_stores;
36795 +extern atomic_unchecked_t fscache_n_stores_ok;
36796 +extern atomic_unchecked_t fscache_n_stores_again;
36797 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36798 +extern atomic_unchecked_t fscache_n_stores_oom;
36799 +extern atomic_unchecked_t fscache_n_store_ops;
36800 +extern atomic_unchecked_t fscache_n_store_calls;
36801 +extern atomic_unchecked_t fscache_n_store_pages;
36802 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36803 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36804 +
36805 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36806 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36807 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36808 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36809 +
36810 +extern atomic_unchecked_t fscache_n_marks;
36811 +extern atomic_unchecked_t fscache_n_uncaches;
36812 +
36813 +extern atomic_unchecked_t fscache_n_acquires;
36814 +extern atomic_unchecked_t fscache_n_acquires_null;
36815 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36816 +extern atomic_unchecked_t fscache_n_acquires_ok;
36817 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36818 +extern atomic_unchecked_t fscache_n_acquires_oom;
36819 +
36820 +extern atomic_unchecked_t fscache_n_updates;
36821 +extern atomic_unchecked_t fscache_n_updates_null;
36822 +extern atomic_unchecked_t fscache_n_updates_run;
36823 +
36824 +extern atomic_unchecked_t fscache_n_relinquishes;
36825 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36826 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36827 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36828 +
36829 +extern atomic_unchecked_t fscache_n_cookie_index;
36830 +extern atomic_unchecked_t fscache_n_cookie_data;
36831 +extern atomic_unchecked_t fscache_n_cookie_special;
36832 +
36833 +extern atomic_unchecked_t fscache_n_object_alloc;
36834 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36835 +extern atomic_unchecked_t fscache_n_object_lookups;
36836 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36837 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36838 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36839 +extern atomic_unchecked_t fscache_n_object_created;
36840 +extern atomic_unchecked_t fscache_n_object_avail;
36841 +extern atomic_unchecked_t fscache_n_object_dead;
36842 +
36843 +extern atomic_unchecked_t fscache_n_checkaux_none;
36844 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36845 +extern atomic_unchecked_t fscache_n_checkaux_update;
36846 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36847
36848 extern atomic_t fscache_n_cop_alloc_object;
36849 extern atomic_t fscache_n_cop_lookup_object;
36850 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36851 atomic_inc(stat);
36852 }
36853
36854 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36855 +{
36856 + atomic_inc_unchecked(stat);
36857 +}
36858 +
36859 static inline void fscache_stat_d(atomic_t *stat)
36860 {
36861 atomic_dec(stat);
36862 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36863
36864 #define __fscache_stat(stat) (NULL)
36865 #define fscache_stat(stat) do {} while (0)
36866 +#define fscache_stat_unchecked(stat) do {} while (0)
36867 #define fscache_stat_d(stat) do {} while (0)
36868 #endif
36869
36870 diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
36871 --- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
36872 +++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
36873 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36874 /* update the object metadata on disk */
36875 case FSCACHE_OBJECT_UPDATING:
36876 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36877 - fscache_stat(&fscache_n_updates_run);
36878 + fscache_stat_unchecked(&fscache_n_updates_run);
36879 fscache_stat(&fscache_n_cop_update_object);
36880 object->cache->ops->update_object(object);
36881 fscache_stat_d(&fscache_n_cop_update_object);
36882 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36883 spin_lock(&object->lock);
36884 object->state = FSCACHE_OBJECT_DEAD;
36885 spin_unlock(&object->lock);
36886 - fscache_stat(&fscache_n_object_dead);
36887 + fscache_stat_unchecked(&fscache_n_object_dead);
36888 goto terminal_transit;
36889
36890 /* handle the parent cache of this object being withdrawn from
36891 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
36892 spin_lock(&object->lock);
36893 object->state = FSCACHE_OBJECT_DEAD;
36894 spin_unlock(&object->lock);
36895 - fscache_stat(&fscache_n_object_dead);
36896 + fscache_stat_unchecked(&fscache_n_object_dead);
36897 goto terminal_transit;
36898
36899 /* complain about the object being woken up once it is
36900 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36901 parent->cookie->def->name, cookie->def->name,
36902 object->cache->tag->name);
36903
36904 - fscache_stat(&fscache_n_object_lookups);
36905 + fscache_stat_unchecked(&fscache_n_object_lookups);
36906 fscache_stat(&fscache_n_cop_lookup_object);
36907 ret = object->cache->ops->lookup_object(object);
36908 fscache_stat_d(&fscache_n_cop_lookup_object);
36909 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36910 if (ret == -ETIMEDOUT) {
36911 /* probably stuck behind another object, so move this one to
36912 * the back of the queue */
36913 - fscache_stat(&fscache_n_object_lookups_timed_out);
36914 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36915 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36916 }
36917
36918 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36919
36920 spin_lock(&object->lock);
36921 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36922 - fscache_stat(&fscache_n_object_lookups_negative);
36923 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36924
36925 /* transit here to allow write requests to begin stacking up
36926 * and read requests to begin returning ENODATA */
36927 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36928 * result, in which case there may be data available */
36929 spin_lock(&object->lock);
36930 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36931 - fscache_stat(&fscache_n_object_lookups_positive);
36932 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36933
36934 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36935
36936 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36937 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36938 } else {
36939 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36940 - fscache_stat(&fscache_n_object_created);
36941 + fscache_stat_unchecked(&fscache_n_object_created);
36942
36943 object->state = FSCACHE_OBJECT_AVAILABLE;
36944 spin_unlock(&object->lock);
36945 @@ -602,7 +602,7 @@ static void fscache_object_available(str
36946 fscache_enqueue_dependents(object);
36947
36948 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36949 - fscache_stat(&fscache_n_object_avail);
36950 + fscache_stat_unchecked(&fscache_n_object_avail);
36951
36952 _leave("");
36953 }
36954 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36955 enum fscache_checkaux result;
36956
36957 if (!object->cookie->def->check_aux) {
36958 - fscache_stat(&fscache_n_checkaux_none);
36959 + fscache_stat_unchecked(&fscache_n_checkaux_none);
36960 return FSCACHE_CHECKAUX_OKAY;
36961 }
36962
36963 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
36964 switch (result) {
36965 /* entry okay as is */
36966 case FSCACHE_CHECKAUX_OKAY:
36967 - fscache_stat(&fscache_n_checkaux_okay);
36968 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
36969 break;
36970
36971 /* entry requires update */
36972 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
36973 - fscache_stat(&fscache_n_checkaux_update);
36974 + fscache_stat_unchecked(&fscache_n_checkaux_update);
36975 break;
36976
36977 /* entry requires deletion */
36978 case FSCACHE_CHECKAUX_OBSOLETE:
36979 - fscache_stat(&fscache_n_checkaux_obsolete);
36980 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
36981 break;
36982
36983 default:
36984 diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
36985 --- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
36986 +++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
36987 @@ -17,7 +17,7 @@
36988 #include <linux/slab.h>
36989 #include "internal.h"
36990
36991 -atomic_t fscache_op_debug_id;
36992 +atomic_unchecked_t fscache_op_debug_id;
36993 EXPORT_SYMBOL(fscache_op_debug_id);
36994
36995 /**
36996 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
36997 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
36998 ASSERTCMP(atomic_read(&op->usage), >, 0);
36999
37000 - fscache_stat(&fscache_n_op_enqueue);
37001 + fscache_stat_unchecked(&fscache_n_op_enqueue);
37002 switch (op->flags & FSCACHE_OP_TYPE) {
37003 case FSCACHE_OP_ASYNC:
37004 _debug("queue async");
37005 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
37006 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37007 if (op->processor)
37008 fscache_enqueue_operation(op);
37009 - fscache_stat(&fscache_n_op_run);
37010 + fscache_stat_unchecked(&fscache_n_op_run);
37011 }
37012
37013 /*
37014 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
37015 if (object->n_ops > 1) {
37016 atomic_inc(&op->usage);
37017 list_add_tail(&op->pend_link, &object->pending_ops);
37018 - fscache_stat(&fscache_n_op_pend);
37019 + fscache_stat_unchecked(&fscache_n_op_pend);
37020 } else if (!list_empty(&object->pending_ops)) {
37021 atomic_inc(&op->usage);
37022 list_add_tail(&op->pend_link, &object->pending_ops);
37023 - fscache_stat(&fscache_n_op_pend);
37024 + fscache_stat_unchecked(&fscache_n_op_pend);
37025 fscache_start_operations(object);
37026 } else {
37027 ASSERTCMP(object->n_in_progress, ==, 0);
37028 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
37029 object->n_exclusive++; /* reads and writes must wait */
37030 atomic_inc(&op->usage);
37031 list_add_tail(&op->pend_link, &object->pending_ops);
37032 - fscache_stat(&fscache_n_op_pend);
37033 + fscache_stat_unchecked(&fscache_n_op_pend);
37034 ret = 0;
37035 } else {
37036 /* not allowed to submit ops in any other state */
37037 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
37038 if (object->n_exclusive > 0) {
37039 atomic_inc(&op->usage);
37040 list_add_tail(&op->pend_link, &object->pending_ops);
37041 - fscache_stat(&fscache_n_op_pend);
37042 + fscache_stat_unchecked(&fscache_n_op_pend);
37043 } else if (!list_empty(&object->pending_ops)) {
37044 atomic_inc(&op->usage);
37045 list_add_tail(&op->pend_link, &object->pending_ops);
37046 - fscache_stat(&fscache_n_op_pend);
37047 + fscache_stat_unchecked(&fscache_n_op_pend);
37048 fscache_start_operations(object);
37049 } else {
37050 ASSERTCMP(object->n_exclusive, ==, 0);
37051 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
37052 object->n_ops++;
37053 atomic_inc(&op->usage);
37054 list_add_tail(&op->pend_link, &object->pending_ops);
37055 - fscache_stat(&fscache_n_op_pend);
37056 + fscache_stat_unchecked(&fscache_n_op_pend);
37057 ret = 0;
37058 } else if (object->state == FSCACHE_OBJECT_DYING ||
37059 object->state == FSCACHE_OBJECT_LC_DYING ||
37060 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37061 - fscache_stat(&fscache_n_op_rejected);
37062 + fscache_stat_unchecked(&fscache_n_op_rejected);
37063 ret = -ENOBUFS;
37064 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37065 fscache_report_unexpected_submission(object, op, ostate);
37066 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
37067
37068 ret = -EBUSY;
37069 if (!list_empty(&op->pend_link)) {
37070 - fscache_stat(&fscache_n_op_cancelled);
37071 + fscache_stat_unchecked(&fscache_n_op_cancelled);
37072 list_del_init(&op->pend_link);
37073 object->n_ops--;
37074 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37075 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
37076 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37077 BUG();
37078
37079 - fscache_stat(&fscache_n_op_release);
37080 + fscache_stat_unchecked(&fscache_n_op_release);
37081
37082 if (op->release) {
37083 op->release(op);
37084 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
37085 * lock, and defer it otherwise */
37086 if (!spin_trylock(&object->lock)) {
37087 _debug("defer put");
37088 - fscache_stat(&fscache_n_op_deferred_release);
37089 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
37090
37091 cache = object->cache;
37092 spin_lock(&cache->op_gc_list_lock);
37093 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
37094
37095 _debug("GC DEFERRED REL OBJ%x OP%x",
37096 object->debug_id, op->debug_id);
37097 - fscache_stat(&fscache_n_op_gc);
37098 + fscache_stat_unchecked(&fscache_n_op_gc);
37099
37100 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37101
37102 diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
37103 --- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
37104 +++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
37105 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37106 val = radix_tree_lookup(&cookie->stores, page->index);
37107 if (!val) {
37108 rcu_read_unlock();
37109 - fscache_stat(&fscache_n_store_vmscan_not_storing);
37110 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37111 __fscache_uncache_page(cookie, page);
37112 return true;
37113 }
37114 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37115 spin_unlock(&cookie->stores_lock);
37116
37117 if (xpage) {
37118 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37119 - fscache_stat(&fscache_n_store_radix_deletes);
37120 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37121 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37122 ASSERTCMP(xpage, ==, page);
37123 } else {
37124 - fscache_stat(&fscache_n_store_vmscan_gone);
37125 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37126 }
37127
37128 wake_up_bit(&cookie->flags, 0);
37129 @@ -107,7 +107,7 @@ page_busy:
37130 /* we might want to wait here, but that could deadlock the allocator as
37131 * the work threads writing to the cache may all end up sleeping
37132 * on memory allocation */
37133 - fscache_stat(&fscache_n_store_vmscan_busy);
37134 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37135 return false;
37136 }
37137 EXPORT_SYMBOL(__fscache_maybe_release_page);
37138 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37139 FSCACHE_COOKIE_STORING_TAG);
37140 if (!radix_tree_tag_get(&cookie->stores, page->index,
37141 FSCACHE_COOKIE_PENDING_TAG)) {
37142 - fscache_stat(&fscache_n_store_radix_deletes);
37143 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37144 xpage = radix_tree_delete(&cookie->stores, page->index);
37145 }
37146 spin_unlock(&cookie->stores_lock);
37147 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37148
37149 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37150
37151 - fscache_stat(&fscache_n_attr_changed_calls);
37152 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37153
37154 if (fscache_object_is_active(object)) {
37155 fscache_stat(&fscache_n_cop_attr_changed);
37156 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
37157
37158 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37159
37160 - fscache_stat(&fscache_n_attr_changed);
37161 + fscache_stat_unchecked(&fscache_n_attr_changed);
37162
37163 op = kzalloc(sizeof(*op), GFP_KERNEL);
37164 if (!op) {
37165 - fscache_stat(&fscache_n_attr_changed_nomem);
37166 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37167 _leave(" = -ENOMEM");
37168 return -ENOMEM;
37169 }
37170 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
37171 if (fscache_submit_exclusive_op(object, op) < 0)
37172 goto nobufs;
37173 spin_unlock(&cookie->lock);
37174 - fscache_stat(&fscache_n_attr_changed_ok);
37175 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37176 fscache_put_operation(op);
37177 _leave(" = 0");
37178 return 0;
37179 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
37180 nobufs:
37181 spin_unlock(&cookie->lock);
37182 kfree(op);
37183 - fscache_stat(&fscache_n_attr_changed_nobufs);
37184 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37185 _leave(" = %d", -ENOBUFS);
37186 return -ENOBUFS;
37187 }
37188 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
37189 /* allocate a retrieval operation and attempt to submit it */
37190 op = kzalloc(sizeof(*op), GFP_NOIO);
37191 if (!op) {
37192 - fscache_stat(&fscache_n_retrievals_nomem);
37193 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37194 return NULL;
37195 }
37196
37197 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
37198 return 0;
37199 }
37200
37201 - fscache_stat(&fscache_n_retrievals_wait);
37202 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37203
37204 jif = jiffies;
37205 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37206 fscache_wait_bit_interruptible,
37207 TASK_INTERRUPTIBLE) != 0) {
37208 - fscache_stat(&fscache_n_retrievals_intr);
37209 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37210 _leave(" = -ERESTARTSYS");
37211 return -ERESTARTSYS;
37212 }
37213 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
37214 */
37215 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37216 struct fscache_retrieval *op,
37217 - atomic_t *stat_op_waits,
37218 - atomic_t *stat_object_dead)
37219 + atomic_unchecked_t *stat_op_waits,
37220 + atomic_unchecked_t *stat_object_dead)
37221 {
37222 int ret;
37223
37224 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
37225 goto check_if_dead;
37226
37227 _debug(">>> WT");
37228 - fscache_stat(stat_op_waits);
37229 + fscache_stat_unchecked(stat_op_waits);
37230 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37231 fscache_wait_bit_interruptible,
37232 TASK_INTERRUPTIBLE) < 0) {
37233 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
37234
37235 check_if_dead:
37236 if (unlikely(fscache_object_is_dead(object))) {
37237 - fscache_stat(stat_object_dead);
37238 + fscache_stat_unchecked(stat_object_dead);
37239 return -ENOBUFS;
37240 }
37241 return 0;
37242 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
37243
37244 _enter("%p,%p,,,", cookie, page);
37245
37246 - fscache_stat(&fscache_n_retrievals);
37247 + fscache_stat_unchecked(&fscache_n_retrievals);
37248
37249 if (hlist_empty(&cookie->backing_objects))
37250 goto nobufs;
37251 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
37252 goto nobufs_unlock;
37253 spin_unlock(&cookie->lock);
37254
37255 - fscache_stat(&fscache_n_retrieval_ops);
37256 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37257
37258 /* pin the netfs read context in case we need to do the actual netfs
37259 * read because we've encountered a cache read failure */
37260 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
37261
37262 error:
37263 if (ret == -ENOMEM)
37264 - fscache_stat(&fscache_n_retrievals_nomem);
37265 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37266 else if (ret == -ERESTARTSYS)
37267 - fscache_stat(&fscache_n_retrievals_intr);
37268 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37269 else if (ret == -ENODATA)
37270 - fscache_stat(&fscache_n_retrievals_nodata);
37271 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37272 else if (ret < 0)
37273 - fscache_stat(&fscache_n_retrievals_nobufs);
37274 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37275 else
37276 - fscache_stat(&fscache_n_retrievals_ok);
37277 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37278
37279 fscache_put_retrieval(op);
37280 _leave(" = %d", ret);
37281 @@ -429,7 +429,7 @@ nobufs_unlock:
37282 spin_unlock(&cookie->lock);
37283 kfree(op);
37284 nobufs:
37285 - fscache_stat(&fscache_n_retrievals_nobufs);
37286 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37287 _leave(" = -ENOBUFS");
37288 return -ENOBUFS;
37289 }
37290 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
37291
37292 _enter("%p,,%d,,,", cookie, *nr_pages);
37293
37294 - fscache_stat(&fscache_n_retrievals);
37295 + fscache_stat_unchecked(&fscache_n_retrievals);
37296
37297 if (hlist_empty(&cookie->backing_objects))
37298 goto nobufs;
37299 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
37300 goto nobufs_unlock;
37301 spin_unlock(&cookie->lock);
37302
37303 - fscache_stat(&fscache_n_retrieval_ops);
37304 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37305
37306 /* pin the netfs read context in case we need to do the actual netfs
37307 * read because we've encountered a cache read failure */
37308 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
37309
37310 error:
37311 if (ret == -ENOMEM)
37312 - fscache_stat(&fscache_n_retrievals_nomem);
37313 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37314 else if (ret == -ERESTARTSYS)
37315 - fscache_stat(&fscache_n_retrievals_intr);
37316 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37317 else if (ret == -ENODATA)
37318 - fscache_stat(&fscache_n_retrievals_nodata);
37319 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37320 else if (ret < 0)
37321 - fscache_stat(&fscache_n_retrievals_nobufs);
37322 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37323 else
37324 - fscache_stat(&fscache_n_retrievals_ok);
37325 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37326
37327 fscache_put_retrieval(op);
37328 _leave(" = %d", ret);
37329 @@ -545,7 +545,7 @@ nobufs_unlock:
37330 spin_unlock(&cookie->lock);
37331 kfree(op);
37332 nobufs:
37333 - fscache_stat(&fscache_n_retrievals_nobufs);
37334 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37335 _leave(" = -ENOBUFS");
37336 return -ENOBUFS;
37337 }
37338 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
37339
37340 _enter("%p,%p,,,", cookie, page);
37341
37342 - fscache_stat(&fscache_n_allocs);
37343 + fscache_stat_unchecked(&fscache_n_allocs);
37344
37345 if (hlist_empty(&cookie->backing_objects))
37346 goto nobufs;
37347 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
37348 goto nobufs_unlock;
37349 spin_unlock(&cookie->lock);
37350
37351 - fscache_stat(&fscache_n_alloc_ops);
37352 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37353
37354 ret = fscache_wait_for_retrieval_activation(
37355 object, op,
37356 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
37357
37358 error:
37359 if (ret == -ERESTARTSYS)
37360 - fscache_stat(&fscache_n_allocs_intr);
37361 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37362 else if (ret < 0)
37363 - fscache_stat(&fscache_n_allocs_nobufs);
37364 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37365 else
37366 - fscache_stat(&fscache_n_allocs_ok);
37367 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37368
37369 fscache_put_retrieval(op);
37370 _leave(" = %d", ret);
37371 @@ -625,7 +625,7 @@ nobufs_unlock:
37372 spin_unlock(&cookie->lock);
37373 kfree(op);
37374 nobufs:
37375 - fscache_stat(&fscache_n_allocs_nobufs);
37376 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37377 _leave(" = -ENOBUFS");
37378 return -ENOBUFS;
37379 }
37380 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
37381
37382 spin_lock(&cookie->stores_lock);
37383
37384 - fscache_stat(&fscache_n_store_calls);
37385 + fscache_stat_unchecked(&fscache_n_store_calls);
37386
37387 /* find a page to store */
37388 page = NULL;
37389 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
37390 page = results[0];
37391 _debug("gang %d [%lx]", n, page->index);
37392 if (page->index > op->store_limit) {
37393 - fscache_stat(&fscache_n_store_pages_over_limit);
37394 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37395 goto superseded;
37396 }
37397
37398 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
37399 spin_unlock(&cookie->stores_lock);
37400 spin_unlock(&object->lock);
37401
37402 - fscache_stat(&fscache_n_store_pages);
37403 + fscache_stat_unchecked(&fscache_n_store_pages);
37404 fscache_stat(&fscache_n_cop_write_page);
37405 ret = object->cache->ops->write_page(op, page);
37406 fscache_stat_d(&fscache_n_cop_write_page);
37407 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
37408 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37409 ASSERT(PageFsCache(page));
37410
37411 - fscache_stat(&fscache_n_stores);
37412 + fscache_stat_unchecked(&fscache_n_stores);
37413
37414 op = kzalloc(sizeof(*op), GFP_NOIO);
37415 if (!op)
37416 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
37417 spin_unlock(&cookie->stores_lock);
37418 spin_unlock(&object->lock);
37419
37420 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37421 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37422 op->store_limit = object->store_limit;
37423
37424 if (fscache_submit_op(object, &op->op) < 0)
37425 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
37426
37427 spin_unlock(&cookie->lock);
37428 radix_tree_preload_end();
37429 - fscache_stat(&fscache_n_store_ops);
37430 - fscache_stat(&fscache_n_stores_ok);
37431 + fscache_stat_unchecked(&fscache_n_store_ops);
37432 + fscache_stat_unchecked(&fscache_n_stores_ok);
37433
37434 /* the work queue now carries its own ref on the object */
37435 fscache_put_operation(&op->op);
37436 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
37437 return 0;
37438
37439 already_queued:
37440 - fscache_stat(&fscache_n_stores_again);
37441 + fscache_stat_unchecked(&fscache_n_stores_again);
37442 already_pending:
37443 spin_unlock(&cookie->stores_lock);
37444 spin_unlock(&object->lock);
37445 spin_unlock(&cookie->lock);
37446 radix_tree_preload_end();
37447 kfree(op);
37448 - fscache_stat(&fscache_n_stores_ok);
37449 + fscache_stat_unchecked(&fscache_n_stores_ok);
37450 _leave(" = 0");
37451 return 0;
37452
37453 @@ -851,14 +851,14 @@ nobufs:
37454 spin_unlock(&cookie->lock);
37455 radix_tree_preload_end();
37456 kfree(op);
37457 - fscache_stat(&fscache_n_stores_nobufs);
37458 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37459 _leave(" = -ENOBUFS");
37460 return -ENOBUFS;
37461
37462 nomem_free:
37463 kfree(op);
37464 nomem:
37465 - fscache_stat(&fscache_n_stores_oom);
37466 + fscache_stat_unchecked(&fscache_n_stores_oom);
37467 _leave(" = -ENOMEM");
37468 return -ENOMEM;
37469 }
37470 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
37471 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37472 ASSERTCMP(page, !=, NULL);
37473
37474 - fscache_stat(&fscache_n_uncaches);
37475 + fscache_stat_unchecked(&fscache_n_uncaches);
37476
37477 /* cache withdrawal may beat us to it */
37478 if (!PageFsCache(page))
37479 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
37480 unsigned long loop;
37481
37482 #ifdef CONFIG_FSCACHE_STATS
37483 - atomic_add(pagevec->nr, &fscache_n_marks);
37484 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37485 #endif
37486
37487 for (loop = 0; loop < pagevec->nr; loop++) {
37488 diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
37489 --- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
37490 +++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
37491 @@ -18,95 +18,95 @@
37492 /*
37493 * operation counters
37494 */
37495 -atomic_t fscache_n_op_pend;
37496 -atomic_t fscache_n_op_run;
37497 -atomic_t fscache_n_op_enqueue;
37498 -atomic_t fscache_n_op_requeue;
37499 -atomic_t fscache_n_op_deferred_release;
37500 -atomic_t fscache_n_op_release;
37501 -atomic_t fscache_n_op_gc;
37502 -atomic_t fscache_n_op_cancelled;
37503 -atomic_t fscache_n_op_rejected;
37504 -
37505 -atomic_t fscache_n_attr_changed;
37506 -atomic_t fscache_n_attr_changed_ok;
37507 -atomic_t fscache_n_attr_changed_nobufs;
37508 -atomic_t fscache_n_attr_changed_nomem;
37509 -atomic_t fscache_n_attr_changed_calls;
37510 -
37511 -atomic_t fscache_n_allocs;
37512 -atomic_t fscache_n_allocs_ok;
37513 -atomic_t fscache_n_allocs_wait;
37514 -atomic_t fscache_n_allocs_nobufs;
37515 -atomic_t fscache_n_allocs_intr;
37516 -atomic_t fscache_n_allocs_object_dead;
37517 -atomic_t fscache_n_alloc_ops;
37518 -atomic_t fscache_n_alloc_op_waits;
37519 -
37520 -atomic_t fscache_n_retrievals;
37521 -atomic_t fscache_n_retrievals_ok;
37522 -atomic_t fscache_n_retrievals_wait;
37523 -atomic_t fscache_n_retrievals_nodata;
37524 -atomic_t fscache_n_retrievals_nobufs;
37525 -atomic_t fscache_n_retrievals_intr;
37526 -atomic_t fscache_n_retrievals_nomem;
37527 -atomic_t fscache_n_retrievals_object_dead;
37528 -atomic_t fscache_n_retrieval_ops;
37529 -atomic_t fscache_n_retrieval_op_waits;
37530 -
37531 -atomic_t fscache_n_stores;
37532 -atomic_t fscache_n_stores_ok;
37533 -atomic_t fscache_n_stores_again;
37534 -atomic_t fscache_n_stores_nobufs;
37535 -atomic_t fscache_n_stores_oom;
37536 -atomic_t fscache_n_store_ops;
37537 -atomic_t fscache_n_store_calls;
37538 -atomic_t fscache_n_store_pages;
37539 -atomic_t fscache_n_store_radix_deletes;
37540 -atomic_t fscache_n_store_pages_over_limit;
37541 -
37542 -atomic_t fscache_n_store_vmscan_not_storing;
37543 -atomic_t fscache_n_store_vmscan_gone;
37544 -atomic_t fscache_n_store_vmscan_busy;
37545 -atomic_t fscache_n_store_vmscan_cancelled;
37546 -
37547 -atomic_t fscache_n_marks;
37548 -atomic_t fscache_n_uncaches;
37549 -
37550 -atomic_t fscache_n_acquires;
37551 -atomic_t fscache_n_acquires_null;
37552 -atomic_t fscache_n_acquires_no_cache;
37553 -atomic_t fscache_n_acquires_ok;
37554 -atomic_t fscache_n_acquires_nobufs;
37555 -atomic_t fscache_n_acquires_oom;
37556 -
37557 -atomic_t fscache_n_updates;
37558 -atomic_t fscache_n_updates_null;
37559 -atomic_t fscache_n_updates_run;
37560 -
37561 -atomic_t fscache_n_relinquishes;
37562 -atomic_t fscache_n_relinquishes_null;
37563 -atomic_t fscache_n_relinquishes_waitcrt;
37564 -atomic_t fscache_n_relinquishes_retire;
37565 -
37566 -atomic_t fscache_n_cookie_index;
37567 -atomic_t fscache_n_cookie_data;
37568 -atomic_t fscache_n_cookie_special;
37569 -
37570 -atomic_t fscache_n_object_alloc;
37571 -atomic_t fscache_n_object_no_alloc;
37572 -atomic_t fscache_n_object_lookups;
37573 -atomic_t fscache_n_object_lookups_negative;
37574 -atomic_t fscache_n_object_lookups_positive;
37575 -atomic_t fscache_n_object_lookups_timed_out;
37576 -atomic_t fscache_n_object_created;
37577 -atomic_t fscache_n_object_avail;
37578 -atomic_t fscache_n_object_dead;
37579 -
37580 -atomic_t fscache_n_checkaux_none;
37581 -atomic_t fscache_n_checkaux_okay;
37582 -atomic_t fscache_n_checkaux_update;
37583 -atomic_t fscache_n_checkaux_obsolete;
37584 +atomic_unchecked_t fscache_n_op_pend;
37585 +atomic_unchecked_t fscache_n_op_run;
37586 +atomic_unchecked_t fscache_n_op_enqueue;
37587 +atomic_unchecked_t fscache_n_op_requeue;
37588 +atomic_unchecked_t fscache_n_op_deferred_release;
37589 +atomic_unchecked_t fscache_n_op_release;
37590 +atomic_unchecked_t fscache_n_op_gc;
37591 +atomic_unchecked_t fscache_n_op_cancelled;
37592 +atomic_unchecked_t fscache_n_op_rejected;
37593 +
37594 +atomic_unchecked_t fscache_n_attr_changed;
37595 +atomic_unchecked_t fscache_n_attr_changed_ok;
37596 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37597 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37598 +atomic_unchecked_t fscache_n_attr_changed_calls;
37599 +
37600 +atomic_unchecked_t fscache_n_allocs;
37601 +atomic_unchecked_t fscache_n_allocs_ok;
37602 +atomic_unchecked_t fscache_n_allocs_wait;
37603 +atomic_unchecked_t fscache_n_allocs_nobufs;
37604 +atomic_unchecked_t fscache_n_allocs_intr;
37605 +atomic_unchecked_t fscache_n_allocs_object_dead;
37606 +atomic_unchecked_t fscache_n_alloc_ops;
37607 +atomic_unchecked_t fscache_n_alloc_op_waits;
37608 +
37609 +atomic_unchecked_t fscache_n_retrievals;
37610 +atomic_unchecked_t fscache_n_retrievals_ok;
37611 +atomic_unchecked_t fscache_n_retrievals_wait;
37612 +atomic_unchecked_t fscache_n_retrievals_nodata;
37613 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37614 +atomic_unchecked_t fscache_n_retrievals_intr;
37615 +atomic_unchecked_t fscache_n_retrievals_nomem;
37616 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37617 +atomic_unchecked_t fscache_n_retrieval_ops;
37618 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37619 +
37620 +atomic_unchecked_t fscache_n_stores;
37621 +atomic_unchecked_t fscache_n_stores_ok;
37622 +atomic_unchecked_t fscache_n_stores_again;
37623 +atomic_unchecked_t fscache_n_stores_nobufs;
37624 +atomic_unchecked_t fscache_n_stores_oom;
37625 +atomic_unchecked_t fscache_n_store_ops;
37626 +atomic_unchecked_t fscache_n_store_calls;
37627 +atomic_unchecked_t fscache_n_store_pages;
37628 +atomic_unchecked_t fscache_n_store_radix_deletes;
37629 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37630 +
37631 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37632 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37633 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37634 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37635 +
37636 +atomic_unchecked_t fscache_n_marks;
37637 +atomic_unchecked_t fscache_n_uncaches;
37638 +
37639 +atomic_unchecked_t fscache_n_acquires;
37640 +atomic_unchecked_t fscache_n_acquires_null;
37641 +atomic_unchecked_t fscache_n_acquires_no_cache;
37642 +atomic_unchecked_t fscache_n_acquires_ok;
37643 +atomic_unchecked_t fscache_n_acquires_nobufs;
37644 +atomic_unchecked_t fscache_n_acquires_oom;
37645 +
37646 +atomic_unchecked_t fscache_n_updates;
37647 +atomic_unchecked_t fscache_n_updates_null;
37648 +atomic_unchecked_t fscache_n_updates_run;
37649 +
37650 +atomic_unchecked_t fscache_n_relinquishes;
37651 +atomic_unchecked_t fscache_n_relinquishes_null;
37652 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37653 +atomic_unchecked_t fscache_n_relinquishes_retire;
37654 +
37655 +atomic_unchecked_t fscache_n_cookie_index;
37656 +atomic_unchecked_t fscache_n_cookie_data;
37657 +atomic_unchecked_t fscache_n_cookie_special;
37658 +
37659 +atomic_unchecked_t fscache_n_object_alloc;
37660 +atomic_unchecked_t fscache_n_object_no_alloc;
37661 +atomic_unchecked_t fscache_n_object_lookups;
37662 +atomic_unchecked_t fscache_n_object_lookups_negative;
37663 +atomic_unchecked_t fscache_n_object_lookups_positive;
37664 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37665 +atomic_unchecked_t fscache_n_object_created;
37666 +atomic_unchecked_t fscache_n_object_avail;
37667 +atomic_unchecked_t fscache_n_object_dead;
37668 +
37669 +atomic_unchecked_t fscache_n_checkaux_none;
37670 +atomic_unchecked_t fscache_n_checkaux_okay;
37671 +atomic_unchecked_t fscache_n_checkaux_update;
37672 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37673
37674 atomic_t fscache_n_cop_alloc_object;
37675 atomic_t fscache_n_cop_lookup_object;
37676 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37677 seq_puts(m, "FS-Cache statistics\n");
37678
37679 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37680 - atomic_read(&fscache_n_cookie_index),
37681 - atomic_read(&fscache_n_cookie_data),
37682 - atomic_read(&fscache_n_cookie_special));
37683 + atomic_read_unchecked(&fscache_n_cookie_index),
37684 + atomic_read_unchecked(&fscache_n_cookie_data),
37685 + atomic_read_unchecked(&fscache_n_cookie_special));
37686
37687 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37688 - atomic_read(&fscache_n_object_alloc),
37689 - atomic_read(&fscache_n_object_no_alloc),
37690 - atomic_read(&fscache_n_object_avail),
37691 - atomic_read(&fscache_n_object_dead));
37692 + atomic_read_unchecked(&fscache_n_object_alloc),
37693 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37694 + atomic_read_unchecked(&fscache_n_object_avail),
37695 + atomic_read_unchecked(&fscache_n_object_dead));
37696 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37697 - atomic_read(&fscache_n_checkaux_none),
37698 - atomic_read(&fscache_n_checkaux_okay),
37699 - atomic_read(&fscache_n_checkaux_update),
37700 - atomic_read(&fscache_n_checkaux_obsolete));
37701 + atomic_read_unchecked(&fscache_n_checkaux_none),
37702 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37703 + atomic_read_unchecked(&fscache_n_checkaux_update),
37704 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37705
37706 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37707 - atomic_read(&fscache_n_marks),
37708 - atomic_read(&fscache_n_uncaches));
37709 + atomic_read_unchecked(&fscache_n_marks),
37710 + atomic_read_unchecked(&fscache_n_uncaches));
37711
37712 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37713 " oom=%u\n",
37714 - atomic_read(&fscache_n_acquires),
37715 - atomic_read(&fscache_n_acquires_null),
37716 - atomic_read(&fscache_n_acquires_no_cache),
37717 - atomic_read(&fscache_n_acquires_ok),
37718 - atomic_read(&fscache_n_acquires_nobufs),
37719 - atomic_read(&fscache_n_acquires_oom));
37720 + atomic_read_unchecked(&fscache_n_acquires),
37721 + atomic_read_unchecked(&fscache_n_acquires_null),
37722 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37723 + atomic_read_unchecked(&fscache_n_acquires_ok),
37724 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37725 + atomic_read_unchecked(&fscache_n_acquires_oom));
37726
37727 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37728 - atomic_read(&fscache_n_object_lookups),
37729 - atomic_read(&fscache_n_object_lookups_negative),
37730 - atomic_read(&fscache_n_object_lookups_positive),
37731 - atomic_read(&fscache_n_object_created),
37732 - atomic_read(&fscache_n_object_lookups_timed_out));
37733 + atomic_read_unchecked(&fscache_n_object_lookups),
37734 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37735 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37736 + atomic_read_unchecked(&fscache_n_object_created),
37737 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37738
37739 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37740 - atomic_read(&fscache_n_updates),
37741 - atomic_read(&fscache_n_updates_null),
37742 - atomic_read(&fscache_n_updates_run));
37743 + atomic_read_unchecked(&fscache_n_updates),
37744 + atomic_read_unchecked(&fscache_n_updates_null),
37745 + atomic_read_unchecked(&fscache_n_updates_run));
37746
37747 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37748 - atomic_read(&fscache_n_relinquishes),
37749 - atomic_read(&fscache_n_relinquishes_null),
37750 - atomic_read(&fscache_n_relinquishes_waitcrt),
37751 - atomic_read(&fscache_n_relinquishes_retire));
37752 + atomic_read_unchecked(&fscache_n_relinquishes),
37753 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37754 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37755 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37756
37757 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37758 - atomic_read(&fscache_n_attr_changed),
37759 - atomic_read(&fscache_n_attr_changed_ok),
37760 - atomic_read(&fscache_n_attr_changed_nobufs),
37761 - atomic_read(&fscache_n_attr_changed_nomem),
37762 - atomic_read(&fscache_n_attr_changed_calls));
37763 + atomic_read_unchecked(&fscache_n_attr_changed),
37764 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37765 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37766 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37767 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37768
37769 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37770 - atomic_read(&fscache_n_allocs),
37771 - atomic_read(&fscache_n_allocs_ok),
37772 - atomic_read(&fscache_n_allocs_wait),
37773 - atomic_read(&fscache_n_allocs_nobufs),
37774 - atomic_read(&fscache_n_allocs_intr));
37775 + atomic_read_unchecked(&fscache_n_allocs),
37776 + atomic_read_unchecked(&fscache_n_allocs_ok),
37777 + atomic_read_unchecked(&fscache_n_allocs_wait),
37778 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37779 + atomic_read_unchecked(&fscache_n_allocs_intr));
37780 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37781 - atomic_read(&fscache_n_alloc_ops),
37782 - atomic_read(&fscache_n_alloc_op_waits),
37783 - atomic_read(&fscache_n_allocs_object_dead));
37784 + atomic_read_unchecked(&fscache_n_alloc_ops),
37785 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37786 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37787
37788 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37789 " int=%u oom=%u\n",
37790 - atomic_read(&fscache_n_retrievals),
37791 - atomic_read(&fscache_n_retrievals_ok),
37792 - atomic_read(&fscache_n_retrievals_wait),
37793 - atomic_read(&fscache_n_retrievals_nodata),
37794 - atomic_read(&fscache_n_retrievals_nobufs),
37795 - atomic_read(&fscache_n_retrievals_intr),
37796 - atomic_read(&fscache_n_retrievals_nomem));
37797 + atomic_read_unchecked(&fscache_n_retrievals),
37798 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37799 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37800 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37801 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37802 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37803 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37804 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37805 - atomic_read(&fscache_n_retrieval_ops),
37806 - atomic_read(&fscache_n_retrieval_op_waits),
37807 - atomic_read(&fscache_n_retrievals_object_dead));
37808 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37809 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37810 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37811
37812 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37813 - atomic_read(&fscache_n_stores),
37814 - atomic_read(&fscache_n_stores_ok),
37815 - atomic_read(&fscache_n_stores_again),
37816 - atomic_read(&fscache_n_stores_nobufs),
37817 - atomic_read(&fscache_n_stores_oom));
37818 + atomic_read_unchecked(&fscache_n_stores),
37819 + atomic_read_unchecked(&fscache_n_stores_ok),
37820 + atomic_read_unchecked(&fscache_n_stores_again),
37821 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37822 + atomic_read_unchecked(&fscache_n_stores_oom));
37823 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37824 - atomic_read(&fscache_n_store_ops),
37825 - atomic_read(&fscache_n_store_calls),
37826 - atomic_read(&fscache_n_store_pages),
37827 - atomic_read(&fscache_n_store_radix_deletes),
37828 - atomic_read(&fscache_n_store_pages_over_limit));
37829 + atomic_read_unchecked(&fscache_n_store_ops),
37830 + atomic_read_unchecked(&fscache_n_store_calls),
37831 + atomic_read_unchecked(&fscache_n_store_pages),
37832 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37833 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37834
37835 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37836 - atomic_read(&fscache_n_store_vmscan_not_storing),
37837 - atomic_read(&fscache_n_store_vmscan_gone),
37838 - atomic_read(&fscache_n_store_vmscan_busy),
37839 - atomic_read(&fscache_n_store_vmscan_cancelled));
37840 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37841 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37842 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37843 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37844
37845 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37846 - atomic_read(&fscache_n_op_pend),
37847 - atomic_read(&fscache_n_op_run),
37848 - atomic_read(&fscache_n_op_enqueue),
37849 - atomic_read(&fscache_n_op_cancelled),
37850 - atomic_read(&fscache_n_op_rejected));
37851 + atomic_read_unchecked(&fscache_n_op_pend),
37852 + atomic_read_unchecked(&fscache_n_op_run),
37853 + atomic_read_unchecked(&fscache_n_op_enqueue),
37854 + atomic_read_unchecked(&fscache_n_op_cancelled),
37855 + atomic_read_unchecked(&fscache_n_op_rejected));
37856 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37857 - atomic_read(&fscache_n_op_deferred_release),
37858 - atomic_read(&fscache_n_op_release),
37859 - atomic_read(&fscache_n_op_gc));
37860 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37861 + atomic_read_unchecked(&fscache_n_op_release),
37862 + atomic_read_unchecked(&fscache_n_op_gc));
37863
37864 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37865 atomic_read(&fscache_n_cop_alloc_object),
37866 diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
37867 --- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
37868 +++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
37869 @@ -4,6 +4,7 @@
37870 #include <linux/path.h>
37871 #include <linux/slab.h>
37872 #include <linux/fs_struct.h>
37873 +#include <linux/grsecurity.h>
37874 #include "internal.h"
37875
37876 static inline void path_get_longterm(struct path *path)
37877 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37878 old_root = fs->root;
37879 fs->root = *path;
37880 path_get_longterm(path);
37881 + gr_set_chroot_entries(current, path);
37882 write_seqcount_end(&fs->seq);
37883 spin_unlock(&fs->lock);
37884 if (old_root.dentry)
37885 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37886 && fs->root.mnt == old_root->mnt) {
37887 path_get_longterm(new_root);
37888 fs->root = *new_root;
37889 + gr_set_chroot_entries(p, new_root);
37890 count++;
37891 }
37892 if (fs->pwd.dentry == old_root->dentry
37893 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37894 spin_lock(&fs->lock);
37895 write_seqcount_begin(&fs->seq);
37896 tsk->fs = NULL;
37897 - kill = !--fs->users;
37898 + gr_clear_chroot_entries(tsk);
37899 + kill = !atomic_dec_return(&fs->users);
37900 write_seqcount_end(&fs->seq);
37901 spin_unlock(&fs->lock);
37902 task_unlock(tsk);
37903 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37904 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37905 /* We don't need to lock fs - think why ;-) */
37906 if (fs) {
37907 - fs->users = 1;
37908 + atomic_set(&fs->users, 1);
37909 fs->in_exec = 0;
37910 spin_lock_init(&fs->lock);
37911 seqcount_init(&fs->seq);
37912 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37913 spin_lock(&old->lock);
37914 fs->root = old->root;
37915 path_get_longterm(&fs->root);
37916 + /* instead of calling gr_set_chroot_entries here,
37917 + we call it from every caller of this function
37918 + */
37919 fs->pwd = old->pwd;
37920 path_get_longterm(&fs->pwd);
37921 spin_unlock(&old->lock);
37922 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37923
37924 task_lock(current);
37925 spin_lock(&fs->lock);
37926 - kill = !--fs->users;
37927 + kill = !atomic_dec_return(&fs->users);
37928 current->fs = new_fs;
37929 + gr_set_chroot_entries(current, &new_fs->root);
37930 spin_unlock(&fs->lock);
37931 task_unlock(current);
37932
37933 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37934
37935 /* to be mentioned only in INIT_TASK */
37936 struct fs_struct init_fs = {
37937 - .users = 1,
37938 + .users = ATOMIC_INIT(1),
37939 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37940 .seq = SEQCNT_ZERO,
37941 .umask = 0022,
37942 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37943 task_lock(current);
37944
37945 spin_lock(&init_fs.lock);
37946 - init_fs.users++;
37947 + atomic_inc(&init_fs.users);
37948 spin_unlock(&init_fs.lock);
37949
37950 spin_lock(&fs->lock);
37951 current->fs = &init_fs;
37952 - kill = !--fs->users;
37953 + gr_set_chroot_entries(current, &current->fs->root);
37954 + kill = !atomic_dec_return(&fs->users);
37955 spin_unlock(&fs->lock);
37956
37957 task_unlock(current);
37958 diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
37959 --- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
37960 +++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
37961 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
37962 INIT_LIST_HEAD(&cuse_conntbl[i]);
37963
37964 /* inherit and extend fuse_dev_operations */
37965 - cuse_channel_fops = fuse_dev_operations;
37966 - cuse_channel_fops.owner = THIS_MODULE;
37967 - cuse_channel_fops.open = cuse_channel_open;
37968 - cuse_channel_fops.release = cuse_channel_release;
37969 + pax_open_kernel();
37970 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
37971 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
37972 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
37973 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
37974 + pax_close_kernel();
37975
37976 cuse_class = class_create(THIS_MODULE, "cuse");
37977 if (IS_ERR(cuse_class))
37978 diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
37979 --- linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:14.000000000 -0400
37980 +++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
37981 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
37982 ret = 0;
37983 pipe_lock(pipe);
37984
37985 - if (!pipe->readers) {
37986 + if (!atomic_read(&pipe->readers)) {
37987 send_sig(SIGPIPE, current, 0);
37988 if (!ret)
37989 ret = -EPIPE;
37990 diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
37991 --- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
37992 +++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
37993 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
37994 return link;
37995 }
37996
37997 -static void free_link(char *link)
37998 +static void free_link(const char *link)
37999 {
38000 if (!IS_ERR(link))
38001 free_page((unsigned long) link);
38002 diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
38003 --- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
38004 +++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
38005 @@ -1525,7 +1525,7 @@ out:
38006
38007 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38008 {
38009 - char *s = nd_get_link(nd);
38010 + const char *s = nd_get_link(nd);
38011 if (!IS_ERR(s))
38012 kfree(s);
38013 }
38014 diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
38015 --- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
38016 +++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
38017 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38018 int err;
38019 u16 type;
38020
38021 + pax_track_stack();
38022 +
38023 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38024 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38025 if (err)
38026 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38027 int entry_size;
38028 int err;
38029
38030 + pax_track_stack();
38031 +
38032 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38033 str->name, cnid, inode->i_nlink);
38034 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38035 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38036 int entry_size, type;
38037 int err = 0;
38038
38039 + pax_track_stack();
38040 +
38041 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38042 cnid, src_dir->i_ino, src_name->name,
38043 dst_dir->i_ino, dst_name->name);
38044 diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
38045 --- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
38046 +++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
38047 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38048 struct hfsplus_readdir_data *rd;
38049 u16 type;
38050
38051 + pax_track_stack();
38052 +
38053 if (filp->f_pos >= inode->i_size)
38054 return 0;
38055
38056 diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
38057 --- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
38058 +++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
38059 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38060 int res = 0;
38061 u16 type;
38062
38063 + pax_track_stack();
38064 +
38065 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38066
38067 HFSPLUS_I(inode)->linkid = 0;
38068 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38069 struct hfs_find_data fd;
38070 hfsplus_cat_entry entry;
38071
38072 + pax_track_stack();
38073 +
38074 if (HFSPLUS_IS_RSRC(inode))
38075 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38076
38077 diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
38078 --- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38079 +++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
38080 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38081 struct hfsplus_cat_file *file;
38082 int res;
38083
38084 + pax_track_stack();
38085 +
38086 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38087 return -EOPNOTSUPP;
38088
38089 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38090 struct hfsplus_cat_file *file;
38091 ssize_t res = 0;
38092
38093 + pax_track_stack();
38094 +
38095 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38096 return -EOPNOTSUPP;
38097
38098 diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
38099 --- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
38100 +++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
38101 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38102 struct nls_table *nls = NULL;
38103 int err;
38104
38105 + pax_track_stack();
38106 +
38107 err = -EINVAL;
38108 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38109 if (!sbi)
38110 diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
38111 --- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38112 +++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38113 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38114 .kill_sb = kill_litter_super,
38115 };
38116
38117 -static struct vfsmount *hugetlbfs_vfsmount;
38118 +struct vfsmount *hugetlbfs_vfsmount;
38119
38120 static int can_do_hugetlb_shm(void)
38121 {
38122 diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
38123 --- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
38124 +++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
38125 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
38126
38127 #ifdef CONFIG_SMP
38128 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38129 - static atomic_t shared_last_ino;
38130 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38131 + static atomic_unchecked_t shared_last_ino;
38132 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38133
38134 res = next - LAST_INO_BATCH;
38135 }
38136 diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
38137 --- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
38138 +++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
38139 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38140 tid_t this_tid;
38141 int result;
38142
38143 + pax_track_stack();
38144 +
38145 jbd_debug(1, "Start checkpoint\n");
38146
38147 /*
38148 diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
38149 --- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
38150 +++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
38151 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38152 int outpos = 0;
38153 int pos=0;
38154
38155 + pax_track_stack();
38156 +
38157 memset(positions,0,sizeof(positions));
38158
38159 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38160 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38161 int outpos = 0;
38162 int pos=0;
38163
38164 + pax_track_stack();
38165 +
38166 memset(positions,0,sizeof(positions));
38167
38168 while (outpos<destlen) {
38169 diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
38170 --- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
38171 +++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
38172 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38173 int ret;
38174 uint32_t mysrclen, mydstlen;
38175
38176 + pax_track_stack();
38177 +
38178 mysrclen = *sourcelen;
38179 mydstlen = *dstlen - 8;
38180
38181 diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
38182 --- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
38183 +++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
38184 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38185 struct jffs2_unknown_node marker = {
38186 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38187 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38188 - .totlen = cpu_to_je32(c->cleanmarker_size)
38189 + .totlen = cpu_to_je32(c->cleanmarker_size),
38190 + .hdr_crc = cpu_to_je32(0)
38191 };
38192
38193 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38194 diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
38195 --- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
38196 +++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
38197 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38198 {
38199 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38200 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38201 - .totlen = constant_cpu_to_je32(8)
38202 + .totlen = constant_cpu_to_je32(8),
38203 + .hdr_crc = constant_cpu_to_je32(0)
38204 };
38205
38206 /*
38207 diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
38208 --- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
38209 +++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
38210 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38211
38212 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38213
38214 + pax_track_stack();
38215 +
38216 /* Phase.1 : Merge same xref */
38217 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38218 xref_tmphash[i] = NULL;
38219 diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
38220 --- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
38221 +++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
38222 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38223
38224 jfs_inode_cachep =
38225 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38226 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38227 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38228 init_once);
38229 if (jfs_inode_cachep == NULL)
38230 return -ENOMEM;
38231 diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
38232 --- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
38233 +++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
38234 @@ -86,7 +86,7 @@ config HAVE_AOUT
38235
38236 config BINFMT_AOUT
38237 tristate "Kernel support for a.out and ECOFF binaries"
38238 - depends on HAVE_AOUT
38239 + depends on HAVE_AOUT && BROKEN
38240 ---help---
38241 A.out (Assembler.OUTput) is a set of formats for libraries and
38242 executables used in the earliest versions of UNIX. Linux used
38243 diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
38244 --- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
38245 +++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
38246 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38247
38248 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38249 struct dentry *next;
38250 + char d_name[sizeof(next->d_iname)];
38251 + const unsigned char *name;
38252 +
38253 next = list_entry(p, struct dentry, d_u.d_child);
38254 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38255 if (!simple_positive(next)) {
38256 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38257
38258 spin_unlock(&next->d_lock);
38259 spin_unlock(&dentry->d_lock);
38260 - if (filldir(dirent, next->d_name.name,
38261 + name = next->d_name.name;
38262 + if (name == next->d_iname) {
38263 + memcpy(d_name, name, next->d_name.len);
38264 + name = d_name;
38265 + }
38266 + if (filldir(dirent, name,
38267 next->d_name.len, filp->f_pos,
38268 next->d_inode->i_ino,
38269 dt_type(next->d_inode)) < 0)
38270 diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
38271 --- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
38272 +++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
38273 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38274 /*
38275 * Cookie counter for NLM requests
38276 */
38277 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38278 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38279
38280 void nlmclnt_next_cookie(struct nlm_cookie *c)
38281 {
38282 - u32 cookie = atomic_inc_return(&nlm_cookie);
38283 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38284
38285 memcpy(c->data, &cookie, 4);
38286 c->len=4;
38287 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38288 struct nlm_rqst reqst, *req;
38289 int status;
38290
38291 + pax_track_stack();
38292 +
38293 req = &reqst;
38294 memset(req, 0, sizeof(*req));
38295 locks_init_lock(&req->a_args.lock.fl);
38296 diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
38297 --- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
38298 +++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
38299 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38300 return;
38301
38302 if (filp->f_op && filp->f_op->flock) {
38303 - struct file_lock fl = {
38304 + struct file_lock flock = {
38305 .fl_pid = current->tgid,
38306 .fl_file = filp,
38307 .fl_flags = FL_FLOCK,
38308 .fl_type = F_UNLCK,
38309 .fl_end = OFFSET_MAX,
38310 };
38311 - filp->f_op->flock(filp, F_SETLKW, &fl);
38312 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38313 - fl.fl_ops->fl_release_private(&fl);
38314 + filp->f_op->flock(filp, F_SETLKW, &flock);
38315 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38316 + flock.fl_ops->fl_release_private(&flock);
38317 }
38318
38319 lock_flocks();
38320 diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
38321 --- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
38322 +++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
38323 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38324 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38325 int err, valid0, valid1;
38326
38327 + pax_track_stack();
38328 +
38329 /* read first superblock */
38330 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38331 if (err)
38332 diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
38333 --- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
38334 +++ linux-3.0.4/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
38335 @@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
38336 return ret;
38337
38338 /*
38339 - * Read/write DACs are always overridable.
38340 - * Executable DACs are overridable for all directories and
38341 - * for non-directories that have least one exec bit set.
38342 + * Searching includes executable on directories, else just read.
38343 */
38344 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38345 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38346 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38347 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38348 +#ifdef CONFIG_GRKERNSEC
38349 + if (flags & IPERM_FLAG_RCU)
38350 + return -ECHILD;
38351 +#endif
38352 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38353 return 0;
38354 + }
38355
38356 /*
38357 - * Searching includes executable on directories, else just read.
38358 + * Read/write DACs are always overridable.
38359 + * Executable DACs are overridable for all directories and
38360 + * for non-directories that have least one exec bit set.
38361 */
38362 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38363 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38364 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38365 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38366 +#ifdef CONFIG_GRKERNSEC
38367 + if (flags & IPERM_FLAG_RCU)
38368 + return -ECHILD;
38369 +#endif
38370 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38371 return 0;
38372 + }
38373
38374 return -EACCES;
38375 }
38376 @@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
38377 br_read_unlock(vfsmount_lock);
38378 }
38379
38380 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38381 + return -ENOENT;
38382 +
38383 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38384 return 0;
38385
38386 @@ -593,9 +606,16 @@ static inline int exec_permission(struct
38387 if (ret == -ECHILD)
38388 return ret;
38389
38390 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38391 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38392 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38393 goto ok;
38394 + else {
38395 +#ifdef CONFIG_GRKERNSEC
38396 + if (flags & IPERM_FLAG_RCU)
38397 + return -ECHILD;
38398 +#endif
38399 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38400 + goto ok;
38401 + }
38402
38403 return ret;
38404 ok:
38405 @@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
38406 return error;
38407 }
38408
38409 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38410 + dentry->d_inode, dentry, nd->path.mnt)) {
38411 + error = -EACCES;
38412 + *p = ERR_PTR(error); /* no ->put_link(), please */
38413 + path_put(&nd->path);
38414 + return error;
38415 + }
38416 +
38417 nd->last_type = LAST_BIND;
38418 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38419 error = PTR_ERR(*p);
38420 if (!IS_ERR(*p)) {
38421 - char *s = nd_get_link(nd);
38422 + const char *s = nd_get_link(nd);
38423 error = 0;
38424 if (s)
38425 error = __vfs_follow_link(nd, s);
38426 @@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
38427 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38428
38429 if (likely(!retval)) {
38430 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38431 + return -ENOENT;
38432 +
38433 if (unlikely(!audit_dummy_context())) {
38434 if (nd->path.dentry && nd->inode)
38435 audit_inode(name, nd->path.dentry);
38436 @@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
38437 return error;
38438 }
38439
38440 +/*
38441 + * Note that while the flag value (low two bits) for sys_open means:
38442 + * 00 - read-only
38443 + * 01 - write-only
38444 + * 10 - read-write
38445 + * 11 - special
38446 + * it is changed into
38447 + * 00 - no permissions needed
38448 + * 01 - read-permission
38449 + * 10 - write-permission
38450 + * 11 - read-write
38451 + * for the internal routines (ie open_namei()/follow_link() etc)
38452 + * This is more logical, and also allows the 00 "no perm needed"
38453 + * to be used for symlinks (where the permissions are checked
38454 + * later).
38455 + *
38456 +*/
38457 +static inline int open_to_namei_flags(int flag)
38458 +{
38459 + if ((flag+1) & O_ACCMODE)
38460 + flag++;
38461 + return flag;
38462 +}
38463 +
38464 static int may_open(struct path *path, int acc_mode, int flag)
38465 {
38466 struct dentry *dentry = path->dentry;
38467 @@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
38468 /*
38469 * Ensure there are no outstanding leases on the file.
38470 */
38471 - return break_lease(inode, flag);
38472 + error = break_lease(inode, flag);
38473 +
38474 + if (error)
38475 + return error;
38476 +
38477 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38478 + error = -EPERM;
38479 + goto exit;
38480 + }
38481 +
38482 + if (gr_handle_rawio(inode)) {
38483 + error = -EPERM;
38484 + goto exit;
38485 + }
38486 +
38487 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38488 + error = -EACCES;
38489 + goto exit;
38490 + }
38491 +exit:
38492 + return error;
38493 }
38494
38495 static int handle_truncate(struct file *filp)
38496 @@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
38497 }
38498
38499 /*
38500 - * Note that while the flag value (low two bits) for sys_open means:
38501 - * 00 - read-only
38502 - * 01 - write-only
38503 - * 10 - read-write
38504 - * 11 - special
38505 - * it is changed into
38506 - * 00 - no permissions needed
38507 - * 01 - read-permission
38508 - * 10 - write-permission
38509 - * 11 - read-write
38510 - * for the internal routines (ie open_namei()/follow_link() etc)
38511 - * This is more logical, and also allows the 00 "no perm needed"
38512 - * to be used for symlinks (where the permissions are checked
38513 - * later).
38514 - *
38515 -*/
38516 -static inline int open_to_namei_flags(int flag)
38517 -{
38518 - if ((flag+1) & O_ACCMODE)
38519 - flag++;
38520 - return flag;
38521 -}
38522 -
38523 -/*
38524 * Handle the last step of open()
38525 */
38526 static struct file *do_last(struct nameidata *nd, struct path *path,
38527 @@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
38528 struct dentry *dir = nd->path.dentry;
38529 struct dentry *dentry;
38530 int open_flag = op->open_flag;
38531 + int flag = open_to_namei_flags(open_flag);
38532 int will_truncate = open_flag & O_TRUNC;
38533 int want_write = 0;
38534 int acc_mode = op->acc_mode;
38535 @@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
38536 /* Negative dentry, just create the file */
38537 if (!dentry->d_inode) {
38538 int mode = op->mode;
38539 +
38540 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38541 + error = -EACCES;
38542 + goto exit_mutex_unlock;
38543 + }
38544 +
38545 if (!IS_POSIXACL(dir->d_inode))
38546 mode &= ~current_umask();
38547 /*
38548 @@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
38549 error = vfs_create(dir->d_inode, dentry, mode, nd);
38550 if (error)
38551 goto exit_mutex_unlock;
38552 + else
38553 + gr_handle_create(path->dentry, path->mnt);
38554 mutex_unlock(&dir->d_inode->i_mutex);
38555 dput(nd->path.dentry);
38556 nd->path.dentry = dentry;
38557 @@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
38558 /*
38559 * It already exists.
38560 */
38561 +
38562 + /* only check if O_CREAT is specified, all other checks need to go
38563 + into may_open */
38564 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38565 + error = -EACCES;
38566 + goto exit_mutex_unlock;
38567 + }
38568 +
38569 mutex_unlock(&dir->d_inode->i_mutex);
38570 audit_inode(pathname, path->dentry);
38571
38572 @@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38573 error = may_mknod(mode);
38574 if (error)
38575 goto out_dput;
38576 +
38577 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38578 + error = -EPERM;
38579 + goto out_dput;
38580 + }
38581 +
38582 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38583 + error = -EACCES;
38584 + goto out_dput;
38585 + }
38586 +
38587 error = mnt_want_write(nd.path.mnt);
38588 if (error)
38589 goto out_dput;
38590 @@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38591 }
38592 out_drop_write:
38593 mnt_drop_write(nd.path.mnt);
38594 +
38595 + if (!error)
38596 + gr_handle_create(dentry, nd.path.mnt);
38597 out_dput:
38598 dput(dentry);
38599 out_unlock:
38600 @@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38601 if (IS_ERR(dentry))
38602 goto out_unlock;
38603
38604 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38605 + error = -EACCES;
38606 + goto out_dput;
38607 + }
38608 +
38609 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38610 mode &= ~current_umask();
38611 error = mnt_want_write(nd.path.mnt);
38612 @@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38613 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38614 out_drop_write:
38615 mnt_drop_write(nd.path.mnt);
38616 +
38617 + if (!error)
38618 + gr_handle_create(dentry, nd.path.mnt);
38619 +
38620 out_dput:
38621 dput(dentry);
38622 out_unlock:
38623 @@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
38624 char * name;
38625 struct dentry *dentry;
38626 struct nameidata nd;
38627 + ino_t saved_ino = 0;
38628 + dev_t saved_dev = 0;
38629
38630 error = user_path_parent(dfd, pathname, &nd, &name);
38631 if (error)
38632 @@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
38633 error = -ENOENT;
38634 goto exit3;
38635 }
38636 +
38637 + if (dentry->d_inode->i_nlink <= 1) {
38638 + saved_ino = dentry->d_inode->i_ino;
38639 + saved_dev = gr_get_dev_from_dentry(dentry);
38640 + }
38641 +
38642 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38643 + error = -EACCES;
38644 + goto exit3;
38645 + }
38646 +
38647 error = mnt_want_write(nd.path.mnt);
38648 if (error)
38649 goto exit3;
38650 @@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
38651 if (error)
38652 goto exit4;
38653 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38654 + if (!error && (saved_dev || saved_ino))
38655 + gr_handle_delete(saved_ino, saved_dev);
38656 exit4:
38657 mnt_drop_write(nd.path.mnt);
38658 exit3:
38659 @@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
38660 struct dentry *dentry;
38661 struct nameidata nd;
38662 struct inode *inode = NULL;
38663 + ino_t saved_ino = 0;
38664 + dev_t saved_dev = 0;
38665
38666 error = user_path_parent(dfd, pathname, &nd, &name);
38667 if (error)
38668 @@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
38669 if (!inode)
38670 goto slashes;
38671 ihold(inode);
38672 +
38673 + if (inode->i_nlink <= 1) {
38674 + saved_ino = inode->i_ino;
38675 + saved_dev = gr_get_dev_from_dentry(dentry);
38676 + }
38677 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38678 + error = -EACCES;
38679 + goto exit2;
38680 + }
38681 +
38682 error = mnt_want_write(nd.path.mnt);
38683 if (error)
38684 goto exit2;
38685 @@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
38686 if (error)
38687 goto exit3;
38688 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38689 + if (!error && (saved_ino || saved_dev))
38690 + gr_handle_delete(saved_ino, saved_dev);
38691 exit3:
38692 mnt_drop_write(nd.path.mnt);
38693 exit2:
38694 @@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38695 if (IS_ERR(dentry))
38696 goto out_unlock;
38697
38698 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38699 + error = -EACCES;
38700 + goto out_dput;
38701 + }
38702 +
38703 error = mnt_want_write(nd.path.mnt);
38704 if (error)
38705 goto out_dput;
38706 @@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38707 if (error)
38708 goto out_drop_write;
38709 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38710 + if (!error)
38711 + gr_handle_create(dentry, nd.path.mnt);
38712 out_drop_write:
38713 mnt_drop_write(nd.path.mnt);
38714 out_dput:
38715 @@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38716 error = PTR_ERR(new_dentry);
38717 if (IS_ERR(new_dentry))
38718 goto out_unlock;
38719 +
38720 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38721 + old_path.dentry->d_inode,
38722 + old_path.dentry->d_inode->i_mode, to)) {
38723 + error = -EACCES;
38724 + goto out_dput;
38725 + }
38726 +
38727 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38728 + old_path.dentry, old_path.mnt, to)) {
38729 + error = -EACCES;
38730 + goto out_dput;
38731 + }
38732 +
38733 error = mnt_want_write(nd.path.mnt);
38734 if (error)
38735 goto out_dput;
38736 @@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38737 if (error)
38738 goto out_drop_write;
38739 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38740 + if (!error)
38741 + gr_handle_create(new_dentry, nd.path.mnt);
38742 out_drop_write:
38743 mnt_drop_write(nd.path.mnt);
38744 out_dput:
38745 @@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38746 char *to;
38747 int error;
38748
38749 + pax_track_stack();
38750 +
38751 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38752 if (error)
38753 goto exit;
38754 @@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38755 if (new_dentry == trap)
38756 goto exit5;
38757
38758 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38759 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38760 + to);
38761 + if (error)
38762 + goto exit5;
38763 +
38764 error = mnt_want_write(oldnd.path.mnt);
38765 if (error)
38766 goto exit5;
38767 @@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38768 goto exit6;
38769 error = vfs_rename(old_dir->d_inode, old_dentry,
38770 new_dir->d_inode, new_dentry);
38771 + if (!error)
38772 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38773 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38774 exit6:
38775 mnt_drop_write(oldnd.path.mnt);
38776 exit5:
38777 @@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
38778
38779 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38780 {
38781 + char tmpbuf[64];
38782 + const char *newlink;
38783 int len;
38784
38785 len = PTR_ERR(link);
38786 @@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
38787 len = strlen(link);
38788 if (len > (unsigned) buflen)
38789 len = buflen;
38790 - if (copy_to_user(buffer, link, len))
38791 +
38792 + if (len < sizeof(tmpbuf)) {
38793 + memcpy(tmpbuf, link, len);
38794 + newlink = tmpbuf;
38795 + } else
38796 + newlink = link;
38797 +
38798 + if (copy_to_user(buffer, newlink, len))
38799 len = -EFAULT;
38800 out:
38801 return len;
38802 diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
38803 --- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
38804 +++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
38805 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38806 if (!(sb->s_flags & MS_RDONLY))
38807 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38808 up_write(&sb->s_umount);
38809 +
38810 + gr_log_remount(mnt->mnt_devname, retval);
38811 +
38812 return retval;
38813 }
38814
38815 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38816 br_write_unlock(vfsmount_lock);
38817 up_write(&namespace_sem);
38818 release_mounts(&umount_list);
38819 +
38820 + gr_log_unmount(mnt->mnt_devname, retval);
38821 +
38822 return retval;
38823 }
38824
38825 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38826 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38827 MS_STRICTATIME);
38828
38829 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38830 + retval = -EPERM;
38831 + goto dput_out;
38832 + }
38833 +
38834 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38835 + retval = -EPERM;
38836 + goto dput_out;
38837 + }
38838 +
38839 if (flags & MS_REMOUNT)
38840 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38841 data_page);
38842 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38843 dev_name, data_page);
38844 dput_out:
38845 path_put(&path);
38846 +
38847 + gr_log_mount(dev_name, dir_name, retval);
38848 +
38849 return retval;
38850 }
38851
38852 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38853 if (error)
38854 goto out2;
38855
38856 + if (gr_handle_chroot_pivot()) {
38857 + error = -EPERM;
38858 + goto out2;
38859 + }
38860 +
38861 get_fs_root(current->fs, &root);
38862 error = lock_mount(&old);
38863 if (error)
38864 diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
38865 --- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38866 +++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
38867 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38868 int res, val = 0, len;
38869 __u8 __name[NCP_MAXPATHLEN + 1];
38870
38871 + pax_track_stack();
38872 +
38873 if (dentry == dentry->d_sb->s_root)
38874 return 1;
38875
38876 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38877 int error, res, len;
38878 __u8 __name[NCP_MAXPATHLEN + 1];
38879
38880 + pax_track_stack();
38881 +
38882 error = -EIO;
38883 if (!ncp_conn_valid(server))
38884 goto finished;
38885 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38886 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38887 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38888
38889 + pax_track_stack();
38890 +
38891 ncp_age_dentry(server, dentry);
38892 len = sizeof(__name);
38893 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38894 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38895 int error, len;
38896 __u8 __name[NCP_MAXPATHLEN + 1];
38897
38898 + pax_track_stack();
38899 +
38900 DPRINTK("ncp_mkdir: making %s/%s\n",
38901 dentry->d_parent->d_name.name, dentry->d_name.name);
38902
38903 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
38904 int old_len, new_len;
38905 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38906
38907 + pax_track_stack();
38908 +
38909 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38910 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38911 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38912 diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
38913 --- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38914 +++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38915 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38916 #endif
38917 struct ncp_entry_info finfo;
38918
38919 + pax_track_stack();
38920 +
38921 memset(&data, 0, sizeof(data));
38922 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38923 if (!server)
38924 diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
38925 --- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38926 +++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38927 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38928 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38929 nfsi->attrtimeo_timestamp = jiffies;
38930
38931 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38932 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38933 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38934 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38935 else
38936 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38937 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38938 }
38939
38940 -static atomic_long_t nfs_attr_generation_counter;
38941 +static atomic_long_unchecked_t nfs_attr_generation_counter;
38942
38943 static unsigned long nfs_read_attr_generation_counter(void)
38944 {
38945 - return atomic_long_read(&nfs_attr_generation_counter);
38946 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
38947 }
38948
38949 unsigned long nfs_inc_attr_generation_counter(void)
38950 {
38951 - return atomic_long_inc_return(&nfs_attr_generation_counter);
38952 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
38953 }
38954
38955 void nfs_fattr_init(struct nfs_fattr *fattr)
38956 diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
38957 --- linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
38958 +++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
38959 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
38960 unsigned int strhashval;
38961 int err;
38962
38963 + pax_track_stack();
38964 +
38965 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
38966 (long long) lock->lk_offset,
38967 (long long) lock->lk_length);
38968 diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
38969 --- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
38970 +++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
38971 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
38972 .dentry = dentry,
38973 };
38974
38975 + pax_track_stack();
38976 +
38977 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
38978 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
38979 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
38980 diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
38981 --- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
38982 +++ linux-3.0.4/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
38983 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
38984 } else {
38985 oldfs = get_fs();
38986 set_fs(KERNEL_DS);
38987 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
38988 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
38989 set_fs(oldfs);
38990 }
38991
38992 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
38993
38994 /* Write the data. */
38995 oldfs = get_fs(); set_fs(KERNEL_DS);
38996 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
38997 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
38998 set_fs(oldfs);
38999 if (host_err < 0)
39000 goto out_nfserr;
39001 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39002 */
39003
39004 oldfs = get_fs(); set_fs(KERNEL_DS);
39005 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
39006 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39007 set_fs(oldfs);
39008
39009 if (host_err < 0)
39010 diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
39011 --- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
39012 +++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
39013 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39014 goto out_close_fd;
39015
39016 ret = -EFAULT;
39017 - if (copy_to_user(buf, &fanotify_event_metadata,
39018 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39019 + copy_to_user(buf, &fanotify_event_metadata,
39020 fanotify_event_metadata.event_len))
39021 goto out_kill_access_response;
39022
39023 diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
39024 --- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
39025 +++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
39026 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39027 * get set to 0 so it will never get 'freed'
39028 */
39029 static struct fsnotify_event *q_overflow_event;
39030 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39031 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39032
39033 /**
39034 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39035 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39036 */
39037 u32 fsnotify_get_cookie(void)
39038 {
39039 - return atomic_inc_return(&fsnotify_sync_cookie);
39040 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39041 }
39042 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39043
39044 diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
39045 --- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
39046 +++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
39047 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
39048 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39049 ~(s64)(ndir->itype.index.block_size - 1)));
39050 /* Bounds checks. */
39051 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39052 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39053 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39054 "inode 0x%lx or driver bug.", vdir->i_ino);
39055 goto err_out;
39056 diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
39057 --- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
39058 +++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
39059 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39060 #endif /* NTFS_RW */
39061 };
39062
39063 -const struct file_operations ntfs_empty_file_ops = {};
39064 +const struct file_operations ntfs_empty_file_ops __read_only;
39065
39066 -const struct inode_operations ntfs_empty_inode_ops = {};
39067 +const struct inode_operations ntfs_empty_inode_ops __read_only;
39068 diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
39069 --- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
39070 +++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
39071 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39072 goto bail;
39073 }
39074
39075 - atomic_inc(&osb->alloc_stats.moves);
39076 + atomic_inc_unchecked(&osb->alloc_stats.moves);
39077
39078 bail:
39079 if (handle)
39080 diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
39081 --- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
39082 +++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
39083 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39084 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39085 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39086
39087 + pax_track_stack();
39088 +
39089 /* At some point it might be nice to break this function up a
39090 * bit. */
39091
39092 diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
39093 --- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
39094 +++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
39095 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
39096
39097 struct ocfs2_alloc_stats
39098 {
39099 - atomic_t moves;
39100 - atomic_t local_data;
39101 - atomic_t bitmap_data;
39102 - atomic_t bg_allocs;
39103 - atomic_t bg_extends;
39104 + atomic_unchecked_t moves;
39105 + atomic_unchecked_t local_data;
39106 + atomic_unchecked_t bitmap_data;
39107 + atomic_unchecked_t bg_allocs;
39108 + atomic_unchecked_t bg_extends;
39109 };
39110
39111 enum ocfs2_local_alloc_state
39112 diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
39113 --- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
39114 +++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
39115 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39116 mlog_errno(status);
39117 goto bail;
39118 }
39119 - atomic_inc(&osb->alloc_stats.bg_extends);
39120 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39121
39122 /* You should never ask for this much metadata */
39123 BUG_ON(bits_wanted >
39124 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39125 mlog_errno(status);
39126 goto bail;
39127 }
39128 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39129 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39130
39131 *suballoc_loc = res.sr_bg_blkno;
39132 *suballoc_bit_start = res.sr_bit_offset;
39133 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39134 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39135 res->sr_bits);
39136
39137 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39138 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39139
39140 BUG_ON(res->sr_bits != 1);
39141
39142 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39143 mlog_errno(status);
39144 goto bail;
39145 }
39146 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39147 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39148
39149 BUG_ON(res.sr_bits != 1);
39150
39151 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39152 cluster_start,
39153 num_clusters);
39154 if (!status)
39155 - atomic_inc(&osb->alloc_stats.local_data);
39156 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39157 } else {
39158 if (min_clusters > (osb->bitmap_cpg - 1)) {
39159 /* The only paths asking for contiguousness
39160 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39161 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39162 res.sr_bg_blkno,
39163 res.sr_bit_offset);
39164 - atomic_inc(&osb->alloc_stats.bitmap_data);
39165 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39166 *num_clusters = res.sr_bits;
39167 }
39168 }
39169 diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
39170 --- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
39171 +++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
39172 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39173 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39174 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39175 "Stats",
39176 - atomic_read(&osb->alloc_stats.bitmap_data),
39177 - atomic_read(&osb->alloc_stats.local_data),
39178 - atomic_read(&osb->alloc_stats.bg_allocs),
39179 - atomic_read(&osb->alloc_stats.moves),
39180 - atomic_read(&osb->alloc_stats.bg_extends));
39181 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39182 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39183 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39184 + atomic_read_unchecked(&osb->alloc_stats.moves),
39185 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39186
39187 out += snprintf(buf + out, len - out,
39188 "%10s => State: %u Descriptor: %llu Size: %u bits "
39189 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
39190 spin_lock_init(&osb->osb_xattr_lock);
39191 ocfs2_init_steal_slots(osb);
39192
39193 - atomic_set(&osb->alloc_stats.moves, 0);
39194 - atomic_set(&osb->alloc_stats.local_data, 0);
39195 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39196 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39197 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39198 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39199 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39200 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39201 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39202 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39203
39204 /* Copy the blockcheck stats from the superblock probe */
39205 osb->osb_ecc_stats = *stats;
39206 diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
39207 --- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
39208 +++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
39209 @@ -142,7 +142,7 @@ bail:
39210
39211 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39212 {
39213 - char *link = nd_get_link(nd);
39214 + const char *link = nd_get_link(nd);
39215 if (!IS_ERR(link))
39216 kfree(link);
39217 }
39218 diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
39219 --- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
39220 +++ linux-3.0.4/fs/open.c 2011-08-23 21:48:14.000000000 -0400
39221 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39222 error = locks_verify_truncate(inode, NULL, length);
39223 if (!error)
39224 error = security_path_truncate(&path);
39225 +
39226 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39227 + error = -EACCES;
39228 +
39229 if (!error)
39230 error = do_truncate(path.dentry, length, 0, NULL);
39231
39232 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39233 if (__mnt_is_readonly(path.mnt))
39234 res = -EROFS;
39235
39236 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39237 + res = -EACCES;
39238 +
39239 out_path_release:
39240 path_put(&path);
39241 out:
39242 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39243 if (error)
39244 goto dput_and_out;
39245
39246 + gr_log_chdir(path.dentry, path.mnt);
39247 +
39248 set_fs_pwd(current->fs, &path);
39249
39250 dput_and_out:
39251 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39252 goto out_putf;
39253
39254 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39255 +
39256 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39257 + error = -EPERM;
39258 +
39259 + if (!error)
39260 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39261 +
39262 if (!error)
39263 set_fs_pwd(current->fs, &file->f_path);
39264 out_putf:
39265 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39266 if (error)
39267 goto dput_and_out;
39268
39269 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39270 + goto dput_and_out;
39271 +
39272 + if (gr_handle_chroot_caps(&path)) {
39273 + error = -ENOMEM;
39274 + goto dput_and_out;
39275 + }
39276 +
39277 set_fs_root(current->fs, &path);
39278 +
39279 + gr_handle_chroot_chdir(&path);
39280 +
39281 error = 0;
39282 dput_and_out:
39283 path_put(&path);
39284 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39285 err = mnt_want_write_file(file);
39286 if (err)
39287 goto out_putf;
39288 +
39289 mutex_lock(&inode->i_mutex);
39290 +
39291 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39292 + err = -EACCES;
39293 + goto out_unlock;
39294 + }
39295 +
39296 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39297 if (err)
39298 goto out_unlock;
39299 if (mode == (mode_t) -1)
39300 mode = inode->i_mode;
39301 +
39302 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39303 + err = -EACCES;
39304 + goto out_unlock;
39305 + }
39306 +
39307 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39308 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39309 err = notify_change(dentry, &newattrs);
39310 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39311 error = mnt_want_write(path.mnt);
39312 if (error)
39313 goto dput_and_out;
39314 +
39315 mutex_lock(&inode->i_mutex);
39316 +
39317 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39318 + error = -EACCES;
39319 + goto out_unlock;
39320 + }
39321 +
39322 error = security_path_chmod(path.dentry, path.mnt, mode);
39323 if (error)
39324 goto out_unlock;
39325 if (mode == (mode_t) -1)
39326 mode = inode->i_mode;
39327 +
39328 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39329 + error = -EACCES;
39330 + goto out_unlock;
39331 + }
39332 +
39333 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39334 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39335 error = notify_change(path.dentry, &newattrs);
39336 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39337 int error;
39338 struct iattr newattrs;
39339
39340 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39341 + return -EACCES;
39342 +
39343 newattrs.ia_valid = ATTR_CTIME;
39344 if (user != (uid_t) -1) {
39345 newattrs.ia_valid |= ATTR_UID;
39346 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39347 if (!IS_ERR(tmp)) {
39348 fd = get_unused_fd_flags(flags);
39349 if (fd >= 0) {
39350 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39351 + struct file *f;
39352 + /* don't allow to be set by userland */
39353 + flags &= ~FMODE_GREXEC;
39354 + f = do_filp_open(dfd, tmp, &op, lookup);
39355 if (IS_ERR(f)) {
39356 put_unused_fd(fd);
39357 fd = PTR_ERR(f);
39358 diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
39359 --- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
39360 +++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
39361 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39362 ldm_error ("A VBLK claims to have %d parts.", num);
39363 return false;
39364 }
39365 +
39366 if (rec >= num) {
39367 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39368 return false;
39369 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39370 goto found;
39371 }
39372
39373 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39374 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39375 if (!f) {
39376 ldm_crit ("Out of memory.");
39377 return false;
39378 diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
39379 --- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
39380 +++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
39381 @@ -420,9 +420,9 @@ redo:
39382 }
39383 if (bufs) /* More to do? */
39384 continue;
39385 - if (!pipe->writers)
39386 + if (!atomic_read(&pipe->writers))
39387 break;
39388 - if (!pipe->waiting_writers) {
39389 + if (!atomic_read(&pipe->waiting_writers)) {
39390 /* syscall merging: Usually we must not sleep
39391 * if O_NONBLOCK is set, or if we got some data.
39392 * But if a writer sleeps in kernel space, then
39393 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39394 mutex_lock(&inode->i_mutex);
39395 pipe = inode->i_pipe;
39396
39397 - if (!pipe->readers) {
39398 + if (!atomic_read(&pipe->readers)) {
39399 send_sig(SIGPIPE, current, 0);
39400 ret = -EPIPE;
39401 goto out;
39402 @@ -530,7 +530,7 @@ redo1:
39403 for (;;) {
39404 int bufs;
39405
39406 - if (!pipe->readers) {
39407 + if (!atomic_read(&pipe->readers)) {
39408 send_sig(SIGPIPE, current, 0);
39409 if (!ret)
39410 ret = -EPIPE;
39411 @@ -616,9 +616,9 @@ redo2:
39412 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39413 do_wakeup = 0;
39414 }
39415 - pipe->waiting_writers++;
39416 + atomic_inc(&pipe->waiting_writers);
39417 pipe_wait(pipe);
39418 - pipe->waiting_writers--;
39419 + atomic_dec(&pipe->waiting_writers);
39420 }
39421 out:
39422 mutex_unlock(&inode->i_mutex);
39423 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39424 mask = 0;
39425 if (filp->f_mode & FMODE_READ) {
39426 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39427 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39428 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39429 mask |= POLLHUP;
39430 }
39431
39432 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39433 * Most Unices do not set POLLERR for FIFOs but on Linux they
39434 * behave exactly like pipes for poll().
39435 */
39436 - if (!pipe->readers)
39437 + if (!atomic_read(&pipe->readers))
39438 mask |= POLLERR;
39439 }
39440
39441 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39442
39443 mutex_lock(&inode->i_mutex);
39444 pipe = inode->i_pipe;
39445 - pipe->readers -= decr;
39446 - pipe->writers -= decw;
39447 + atomic_sub(decr, &pipe->readers);
39448 + atomic_sub(decw, &pipe->writers);
39449
39450 - if (!pipe->readers && !pipe->writers) {
39451 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39452 free_pipe_info(inode);
39453 } else {
39454 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39455 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39456
39457 if (inode->i_pipe) {
39458 ret = 0;
39459 - inode->i_pipe->readers++;
39460 + atomic_inc(&inode->i_pipe->readers);
39461 }
39462
39463 mutex_unlock(&inode->i_mutex);
39464 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39465
39466 if (inode->i_pipe) {
39467 ret = 0;
39468 - inode->i_pipe->writers++;
39469 + atomic_inc(&inode->i_pipe->writers);
39470 }
39471
39472 mutex_unlock(&inode->i_mutex);
39473 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39474 if (inode->i_pipe) {
39475 ret = 0;
39476 if (filp->f_mode & FMODE_READ)
39477 - inode->i_pipe->readers++;
39478 + atomic_inc(&inode->i_pipe->readers);
39479 if (filp->f_mode & FMODE_WRITE)
39480 - inode->i_pipe->writers++;
39481 + atomic_inc(&inode->i_pipe->writers);
39482 }
39483
39484 mutex_unlock(&inode->i_mutex);
39485 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39486 inode->i_pipe = NULL;
39487 }
39488
39489 -static struct vfsmount *pipe_mnt __read_mostly;
39490 +struct vfsmount *pipe_mnt __read_mostly;
39491
39492 /*
39493 * pipefs_dname() is called from d_path().
39494 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39495 goto fail_iput;
39496 inode->i_pipe = pipe;
39497
39498 - pipe->readers = pipe->writers = 1;
39499 + atomic_set(&pipe->readers, 1);
39500 + atomic_set(&pipe->writers, 1);
39501 inode->i_fop = &rdwr_pipefifo_fops;
39502
39503 /*
39504 diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
39505 --- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
39506 +++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
39507 @@ -60,6 +60,7 @@
39508 #include <linux/tty.h>
39509 #include <linux/string.h>
39510 #include <linux/mman.h>
39511 +#include <linux/grsecurity.h>
39512 #include <linux/proc_fs.h>
39513 #include <linux/ioport.h>
39514 #include <linux/uaccess.h>
39515 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39516 seq_putc(m, '\n');
39517 }
39518
39519 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39520 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39521 +{
39522 + if (p->mm)
39523 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39524 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39525 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39526 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39527 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39528 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39529 + else
39530 + seq_printf(m, "PaX:\t-----\n");
39531 +}
39532 +#endif
39533 +
39534 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39535 struct pid *pid, struct task_struct *task)
39536 {
39537 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39538 task_cpus_allowed(m, task);
39539 cpuset_task_status_allowed(m, task);
39540 task_context_switch_counts(m, task);
39541 +
39542 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39543 + task_pax(m, task);
39544 +#endif
39545 +
39546 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39547 + task_grsec_rbac(m, task);
39548 +#endif
39549 +
39550 return 0;
39551 }
39552
39553 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39554 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39555 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39556 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39557 +#endif
39558 +
39559 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39560 struct pid *pid, struct task_struct *task, int whole)
39561 {
39562 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39563 cputime_t cutime, cstime, utime, stime;
39564 cputime_t cgtime, gtime;
39565 unsigned long rsslim = 0;
39566 - char tcomm[sizeof(task->comm)];
39567 + char tcomm[sizeof(task->comm)] = { 0 };
39568 unsigned long flags;
39569
39570 + pax_track_stack();
39571 +
39572 state = *get_task_state(task);
39573 vsize = eip = esp = 0;
39574 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39575 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39576 gtime = task->gtime;
39577 }
39578
39579 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39580 + if (PAX_RAND_FLAGS(mm)) {
39581 + eip = 0;
39582 + esp = 0;
39583 + wchan = 0;
39584 + }
39585 +#endif
39586 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39587 + wchan = 0;
39588 + eip =0;
39589 + esp =0;
39590 +#endif
39591 +
39592 /* scale priority and nice values from timeslices to -20..20 */
39593 /* to make it look like a "normal" Unix priority/nice value */
39594 priority = task_prio(task);
39595 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39596 vsize,
39597 mm ? get_mm_rss(mm) : 0,
39598 rsslim,
39599 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39600 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39601 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39602 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39603 +#else
39604 mm ? (permitted ? mm->start_code : 1) : 0,
39605 mm ? (permitted ? mm->end_code : 1) : 0,
39606 (permitted && mm) ? mm->start_stack : 0,
39607 +#endif
39608 esp,
39609 eip,
39610 /* The signal information here is obsolete.
39611 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39612
39613 return 0;
39614 }
39615 +
39616 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39617 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39618 +{
39619 + u32 curr_ip = 0;
39620 + unsigned long flags;
39621 +
39622 + if (lock_task_sighand(task, &flags)) {
39623 + curr_ip = task->signal->curr_ip;
39624 + unlock_task_sighand(task, &flags);
39625 + }
39626 +
39627 + return sprintf(buffer, "%pI4\n", &curr_ip);
39628 +}
39629 +#endif
39630 diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
39631 --- linux-3.0.4/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
39632 +++ linux-3.0.4/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
39633 @@ -107,6 +107,22 @@ struct pid_entry {
39634 union proc_op op;
39635 };
39636
39637 +struct getdents_callback {
39638 + struct linux_dirent __user * current_dir;
39639 + struct linux_dirent __user * previous;
39640 + struct file * file;
39641 + int count;
39642 + int error;
39643 +};
39644 +
39645 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39646 + loff_t offset, u64 ino, unsigned int d_type)
39647 +{
39648 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39649 + buf->error = -EINVAL;
39650 + return 0;
39651 +}
39652 +
39653 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39654 .name = (NAME), \
39655 .len = sizeof(NAME) - 1, \
39656 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
39657 if (task == current)
39658 return mm;
39659
39660 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39661 + return ERR_PTR(-EPERM);
39662 +
39663 /*
39664 * If current is actively ptrace'ing, and would also be
39665 * permitted to freshly attach with ptrace now, permit it.
39666 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
39667 if (!mm->arg_end)
39668 goto out_mm; /* Shh! No looking before we're done */
39669
39670 + if (gr_acl_handle_procpidmem(task))
39671 + goto out_mm;
39672 +
39673 len = mm->arg_end - mm->arg_start;
39674
39675 if (len > PAGE_SIZE)
39676 @@ -309,12 +331,28 @@ out:
39677 return res;
39678 }
39679
39680 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39681 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39682 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39683 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39684 +#endif
39685 +
39686 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39687 {
39688 struct mm_struct *mm = mm_for_maps(task);
39689 int res = PTR_ERR(mm);
39690 if (mm && !IS_ERR(mm)) {
39691 unsigned int nwords = 0;
39692 +
39693 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39694 + /* allow if we're currently ptracing this task */
39695 + if (PAX_RAND_FLAGS(mm) &&
39696 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39697 + mmput(mm);
39698 + return res;
39699 + }
39700 +#endif
39701 +
39702 do {
39703 nwords += 2;
39704 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39705 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
39706 }
39707
39708
39709 -#ifdef CONFIG_KALLSYMS
39710 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39711 /*
39712 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39713 * Returns the resolved symbol. If that fails, simply return the address.
39714 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
39715 mutex_unlock(&task->signal->cred_guard_mutex);
39716 }
39717
39718 -#ifdef CONFIG_STACKTRACE
39719 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39720
39721 #define MAX_STACK_TRACE_DEPTH 64
39722
39723 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
39724 return count;
39725 }
39726
39727 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39728 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39729 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39730 {
39731 long nr;
39732 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
39733 /************************************************************************/
39734
39735 /* permission checks */
39736 -static int proc_fd_access_allowed(struct inode *inode)
39737 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39738 {
39739 struct task_struct *task;
39740 int allowed = 0;
39741 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
39742 */
39743 task = get_proc_task(inode);
39744 if (task) {
39745 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39746 + if (log)
39747 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39748 + else
39749 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39750 put_task_struct(task);
39751 }
39752 return allowed;
39753 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
39754 if (!task)
39755 goto out_no_task;
39756
39757 + if (gr_acl_handle_procpidmem(task))
39758 + goto out;
39759 +
39760 ret = -ENOMEM;
39761 page = (char *)__get_free_page(GFP_TEMPORARY);
39762 if (!page)
39763 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
39764 path_put(&nd->path);
39765
39766 /* Are we allowed to snoop on the tasks file descriptors? */
39767 - if (!proc_fd_access_allowed(inode))
39768 + if (!proc_fd_access_allowed(inode,0))
39769 goto out;
39770
39771 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39772 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
39773 struct path path;
39774
39775 /* Are we allowed to snoop on the tasks file descriptors? */
39776 - if (!proc_fd_access_allowed(inode))
39777 - goto out;
39778 + /* logging this is needed for learning on chromium to work properly,
39779 + but we don't want to flood the logs from 'ps' which does a readlink
39780 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39781 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39782 + */
39783 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39784 + if (!proc_fd_access_allowed(inode,0))
39785 + goto out;
39786 + } else {
39787 + if (!proc_fd_access_allowed(inode,1))
39788 + goto out;
39789 + }
39790
39791 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39792 if (error)
39793 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
39794 rcu_read_lock();
39795 cred = __task_cred(task);
39796 inode->i_uid = cred->euid;
39797 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39798 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39799 +#else
39800 inode->i_gid = cred->egid;
39801 +#endif
39802 rcu_read_unlock();
39803 }
39804 security_task_to_inode(task, inode);
39805 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
39806 struct inode *inode = dentry->d_inode;
39807 struct task_struct *task;
39808 const struct cred *cred;
39809 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39810 + const struct cred *tmpcred = current_cred();
39811 +#endif
39812
39813 generic_fillattr(inode, stat);
39814
39815 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
39816 stat->uid = 0;
39817 stat->gid = 0;
39818 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39819 +
39820 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39821 + rcu_read_unlock();
39822 + return -ENOENT;
39823 + }
39824 +
39825 if (task) {
39826 + cred = __task_cred(task);
39827 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39828 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39829 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39830 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39831 +#endif
39832 + ) {
39833 +#endif
39834 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39835 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39836 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39837 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39838 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39839 +#endif
39840 task_dumpable(task)) {
39841 - cred = __task_cred(task);
39842 stat->uid = cred->euid;
39843 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39844 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39845 +#else
39846 stat->gid = cred->egid;
39847 +#endif
39848 }
39849 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39850 + } else {
39851 + rcu_read_unlock();
39852 + return -ENOENT;
39853 + }
39854 +#endif
39855 }
39856 rcu_read_unlock();
39857 return 0;
39858 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
39859
39860 if (task) {
39861 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39862 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39863 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39864 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39865 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39866 +#endif
39867 task_dumpable(task)) {
39868 rcu_read_lock();
39869 cred = __task_cred(task);
39870 inode->i_uid = cred->euid;
39871 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39872 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39873 +#else
39874 inode->i_gid = cred->egid;
39875 +#endif
39876 rcu_read_unlock();
39877 } else {
39878 inode->i_uid = 0;
39879 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
39880 int fd = proc_fd(inode);
39881
39882 if (task) {
39883 - files = get_files_struct(task);
39884 + if (!gr_acl_handle_procpidmem(task))
39885 + files = get_files_struct(task);
39886 put_task_struct(task);
39887 }
39888 if (files) {
39889 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
39890 */
39891 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39892 {
39893 + struct task_struct *task;
39894 int rv = generic_permission(inode, mask, flags, NULL);
39895 - if (rv == 0)
39896 - return 0;
39897 +
39898 if (task_pid(current) == proc_pid(inode))
39899 rv = 0;
39900 +
39901 + task = get_proc_task(inode);
39902 + if (task == NULL)
39903 + return rv;
39904 +
39905 + if (gr_acl_handle_procpidmem(task))
39906 + rv = -EACCES;
39907 +
39908 + put_task_struct(task);
39909 +
39910 return rv;
39911 }
39912
39913 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
39914 if (!task)
39915 goto out_no_task;
39916
39917 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39918 + goto out;
39919 +
39920 /*
39921 * Yes, it does not scale. And it should not. Don't add
39922 * new entries into /proc/<tgid>/ without very good reasons.
39923 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
39924 if (!task)
39925 goto out_no_task;
39926
39927 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39928 + goto out;
39929 +
39930 ret = 0;
39931 i = filp->f_pos;
39932 switch (i) {
39933 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
39934 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39935 void *cookie)
39936 {
39937 - char *s = nd_get_link(nd);
39938 + const char *s = nd_get_link(nd);
39939 if (!IS_ERR(s))
39940 __putname(s);
39941 }
39942 @@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
39943 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39944 #endif
39945 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39946 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39947 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39948 INF("syscall", S_IRUGO, proc_pid_syscall),
39949 #endif
39950 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39951 @@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
39952 #ifdef CONFIG_SECURITY
39953 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39954 #endif
39955 -#ifdef CONFIG_KALLSYMS
39956 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39957 INF("wchan", S_IRUGO, proc_pid_wchan),
39958 #endif
39959 -#ifdef CONFIG_STACKTRACE
39960 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39961 ONE("stack", S_IRUGO, proc_pid_stack),
39962 #endif
39963 #ifdef CONFIG_SCHEDSTATS
39964 @@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
39965 #ifdef CONFIG_HARDWALL
39966 INF("hardwall", S_IRUGO, proc_pid_hardwall),
39967 #endif
39968 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39969 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
39970 +#endif
39971 };
39972
39973 static int proc_tgid_base_readdir(struct file * filp,
39974 @@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
39975 if (!inode)
39976 goto out;
39977
39978 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39979 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
39980 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39981 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39982 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
39983 +#else
39984 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
39985 +#endif
39986 inode->i_op = &proc_tgid_base_inode_operations;
39987 inode->i_fop = &proc_tgid_base_operations;
39988 inode->i_flags|=S_IMMUTABLE;
39989 @@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
39990 if (!task)
39991 goto out;
39992
39993 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39994 + goto out_put_task;
39995 +
39996 result = proc_pid_instantiate(dir, dentry, task, NULL);
39997 +out_put_task:
39998 put_task_struct(task);
39999 out:
40000 return result;
40001 @@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
40002 {
40003 unsigned int nr;
40004 struct task_struct *reaper;
40005 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40006 + const struct cred *tmpcred = current_cred();
40007 + const struct cred *itercred;
40008 +#endif
40009 + filldir_t __filldir = filldir;
40010 struct tgid_iter iter;
40011 struct pid_namespace *ns;
40012
40013 @@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
40014 for (iter = next_tgid(ns, iter);
40015 iter.task;
40016 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40017 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40018 + rcu_read_lock();
40019 + itercred = __task_cred(iter.task);
40020 +#endif
40021 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40022 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40023 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40024 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40025 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40026 +#endif
40027 + )
40028 +#endif
40029 + )
40030 + __filldir = &gr_fake_filldir;
40031 + else
40032 + __filldir = filldir;
40033 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40034 + rcu_read_unlock();
40035 +#endif
40036 filp->f_pos = iter.tgid + TGID_OFFSET;
40037 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40038 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40039 put_task_struct(iter.task);
40040 goto out;
40041 }
40042 @@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
40043 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40044 #endif
40045 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40046 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40047 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40048 INF("syscall", S_IRUGO, proc_pid_syscall),
40049 #endif
40050 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40051 @@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
40052 #ifdef CONFIG_SECURITY
40053 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40054 #endif
40055 -#ifdef CONFIG_KALLSYMS
40056 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40057 INF("wchan", S_IRUGO, proc_pid_wchan),
40058 #endif
40059 -#ifdef CONFIG_STACKTRACE
40060 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40061 ONE("stack", S_IRUGO, proc_pid_stack),
40062 #endif
40063 #ifdef CONFIG_SCHEDSTATS
40064 diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
40065 --- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
40066 +++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
40067 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
40068
40069 static int __init proc_cmdline_init(void)
40070 {
40071 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40072 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40073 +#else
40074 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40075 +#endif
40076 return 0;
40077 }
40078 module_init(proc_cmdline_init);
40079 diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
40080 --- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
40081 +++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
40082 @@ -64,7 +64,11 @@ static const struct file_operations proc
40083
40084 static int __init proc_devices_init(void)
40085 {
40086 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40087 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40088 +#else
40089 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40090 +#endif
40091 return 0;
40092 }
40093 module_init(proc_devices_init);
40094 diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
40095 --- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
40096 +++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
40097 @@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
40098 if (de->mode) {
40099 inode->i_mode = de->mode;
40100 inode->i_uid = de->uid;
40101 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40102 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40103 +#else
40104 inode->i_gid = de->gid;
40105 +#endif
40106 }
40107 if (de->size)
40108 inode->i_size = de->size;
40109 diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
40110 --- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
40111 +++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
40112 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40113 struct pid *pid, struct task_struct *task);
40114 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40115 struct pid *pid, struct task_struct *task);
40116 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40117 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40118 +#endif
40119 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40120
40121 extern const struct file_operations proc_maps_operations;
40122 diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
40123 --- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
40124 +++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
40125 @@ -30,12 +30,12 @@ config PROC_FS
40126
40127 config PROC_KCORE
40128 bool "/proc/kcore support" if !ARM
40129 - depends on PROC_FS && MMU
40130 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40131
40132 config PROC_VMCORE
40133 bool "/proc/vmcore support"
40134 - depends on PROC_FS && CRASH_DUMP
40135 - default y
40136 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40137 + default n
40138 help
40139 Exports the dump image of crashed kernel in ELF format.
40140
40141 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40142 limited in memory.
40143
40144 config PROC_PAGE_MONITOR
40145 - default y
40146 - depends on PROC_FS && MMU
40147 + default n
40148 + depends on PROC_FS && MMU && !GRKERNSEC
40149 bool "Enable /proc page monitoring" if EXPERT
40150 help
40151 Various /proc files exist to monitor process memory utilization:
40152 diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
40153 --- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
40154 +++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
40155 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40156 off_t offset = 0;
40157 struct kcore_list *m;
40158
40159 + pax_track_stack();
40160 +
40161 /* setup ELF header */
40162 elf = (struct elfhdr *) bufp;
40163 bufp += sizeof(struct elfhdr);
40164 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40165 * the addresses in the elf_phdr on our list.
40166 */
40167 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40168 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40169 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40170 + if (tsz > buflen)
40171 tsz = buflen;
40172 -
40173 +
40174 while (buflen) {
40175 struct kcore_list *m;
40176
40177 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40178 kfree(elf_buf);
40179 } else {
40180 if (kern_addr_valid(start)) {
40181 - unsigned long n;
40182 + char *elf_buf;
40183 + mm_segment_t oldfs;
40184
40185 - n = copy_to_user(buffer, (char *)start, tsz);
40186 - /*
40187 - * We cannot distingush between fault on source
40188 - * and fault on destination. When this happens
40189 - * we clear too and hope it will trigger the
40190 - * EFAULT again.
40191 - */
40192 - if (n) {
40193 - if (clear_user(buffer + tsz - n,
40194 - n))
40195 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40196 + if (!elf_buf)
40197 + return -ENOMEM;
40198 + oldfs = get_fs();
40199 + set_fs(KERNEL_DS);
40200 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40201 + set_fs(oldfs);
40202 + if (copy_to_user(buffer, elf_buf, tsz)) {
40203 + kfree(elf_buf);
40204 return -EFAULT;
40205 + }
40206 }
40207 + set_fs(oldfs);
40208 + kfree(elf_buf);
40209 } else {
40210 if (clear_user(buffer, tsz))
40211 return -EFAULT;
40212 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40213
40214 static int open_kcore(struct inode *inode, struct file *filp)
40215 {
40216 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40217 + return -EPERM;
40218 +#endif
40219 if (!capable(CAP_SYS_RAWIO))
40220 return -EPERM;
40221 if (kcore_need_update)
40222 diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
40223 --- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
40224 +++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
40225 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40226 unsigned long pages[NR_LRU_LISTS];
40227 int lru;
40228
40229 + pax_track_stack();
40230 +
40231 /*
40232 * display in kilobytes.
40233 */
40234 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40235 vmi.used >> 10,
40236 vmi.largest_chunk >> 10
40237 #ifdef CONFIG_MEMORY_FAILURE
40238 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40239 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40240 #endif
40241 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40242 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40243 diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
40244 --- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
40245 +++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
40246 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40247 if (len < 1)
40248 len = 1;
40249 seq_printf(m, "%*c", len, ' ');
40250 - seq_path(m, &file->f_path, "");
40251 + seq_path(m, &file->f_path, "\n\\");
40252 }
40253
40254 seq_putc(m, '\n');
40255 diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
40256 --- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
40257 +++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
40258 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40259 struct task_struct *task;
40260 struct nsproxy *ns;
40261 struct net *net = NULL;
40262 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40263 + const struct cred *cred = current_cred();
40264 +#endif
40265 +
40266 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40267 + if (cred->fsuid)
40268 + return net;
40269 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40270 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40271 + return net;
40272 +#endif
40273
40274 rcu_read_lock();
40275 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40276 diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
40277 --- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
40278 +++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
40279 @@ -8,6 +8,8 @@
40280 #include <linux/namei.h>
40281 #include "internal.h"
40282
40283 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40284 +
40285 static const struct dentry_operations proc_sys_dentry_operations;
40286 static const struct file_operations proc_sys_file_operations;
40287 static const struct inode_operations proc_sys_inode_operations;
40288 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40289 if (!p)
40290 goto out;
40291
40292 + if (gr_handle_sysctl(p, MAY_EXEC))
40293 + goto out;
40294 +
40295 err = ERR_PTR(-ENOMEM);
40296 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40297 if (h)
40298 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40299 if (*pos < file->f_pos)
40300 continue;
40301
40302 + if (gr_handle_sysctl(table, 0))
40303 + continue;
40304 +
40305 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40306 if (res)
40307 return res;
40308 @@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
40309 if (IS_ERR(head))
40310 return PTR_ERR(head);
40311
40312 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40313 + return -ENOENT;
40314 +
40315 generic_fillattr(inode, stat);
40316 if (table)
40317 stat->mode = (stat->mode & S_IFMT) | table->mode;
40318 diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
40319 --- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
40320 +++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
40321 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
40322 #ifdef CONFIG_PROC_DEVICETREE
40323 proc_device_tree_init();
40324 #endif
40325 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40326 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40327 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40328 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40329 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40330 +#endif
40331 +#else
40332 proc_mkdir("bus", NULL);
40333 +#endif
40334 proc_sys_init();
40335 }
40336
40337 diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
40338 --- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
40339 +++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
40340 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40341 "VmExe:\t%8lu kB\n"
40342 "VmLib:\t%8lu kB\n"
40343 "VmPTE:\t%8lu kB\n"
40344 - "VmSwap:\t%8lu kB\n",
40345 - hiwater_vm << (PAGE_SHIFT-10),
40346 + "VmSwap:\t%8lu kB\n"
40347 +
40348 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40349 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40350 +#endif
40351 +
40352 + ,hiwater_vm << (PAGE_SHIFT-10),
40353 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40354 mm->locked_vm << (PAGE_SHIFT-10),
40355 hiwater_rss << (PAGE_SHIFT-10),
40356 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40357 data << (PAGE_SHIFT-10),
40358 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40359 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40360 - swap << (PAGE_SHIFT-10));
40361 + swap << (PAGE_SHIFT-10)
40362 +
40363 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40364 + , mm->context.user_cs_base, mm->context.user_cs_limit
40365 +#endif
40366 +
40367 + );
40368 }
40369
40370 unsigned long task_vsize(struct mm_struct *mm)
40371 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40372 return ret;
40373 }
40374
40375 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40376 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40377 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40378 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40379 +#endif
40380 +
40381 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40382 {
40383 struct mm_struct *mm = vma->vm_mm;
40384 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40385 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40386 }
40387
40388 - /* We don't show the stack guard page in /proc/maps */
40389 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40390 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40391 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40392 +#else
40393 start = vma->vm_start;
40394 - if (stack_guard_page_start(vma, start))
40395 - start += PAGE_SIZE;
40396 end = vma->vm_end;
40397 - if (stack_guard_page_end(vma, end))
40398 - end -= PAGE_SIZE;
40399 +#endif
40400
40401 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40402 start,
40403 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40404 flags & VM_WRITE ? 'w' : '-',
40405 flags & VM_EXEC ? 'x' : '-',
40406 flags & VM_MAYSHARE ? 's' : 'p',
40407 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40408 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40409 +#else
40410 pgoff,
40411 +#endif
40412 MAJOR(dev), MINOR(dev), ino, &len);
40413
40414 /*
40415 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40416 */
40417 if (file) {
40418 pad_len_spaces(m, len);
40419 - seq_path(m, &file->f_path, "\n");
40420 + seq_path(m, &file->f_path, "\n\\");
40421 } else {
40422 const char *name = arch_vma_name(vma);
40423 if (!name) {
40424 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40425 if (vma->vm_start <= mm->brk &&
40426 vma->vm_end >= mm->start_brk) {
40427 name = "[heap]";
40428 - } else if (vma->vm_start <= mm->start_stack &&
40429 - vma->vm_end >= mm->start_stack) {
40430 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40431 + (vma->vm_start <= mm->start_stack &&
40432 + vma->vm_end >= mm->start_stack)) {
40433 name = "[stack]";
40434 }
40435 } else {
40436 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40437 };
40438
40439 memset(&mss, 0, sizeof mss);
40440 - mss.vma = vma;
40441 - /* mmap_sem is held in m_start */
40442 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40443 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40444 -
40445 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40446 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40447 +#endif
40448 + mss.vma = vma;
40449 + /* mmap_sem is held in m_start */
40450 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40451 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40452 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40453 + }
40454 +#endif
40455 show_map_vma(m, vma);
40456
40457 seq_printf(m,
40458 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40459 "KernelPageSize: %8lu kB\n"
40460 "MMUPageSize: %8lu kB\n"
40461 "Locked: %8lu kB\n",
40462 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40463 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40464 +#else
40465 (vma->vm_end - vma->vm_start) >> 10,
40466 +#endif
40467 mss.resident >> 10,
40468 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40469 mss.shared_clean >> 10,
40470 @@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
40471
40472 if (file) {
40473 seq_printf(m, " file=");
40474 - seq_path(m, &file->f_path, "\n\t= ");
40475 + seq_path(m, &file->f_path, "\n\t\\= ");
40476 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
40477 seq_printf(m, " heap");
40478 } else if (vma->vm_start <= mm->start_stack &&
40479 diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
40480 --- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
40481 +++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
40482 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40483 else
40484 bytes += kobjsize(mm);
40485
40486 - if (current->fs && current->fs->users > 1)
40487 + if (current->fs && atomic_read(&current->fs->users) > 1)
40488 sbytes += kobjsize(current->fs);
40489 else
40490 bytes += kobjsize(current->fs);
40491 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40492
40493 if (file) {
40494 pad_len_spaces(m, len);
40495 - seq_path(m, &file->f_path, "");
40496 + seq_path(m, &file->f_path, "\n\\");
40497 } else if (mm) {
40498 if (vma->vm_start <= mm->start_stack &&
40499 vma->vm_end >= mm->start_stack) {
40500 diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
40501 --- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
40502 +++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
40503 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40504 void quota_send_warning(short type, unsigned int id, dev_t dev,
40505 const char warntype)
40506 {
40507 - static atomic_t seq;
40508 + static atomic_unchecked_t seq;
40509 struct sk_buff *skb;
40510 void *msg_head;
40511 int ret;
40512 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40513 "VFS: Not enough memory to send quota warning.\n");
40514 return;
40515 }
40516 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40517 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40518 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40519 if (!msg_head) {
40520 printk(KERN_ERR
40521 diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
40522 --- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
40523 +++ linux-3.0.4/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
40524 @@ -17,6 +17,7 @@
40525 #include <linux/security.h>
40526 #include <linux/syscalls.h>
40527 #include <linux/unistd.h>
40528 +#include <linux/namei.h>
40529
40530 #include <asm/uaccess.h>
40531
40532 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40533
40534 struct readdir_callback {
40535 struct old_linux_dirent __user * dirent;
40536 + struct file * file;
40537 int result;
40538 };
40539
40540 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40541 buf->result = -EOVERFLOW;
40542 return -EOVERFLOW;
40543 }
40544 +
40545 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40546 + return 0;
40547 +
40548 buf->result++;
40549 dirent = buf->dirent;
40550 if (!access_ok(VERIFY_WRITE, dirent,
40551 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40552
40553 buf.result = 0;
40554 buf.dirent = dirent;
40555 + buf.file = file;
40556
40557 error = vfs_readdir(file, fillonedir, &buf);
40558 if (buf.result)
40559 @@ -142,6 +149,7 @@ struct linux_dirent {
40560 struct getdents_callback {
40561 struct linux_dirent __user * current_dir;
40562 struct linux_dirent __user * previous;
40563 + struct file * file;
40564 int count;
40565 int error;
40566 };
40567 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40568 buf->error = -EOVERFLOW;
40569 return -EOVERFLOW;
40570 }
40571 +
40572 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40573 + return 0;
40574 +
40575 dirent = buf->previous;
40576 if (dirent) {
40577 if (__put_user(offset, &dirent->d_off))
40578 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40579 buf.previous = NULL;
40580 buf.count = count;
40581 buf.error = 0;
40582 + buf.file = file;
40583
40584 error = vfs_readdir(file, filldir, &buf);
40585 if (error >= 0)
40586 @@ -229,6 +242,7 @@ out:
40587 struct getdents_callback64 {
40588 struct linux_dirent64 __user * current_dir;
40589 struct linux_dirent64 __user * previous;
40590 + struct file *file;
40591 int count;
40592 int error;
40593 };
40594 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40595 buf->error = -EINVAL; /* only used if we fail.. */
40596 if (reclen > buf->count)
40597 return -EINVAL;
40598 +
40599 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40600 + return 0;
40601 +
40602 dirent = buf->previous;
40603 if (dirent) {
40604 if (__put_user(offset, &dirent->d_off))
40605 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40606
40607 buf.current_dir = dirent;
40608 buf.previous = NULL;
40609 + buf.file = file;
40610 buf.count = count;
40611 buf.error = 0;
40612
40613 diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
40614 --- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40615 +++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
40616 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40617 struct reiserfs_dir_entry de;
40618 int ret = 0;
40619
40620 + pax_track_stack();
40621 +
40622 reiserfs_write_lock(inode->i_sb);
40623
40624 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40625 diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
40626 --- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
40627 +++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
40628 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40629 return;
40630 }
40631
40632 - atomic_inc(&(fs_generation(tb->tb_sb)));
40633 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40634 do_balance_starts(tb);
40635
40636 /* balance leaf returns 0 except if combining L R and S into
40637 diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
40638 --- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
40639 +++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
40640 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40641 struct buffer_head *bh;
40642 int i, j;
40643
40644 + pax_track_stack();
40645 +
40646 bh = __getblk(dev, block, bufsize);
40647 if (buffer_uptodate(bh))
40648 return (bh);
40649 diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
40650 --- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
40651 +++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
40652 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40653 unsigned long savelink = 1;
40654 struct timespec ctime;
40655
40656 + pax_track_stack();
40657 +
40658 /* three balancings: (1) old name removal, (2) new name insertion
40659 and (3) maybe "save" link insertion
40660 stat data updates: (1) old directory,
40661 diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
40662 --- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
40663 +++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
40664 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40665 "SMALL_TAILS " : "NO_TAILS ",
40666 replay_only(sb) ? "REPLAY_ONLY " : "",
40667 convert_reiserfs(sb) ? "CONV " : "",
40668 - atomic_read(&r->s_generation_counter),
40669 + atomic_read_unchecked(&r->s_generation_counter),
40670 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40671 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40672 SF(s_good_search_by_key_reada), SF(s_bmaps),
40673 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40674 struct journal_params *jp = &rs->s_v1.s_journal;
40675 char b[BDEVNAME_SIZE];
40676
40677 + pax_track_stack();
40678 +
40679 seq_printf(m, /* on-disk fields */
40680 "jp_journal_1st_block: \t%i\n"
40681 "jp_journal_dev: \t%s[%x]\n"
40682 diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
40683 --- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
40684 +++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
40685 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40686 int iter = 0;
40687 #endif
40688
40689 + pax_track_stack();
40690 +
40691 BUG_ON(!th->t_trans_id);
40692
40693 init_tb_struct(th, &s_del_balance, sb, path,
40694 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40695 int retval;
40696 int quota_cut_bytes = 0;
40697
40698 + pax_track_stack();
40699 +
40700 BUG_ON(!th->t_trans_id);
40701
40702 le_key2cpu_key(&cpu_key, key);
40703 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40704 int quota_cut_bytes;
40705 loff_t tail_pos = 0;
40706
40707 + pax_track_stack();
40708 +
40709 BUG_ON(!th->t_trans_id);
40710
40711 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40712 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40713 int retval;
40714 int fs_gen;
40715
40716 + pax_track_stack();
40717 +
40718 BUG_ON(!th->t_trans_id);
40719
40720 fs_gen = get_generation(inode->i_sb);
40721 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40722 int fs_gen = 0;
40723 int quota_bytes = 0;
40724
40725 + pax_track_stack();
40726 +
40727 BUG_ON(!th->t_trans_id);
40728
40729 if (inode) { /* Do we count quotas for item? */
40730 diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
40731 --- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
40732 +++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
40733 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40734 {.option_name = NULL}
40735 };
40736
40737 + pax_track_stack();
40738 +
40739 *blocks = 0;
40740 if (!options || !*options)
40741 /* use default configuration: create tails, journaling on, no
40742 diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
40743 --- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
40744 +++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
40745 @@ -20,6 +20,7 @@
40746 #include <linux/module.h>
40747 #include <linux/slab.h>
40748 #include <linux/poll.h>
40749 +#include <linux/security.h>
40750 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40751 #include <linux/file.h>
40752 #include <linux/fdtable.h>
40753 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40754 int retval, i, timed_out = 0;
40755 unsigned long slack = 0;
40756
40757 + pax_track_stack();
40758 +
40759 rcu_read_lock();
40760 retval = max_select_fd(n, fds);
40761 rcu_read_unlock();
40762 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40763 /* Allocate small arguments on the stack to save memory and be faster */
40764 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40765
40766 + pax_track_stack();
40767 +
40768 ret = -EINVAL;
40769 if (n < 0)
40770 goto out_nofds;
40771 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40772 struct poll_list *walk = head;
40773 unsigned long todo = nfds;
40774
40775 + pax_track_stack();
40776 +
40777 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40778 if (nfds > rlimit(RLIMIT_NOFILE))
40779 return -EINVAL;
40780
40781 diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
40782 --- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
40783 +++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
40784 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40785 return 0;
40786 }
40787 if (!m->buf) {
40788 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40789 + m->size = PAGE_SIZE;
40790 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40791 if (!m->buf)
40792 return -ENOMEM;
40793 }
40794 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40795 Eoverflow:
40796 m->op->stop(m, p);
40797 kfree(m->buf);
40798 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40799 + m->size <<= 1;
40800 + m->buf = kmalloc(m->size, GFP_KERNEL);
40801 return !m->buf ? -ENOMEM : -EAGAIN;
40802 }
40803
40804 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40805 m->version = file->f_version;
40806 /* grab buffer if we didn't have one */
40807 if (!m->buf) {
40808 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40809 + m->size = PAGE_SIZE;
40810 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40811 if (!m->buf)
40812 goto Enomem;
40813 }
40814 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40815 goto Fill;
40816 m->op->stop(m, p);
40817 kfree(m->buf);
40818 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40819 + m->size <<= 1;
40820 + m->buf = kmalloc(m->size, GFP_KERNEL);
40821 if (!m->buf)
40822 goto Enomem;
40823 m->count = 0;
40824 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40825 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40826 void *data)
40827 {
40828 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40829 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40830 int res = -ENOMEM;
40831
40832 if (op) {
40833 diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
40834 --- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
40835 +++ linux-3.0.4/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
40836 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40837 pipe_lock(pipe);
40838
40839 for (;;) {
40840 - if (!pipe->readers) {
40841 + if (!atomic_read(&pipe->readers)) {
40842 send_sig(SIGPIPE, current, 0);
40843 if (!ret)
40844 ret = -EPIPE;
40845 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40846 do_wakeup = 0;
40847 }
40848
40849 - pipe->waiting_writers++;
40850 + atomic_inc(&pipe->waiting_writers);
40851 pipe_wait(pipe);
40852 - pipe->waiting_writers--;
40853 + atomic_dec(&pipe->waiting_writers);
40854 }
40855
40856 pipe_unlock(pipe);
40857 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
40858 .spd_release = spd_release_page,
40859 };
40860
40861 + pax_track_stack();
40862 +
40863 if (splice_grow_spd(pipe, &spd))
40864 return -ENOMEM;
40865
40866 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
40867 old_fs = get_fs();
40868 set_fs(get_ds());
40869 /* The cast to a user pointer is valid due to the set_fs() */
40870 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40871 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40872 set_fs(old_fs);
40873
40874 return res;
40875 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
40876 old_fs = get_fs();
40877 set_fs(get_ds());
40878 /* The cast to a user pointer is valid due to the set_fs() */
40879 - res = vfs_write(file, (const char __user *)buf, count, &pos);
40880 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40881 set_fs(old_fs);
40882
40883 return res;
40884 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
40885 .spd_release = spd_release_page,
40886 };
40887
40888 + pax_track_stack();
40889 +
40890 if (splice_grow_spd(pipe, &spd))
40891 return -ENOMEM;
40892
40893 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
40894 goto err;
40895
40896 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40897 - vec[i].iov_base = (void __user *) page_address(page);
40898 + vec[i].iov_base = (__force void __user *) page_address(page);
40899 vec[i].iov_len = this_len;
40900 spd.pages[i] = page;
40901 spd.nr_pages++;
40902 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40903 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40904 {
40905 while (!pipe->nrbufs) {
40906 - if (!pipe->writers)
40907 + if (!atomic_read(&pipe->writers))
40908 return 0;
40909
40910 - if (!pipe->waiting_writers && sd->num_spliced)
40911 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40912 return 0;
40913
40914 if (sd->flags & SPLICE_F_NONBLOCK)
40915 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
40916 * out of the pipe right after the splice_to_pipe(). So set
40917 * PIPE_READERS appropriately.
40918 */
40919 - pipe->readers = 1;
40920 + atomic_set(&pipe->readers, 1);
40921
40922 current->splice_pipe = pipe;
40923 }
40924 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
40925 };
40926 long ret;
40927
40928 + pax_track_stack();
40929 +
40930 pipe = get_pipe_info(file);
40931 if (!pipe)
40932 return -EBADF;
40933 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
40934 ret = -ERESTARTSYS;
40935 break;
40936 }
40937 - if (!pipe->writers)
40938 + if (!atomic_read(&pipe->writers))
40939 break;
40940 - if (!pipe->waiting_writers) {
40941 + if (!atomic_read(&pipe->waiting_writers)) {
40942 if (flags & SPLICE_F_NONBLOCK) {
40943 ret = -EAGAIN;
40944 break;
40945 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
40946 pipe_lock(pipe);
40947
40948 while (pipe->nrbufs >= pipe->buffers) {
40949 - if (!pipe->readers) {
40950 + if (!atomic_read(&pipe->readers)) {
40951 send_sig(SIGPIPE, current, 0);
40952 ret = -EPIPE;
40953 break;
40954 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
40955 ret = -ERESTARTSYS;
40956 break;
40957 }
40958 - pipe->waiting_writers++;
40959 + atomic_inc(&pipe->waiting_writers);
40960 pipe_wait(pipe);
40961 - pipe->waiting_writers--;
40962 + atomic_dec(&pipe->waiting_writers);
40963 }
40964
40965 pipe_unlock(pipe);
40966 @@ -1819,14 +1825,14 @@ retry:
40967 pipe_double_lock(ipipe, opipe);
40968
40969 do {
40970 - if (!opipe->readers) {
40971 + if (!atomic_read(&opipe->readers)) {
40972 send_sig(SIGPIPE, current, 0);
40973 if (!ret)
40974 ret = -EPIPE;
40975 break;
40976 }
40977
40978 - if (!ipipe->nrbufs && !ipipe->writers)
40979 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
40980 break;
40981
40982 /*
40983 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
40984 pipe_double_lock(ipipe, opipe);
40985
40986 do {
40987 - if (!opipe->readers) {
40988 + if (!atomic_read(&opipe->readers)) {
40989 send_sig(SIGPIPE, current, 0);
40990 if (!ret)
40991 ret = -EPIPE;
40992 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
40993 * return EAGAIN if we have the potential of some data in the
40994 * future, otherwise just return 0
40995 */
40996 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
40997 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
40998 ret = -EAGAIN;
40999
41000 pipe_unlock(ipipe);
41001 diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
41002 --- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
41003 +++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
41004 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41005
41006 struct sysfs_open_dirent {
41007 atomic_t refcnt;
41008 - atomic_t event;
41009 + atomic_unchecked_t event;
41010 wait_queue_head_t poll;
41011 struct list_head buffers; /* goes through sysfs_buffer.list */
41012 };
41013 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
41014 if (!sysfs_get_active(attr_sd))
41015 return -ENODEV;
41016
41017 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41018 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41019 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41020
41021 sysfs_put_active(attr_sd);
41022 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
41023 return -ENOMEM;
41024
41025 atomic_set(&new_od->refcnt, 0);
41026 - atomic_set(&new_od->event, 1);
41027 + atomic_set_unchecked(&new_od->event, 1);
41028 init_waitqueue_head(&new_od->poll);
41029 INIT_LIST_HEAD(&new_od->buffers);
41030 goto retry;
41031 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
41032
41033 sysfs_put_active(attr_sd);
41034
41035 - if (buffer->event != atomic_read(&od->event))
41036 + if (buffer->event != atomic_read_unchecked(&od->event))
41037 goto trigger;
41038
41039 return DEFAULT_POLLMASK;
41040 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
41041
41042 od = sd->s_attr.open;
41043 if (od) {
41044 - atomic_inc(&od->event);
41045 + atomic_inc_unchecked(&od->event);
41046 wake_up_interruptible(&od->poll);
41047 }
41048
41049 diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
41050 --- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
41051 +++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
41052 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41053 .s_name = "",
41054 .s_count = ATOMIC_INIT(1),
41055 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41056 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41057 + .s_mode = S_IFDIR | S_IRWXU,
41058 +#else
41059 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41060 +#endif
41061 .s_ino = 1,
41062 };
41063
41064 diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
41065 --- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
41066 +++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
41067 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41068
41069 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41070 {
41071 - char *page = nd_get_link(nd);
41072 + const char *page = nd_get_link(nd);
41073 if (!IS_ERR(page))
41074 free_page((unsigned long)page);
41075 }
41076 diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
41077 --- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
41078 +++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
41079 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41080 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41081 int lastblock = 0;
41082
41083 + pax_track_stack();
41084 +
41085 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41086 prev_epos.block = iinfo->i_location;
41087 prev_epos.bh = NULL;
41088 diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
41089 --- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
41090 +++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
41091 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41092
41093 u8 udf_tag_checksum(const struct tag *t)
41094 {
41095 - u8 *data = (u8 *)t;
41096 + const u8 *data = (const u8 *)t;
41097 u8 checksum = 0;
41098 int i;
41099 for (i = 0; i < sizeof(struct tag); ++i)
41100 diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
41101 --- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
41102 +++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
41103 @@ -1,6 +1,7 @@
41104 #include <linux/compiler.h>
41105 #include <linux/file.h>
41106 #include <linux/fs.h>
41107 +#include <linux/security.h>
41108 #include <linux/linkage.h>
41109 #include <linux/mount.h>
41110 #include <linux/namei.h>
41111 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41112 goto mnt_drop_write_and_out;
41113 }
41114 }
41115 +
41116 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41117 + error = -EACCES;
41118 + goto mnt_drop_write_and_out;
41119 + }
41120 +
41121 mutex_lock(&inode->i_mutex);
41122 error = notify_change(path->dentry, &newattrs);
41123 mutex_unlock(&inode->i_mutex);
41124 diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
41125 --- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
41126 +++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
41127 @@ -17,8 +17,8 @@
41128 struct posix_acl *
41129 posix_acl_from_xattr(const void *value, size_t size)
41130 {
41131 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41132 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41133 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41134 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41135 int count;
41136 struct posix_acl *acl;
41137 struct posix_acl_entry *acl_e;
41138 diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
41139 --- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
41140 +++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
41141 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41142 * Extended attribute SET operations
41143 */
41144 static long
41145 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41146 +setxattr(struct path *path, const char __user *name, const void __user *value,
41147 size_t size, int flags)
41148 {
41149 int error;
41150 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
41151 return PTR_ERR(kvalue);
41152 }
41153
41154 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41155 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41156 + error = -EACCES;
41157 + goto out;
41158 + }
41159 +
41160 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41161 +out:
41162 kfree(kvalue);
41163 return error;
41164 }
41165 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41166 return error;
41167 error = mnt_want_write(path.mnt);
41168 if (!error) {
41169 - error = setxattr(path.dentry, name, value, size, flags);
41170 + error = setxattr(&path, name, value, size, flags);
41171 mnt_drop_write(path.mnt);
41172 }
41173 path_put(&path);
41174 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41175 return error;
41176 error = mnt_want_write(path.mnt);
41177 if (!error) {
41178 - error = setxattr(path.dentry, name, value, size, flags);
41179 + error = setxattr(&path, name, value, size, flags);
41180 mnt_drop_write(path.mnt);
41181 }
41182 path_put(&path);
41183 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41184 const void __user *,value, size_t, size, int, flags)
41185 {
41186 struct file *f;
41187 - struct dentry *dentry;
41188 int error = -EBADF;
41189
41190 f = fget(fd);
41191 if (!f)
41192 return error;
41193 - dentry = f->f_path.dentry;
41194 - audit_inode(NULL, dentry);
41195 + audit_inode(NULL, f->f_path.dentry);
41196 error = mnt_want_write_file(f);
41197 if (!error) {
41198 - error = setxattr(dentry, name, value, size, flags);
41199 + error = setxattr(&f->f_path, name, value, size, flags);
41200 mnt_drop_write(f->f_path.mnt);
41201 }
41202 fput(f);
41203 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41204 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
41205 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
41206 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41207 xfs_fsop_geom_t fsgeo;
41208 int error;
41209
41210 + memset(&fsgeo, 0, sizeof(fsgeo));
41211 error = xfs_fs_geometry(mp, &fsgeo, 3);
41212 if (error)
41213 return -error;
41214 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
41215 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
41216 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
41217 @@ -128,7 +128,7 @@ xfs_find_handle(
41218 }
41219
41220 error = -EFAULT;
41221 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41222 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41223 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41224 goto out_put;
41225
41226 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
41227 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
41228 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
41229 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41230 struct nameidata *nd,
41231 void *p)
41232 {
41233 - char *s = nd_get_link(nd);
41234 + const char *s = nd_get_link(nd);
41235
41236 if (!IS_ERR(s))
41237 kfree(s);
41238 diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
41239 --- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
41240 +++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
41241 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
41242 int nmap,
41243 int ret_nmap);
41244 #else
41245 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41246 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41247 #endif /* DEBUG */
41248
41249 STATIC int
41250 diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
41251 --- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
41252 +++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
41253 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41254 }
41255
41256 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41257 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41258 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41259 + char name[sfep->namelen];
41260 + memcpy(name, sfep->name, sfep->namelen);
41261 + if (filldir(dirent, name, sfep->namelen,
41262 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41263 + *offset = off & 0x7fffffff;
41264 + return 0;
41265 + }
41266 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41267 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41268 *offset = off & 0x7fffffff;
41269 return 0;
41270 diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
41271 --- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41272 +++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
41273 @@ -0,0 +1,105 @@
41274 +#include <linux/kernel.h>
41275 +#include <linux/mm.h>
41276 +#include <linux/slab.h>
41277 +#include <linux/vmalloc.h>
41278 +#include <linux/gracl.h>
41279 +#include <linux/grsecurity.h>
41280 +
41281 +static unsigned long alloc_stack_next = 1;
41282 +static unsigned long alloc_stack_size = 1;
41283 +static void **alloc_stack;
41284 +
41285 +static __inline__ int
41286 +alloc_pop(void)
41287 +{
41288 + if (alloc_stack_next == 1)
41289 + return 0;
41290 +
41291 + kfree(alloc_stack[alloc_stack_next - 2]);
41292 +
41293 + alloc_stack_next--;
41294 +
41295 + return 1;
41296 +}
41297 +
41298 +static __inline__ int
41299 +alloc_push(void *buf)
41300 +{
41301 + if (alloc_stack_next >= alloc_stack_size)
41302 + return 1;
41303 +
41304 + alloc_stack[alloc_stack_next - 1] = buf;
41305 +
41306 + alloc_stack_next++;
41307 +
41308 + return 0;
41309 +}
41310 +
41311 +void *
41312 +acl_alloc(unsigned long len)
41313 +{
41314 + void *ret = NULL;
41315 +
41316 + if (!len || len > PAGE_SIZE)
41317 + goto out;
41318 +
41319 + ret = kmalloc(len, GFP_KERNEL);
41320 +
41321 + if (ret) {
41322 + if (alloc_push(ret)) {
41323 + kfree(ret);
41324 + ret = NULL;
41325 + }
41326 + }
41327 +
41328 +out:
41329 + return ret;
41330 +}
41331 +
41332 +void *
41333 +acl_alloc_num(unsigned long num, unsigned long len)
41334 +{
41335 + if (!len || (num > (PAGE_SIZE / len)))
41336 + return NULL;
41337 +
41338 + return acl_alloc(num * len);
41339 +}
41340 +
41341 +void
41342 +acl_free_all(void)
41343 +{
41344 + if (gr_acl_is_enabled() || !alloc_stack)
41345 + return;
41346 +
41347 + while (alloc_pop()) ;
41348 +
41349 + if (alloc_stack) {
41350 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41351 + kfree(alloc_stack);
41352 + else
41353 + vfree(alloc_stack);
41354 + }
41355 +
41356 + alloc_stack = NULL;
41357 + alloc_stack_size = 1;
41358 + alloc_stack_next = 1;
41359 +
41360 + return;
41361 +}
41362 +
41363 +int
41364 +acl_alloc_stack_init(unsigned long size)
41365 +{
41366 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41367 + alloc_stack =
41368 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41369 + else
41370 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41371 +
41372 + alloc_stack_size = size;
41373 +
41374 + if (!alloc_stack)
41375 + return 0;
41376 + else
41377 + return 1;
41378 +}
41379 diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
41380 --- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41381 +++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
41382 @@ -0,0 +1,4106 @@
41383 +#include <linux/kernel.h>
41384 +#include <linux/module.h>
41385 +#include <linux/sched.h>
41386 +#include <linux/mm.h>
41387 +#include <linux/file.h>
41388 +#include <linux/fs.h>
41389 +#include <linux/namei.h>
41390 +#include <linux/mount.h>
41391 +#include <linux/tty.h>
41392 +#include <linux/proc_fs.h>
41393 +#include <linux/lglock.h>
41394 +#include <linux/slab.h>
41395 +#include <linux/vmalloc.h>
41396 +#include <linux/types.h>
41397 +#include <linux/sysctl.h>
41398 +#include <linux/netdevice.h>
41399 +#include <linux/ptrace.h>
41400 +#include <linux/gracl.h>
41401 +#include <linux/gralloc.h>
41402 +#include <linux/grsecurity.h>
41403 +#include <linux/grinternal.h>
41404 +#include <linux/pid_namespace.h>
41405 +#include <linux/fdtable.h>
41406 +#include <linux/percpu.h>
41407 +
41408 +#include <asm/uaccess.h>
41409 +#include <asm/errno.h>
41410 +#include <asm/mman.h>
41411 +
41412 +static struct acl_role_db acl_role_set;
41413 +static struct name_db name_set;
41414 +static struct inodev_db inodev_set;
41415 +
41416 +/* for keeping track of userspace pointers used for subjects, so we
41417 + can share references in the kernel as well
41418 +*/
41419 +
41420 +static struct path real_root;
41421 +
41422 +static struct acl_subj_map_db subj_map_set;
41423 +
41424 +static struct acl_role_label *default_role;
41425 +
41426 +static struct acl_role_label *role_list;
41427 +
41428 +static u16 acl_sp_role_value;
41429 +
41430 +extern char *gr_shared_page[4];
41431 +static DEFINE_MUTEX(gr_dev_mutex);
41432 +DEFINE_RWLOCK(gr_inode_lock);
41433 +
41434 +struct gr_arg *gr_usermode;
41435 +
41436 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41437 +
41438 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41439 +extern void gr_clear_learn_entries(void);
41440 +
41441 +#ifdef CONFIG_GRKERNSEC_RESLOG
41442 +extern void gr_log_resource(const struct task_struct *task,
41443 + const int res, const unsigned long wanted, const int gt);
41444 +#endif
41445 +
41446 +unsigned char *gr_system_salt;
41447 +unsigned char *gr_system_sum;
41448 +
41449 +static struct sprole_pw **acl_special_roles = NULL;
41450 +static __u16 num_sprole_pws = 0;
41451 +
41452 +static struct acl_role_label *kernel_role = NULL;
41453 +
41454 +static unsigned int gr_auth_attempts = 0;
41455 +static unsigned long gr_auth_expires = 0UL;
41456 +
41457 +#ifdef CONFIG_NET
41458 +extern struct vfsmount *sock_mnt;
41459 +#endif
41460 +
41461 +extern struct vfsmount *pipe_mnt;
41462 +extern struct vfsmount *shm_mnt;
41463 +#ifdef CONFIG_HUGETLBFS
41464 +extern struct vfsmount *hugetlbfs_vfsmount;
41465 +#endif
41466 +
41467 +static struct acl_object_label *fakefs_obj_rw;
41468 +static struct acl_object_label *fakefs_obj_rwx;
41469 +
41470 +extern int gr_init_uidset(void);
41471 +extern void gr_free_uidset(void);
41472 +extern void gr_remove_uid(uid_t uid);
41473 +extern int gr_find_uid(uid_t uid);
41474 +
41475 +DECLARE_BRLOCK(vfsmount_lock);
41476 +
41477 +__inline__ int
41478 +gr_acl_is_enabled(void)
41479 +{
41480 + return (gr_status & GR_READY);
41481 +}
41482 +
41483 +#ifdef CONFIG_BTRFS_FS
41484 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41485 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41486 +#endif
41487 +
41488 +static inline dev_t __get_dev(const struct dentry *dentry)
41489 +{
41490 +#ifdef CONFIG_BTRFS_FS
41491 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41492 + return get_btrfs_dev_from_inode(dentry->d_inode);
41493 + else
41494 +#endif
41495 + return dentry->d_inode->i_sb->s_dev;
41496 +}
41497 +
41498 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41499 +{
41500 + return __get_dev(dentry);
41501 +}
41502 +
41503 +static char gr_task_roletype_to_char(struct task_struct *task)
41504 +{
41505 + switch (task->role->roletype &
41506 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41507 + GR_ROLE_SPECIAL)) {
41508 + case GR_ROLE_DEFAULT:
41509 + return 'D';
41510 + case GR_ROLE_USER:
41511 + return 'U';
41512 + case GR_ROLE_GROUP:
41513 + return 'G';
41514 + case GR_ROLE_SPECIAL:
41515 + return 'S';
41516 + }
41517 +
41518 + return 'X';
41519 +}
41520 +
41521 +char gr_roletype_to_char(void)
41522 +{
41523 + return gr_task_roletype_to_char(current);
41524 +}
41525 +
41526 +__inline__ int
41527 +gr_acl_tpe_check(void)
41528 +{
41529 + if (unlikely(!(gr_status & GR_READY)))
41530 + return 0;
41531 + if (current->role->roletype & GR_ROLE_TPE)
41532 + return 1;
41533 + else
41534 + return 0;
41535 +}
41536 +
41537 +int
41538 +gr_handle_rawio(const struct inode *inode)
41539 +{
41540 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41541 + if (inode && S_ISBLK(inode->i_mode) &&
41542 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41543 + !capable(CAP_SYS_RAWIO))
41544 + return 1;
41545 +#endif
41546 + return 0;
41547 +}
41548 +
41549 +static int
41550 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41551 +{
41552 + if (likely(lena != lenb))
41553 + return 0;
41554 +
41555 + return !memcmp(a, b, lena);
41556 +}
41557 +
41558 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41559 +{
41560 + *buflen -= namelen;
41561 + if (*buflen < 0)
41562 + return -ENAMETOOLONG;
41563 + *buffer -= namelen;
41564 + memcpy(*buffer, str, namelen);
41565 + return 0;
41566 +}
41567 +
41568 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41569 +{
41570 + return prepend(buffer, buflen, name->name, name->len);
41571 +}
41572 +
41573 +static int prepend_path(const struct path *path, struct path *root,
41574 + char **buffer, int *buflen)
41575 +{
41576 + struct dentry *dentry = path->dentry;
41577 + struct vfsmount *vfsmnt = path->mnt;
41578 + bool slash = false;
41579 + int error = 0;
41580 +
41581 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41582 + struct dentry * parent;
41583 +
41584 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41585 + /* Global root? */
41586 + if (vfsmnt->mnt_parent == vfsmnt) {
41587 + goto out;
41588 + }
41589 + dentry = vfsmnt->mnt_mountpoint;
41590 + vfsmnt = vfsmnt->mnt_parent;
41591 + continue;
41592 + }
41593 + parent = dentry->d_parent;
41594 + prefetch(parent);
41595 + spin_lock(&dentry->d_lock);
41596 + error = prepend_name(buffer, buflen, &dentry->d_name);
41597 + spin_unlock(&dentry->d_lock);
41598 + if (!error)
41599 + error = prepend(buffer, buflen, "/", 1);
41600 + if (error)
41601 + break;
41602 +
41603 + slash = true;
41604 + dentry = parent;
41605 + }
41606 +
41607 +out:
41608 + if (!error && !slash)
41609 + error = prepend(buffer, buflen, "/", 1);
41610 +
41611 + return error;
41612 +}
41613 +
41614 +/* this must be called with vfsmount_lock and rename_lock held */
41615 +
41616 +static char *__our_d_path(const struct path *path, struct path *root,
41617 + char *buf, int buflen)
41618 +{
41619 + char *res = buf + buflen;
41620 + int error;
41621 +
41622 + prepend(&res, &buflen, "\0", 1);
41623 + error = prepend_path(path, root, &res, &buflen);
41624 + if (error)
41625 + return ERR_PTR(error);
41626 +
41627 + return res;
41628 +}
41629 +
41630 +static char *
41631 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41632 +{
41633 + char *retval;
41634 +
41635 + retval = __our_d_path(path, root, buf, buflen);
41636 + if (unlikely(IS_ERR(retval)))
41637 + retval = strcpy(buf, "<path too long>");
41638 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41639 + retval[1] = '\0';
41640 +
41641 + return retval;
41642 +}
41643 +
41644 +static char *
41645 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41646 + char *buf, int buflen)
41647 +{
41648 + struct path path;
41649 + char *res;
41650 +
41651 + path.dentry = (struct dentry *)dentry;
41652 + path.mnt = (struct vfsmount *)vfsmnt;
41653 +
41654 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41655 + by the RBAC system */
41656 + res = gen_full_path(&path, &real_root, buf, buflen);
41657 +
41658 + return res;
41659 +}
41660 +
41661 +static char *
41662 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41663 + char *buf, int buflen)
41664 +{
41665 + char *res;
41666 + struct path path;
41667 + struct path root;
41668 + struct task_struct *reaper = &init_task;
41669 +
41670 + path.dentry = (struct dentry *)dentry;
41671 + path.mnt = (struct vfsmount *)vfsmnt;
41672 +
41673 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41674 + get_fs_root(reaper->fs, &root);
41675 +
41676 + write_seqlock(&rename_lock);
41677 + br_read_lock(vfsmount_lock);
41678 + res = gen_full_path(&path, &root, buf, buflen);
41679 + br_read_unlock(vfsmount_lock);
41680 + write_sequnlock(&rename_lock);
41681 +
41682 + path_put(&root);
41683 + return res;
41684 +}
41685 +
41686 +static char *
41687 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41688 +{
41689 + char *ret;
41690 + write_seqlock(&rename_lock);
41691 + br_read_lock(vfsmount_lock);
41692 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41693 + PAGE_SIZE);
41694 + br_read_unlock(vfsmount_lock);
41695 + write_sequnlock(&rename_lock);
41696 + return ret;
41697 +}
41698 +
41699 +char *
41700 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41701 +{
41702 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41703 + PAGE_SIZE);
41704 +}
41705 +
41706 +char *
41707 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41708 +{
41709 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41710 + PAGE_SIZE);
41711 +}
41712 +
41713 +char *
41714 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41715 +{
41716 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41717 + PAGE_SIZE);
41718 +}
41719 +
41720 +char *
41721 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41722 +{
41723 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41724 + PAGE_SIZE);
41725 +}
41726 +
41727 +char *
41728 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41729 +{
41730 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41731 + PAGE_SIZE);
41732 +}
41733 +
41734 +__inline__ __u32
41735 +to_gr_audit(const __u32 reqmode)
41736 +{
41737 + /* masks off auditable permission flags, then shifts them to create
41738 + auditing flags, and adds the special case of append auditing if
41739 + we're requesting write */
41740 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41741 +}
41742 +
41743 +struct acl_subject_label *
41744 +lookup_subject_map(const struct acl_subject_label *userp)
41745 +{
41746 + unsigned int index = shash(userp, subj_map_set.s_size);
41747 + struct subject_map *match;
41748 +
41749 + match = subj_map_set.s_hash[index];
41750 +
41751 + while (match && match->user != userp)
41752 + match = match->next;
41753 +
41754 + if (match != NULL)
41755 + return match->kernel;
41756 + else
41757 + return NULL;
41758 +}
41759 +
41760 +static void
41761 +insert_subj_map_entry(struct subject_map *subjmap)
41762 +{
41763 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41764 + struct subject_map **curr;
41765 +
41766 + subjmap->prev = NULL;
41767 +
41768 + curr = &subj_map_set.s_hash[index];
41769 + if (*curr != NULL)
41770 + (*curr)->prev = subjmap;
41771 +
41772 + subjmap->next = *curr;
41773 + *curr = subjmap;
41774 +
41775 + return;
41776 +}
41777 +
41778 +static struct acl_role_label *
41779 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41780 + const gid_t gid)
41781 +{
41782 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41783 + struct acl_role_label *match;
41784 + struct role_allowed_ip *ipp;
41785 + unsigned int x;
41786 + u32 curr_ip = task->signal->curr_ip;
41787 +
41788 + task->signal->saved_ip = curr_ip;
41789 +
41790 + match = acl_role_set.r_hash[index];
41791 +
41792 + while (match) {
41793 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41794 + for (x = 0; x < match->domain_child_num; x++) {
41795 + if (match->domain_children[x] == uid)
41796 + goto found;
41797 + }
41798 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41799 + break;
41800 + match = match->next;
41801 + }
41802 +found:
41803 + if (match == NULL) {
41804 + try_group:
41805 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41806 + match = acl_role_set.r_hash[index];
41807 +
41808 + while (match) {
41809 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41810 + for (x = 0; x < match->domain_child_num; x++) {
41811 + if (match->domain_children[x] == gid)
41812 + goto found2;
41813 + }
41814 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41815 + break;
41816 + match = match->next;
41817 + }
41818 +found2:
41819 + if (match == NULL)
41820 + match = default_role;
41821 + if (match->allowed_ips == NULL)
41822 + return match;
41823 + else {
41824 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41825 + if (likely
41826 + ((ntohl(curr_ip) & ipp->netmask) ==
41827 + (ntohl(ipp->addr) & ipp->netmask)))
41828 + return match;
41829 + }
41830 + match = default_role;
41831 + }
41832 + } else if (match->allowed_ips == NULL) {
41833 + return match;
41834 + } else {
41835 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41836 + if (likely
41837 + ((ntohl(curr_ip) & ipp->netmask) ==
41838 + (ntohl(ipp->addr) & ipp->netmask)))
41839 + return match;
41840 + }
41841 + goto try_group;
41842 + }
41843 +
41844 + return match;
41845 +}
41846 +
41847 +struct acl_subject_label *
41848 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41849 + const struct acl_role_label *role)
41850 +{
41851 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41852 + struct acl_subject_label *match;
41853 +
41854 + match = role->subj_hash[index];
41855 +
41856 + while (match && (match->inode != ino || match->device != dev ||
41857 + (match->mode & GR_DELETED))) {
41858 + match = match->next;
41859 + }
41860 +
41861 + if (match && !(match->mode & GR_DELETED))
41862 + return match;
41863 + else
41864 + return NULL;
41865 +}
41866 +
41867 +struct acl_subject_label *
41868 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41869 + const struct acl_role_label *role)
41870 +{
41871 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41872 + struct acl_subject_label *match;
41873 +
41874 + match = role->subj_hash[index];
41875 +
41876 + while (match && (match->inode != ino || match->device != dev ||
41877 + !(match->mode & GR_DELETED))) {
41878 + match = match->next;
41879 + }
41880 +
41881 + if (match && (match->mode & GR_DELETED))
41882 + return match;
41883 + else
41884 + return NULL;
41885 +}
41886 +
41887 +static struct acl_object_label *
41888 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41889 + const struct acl_subject_label *subj)
41890 +{
41891 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41892 + struct acl_object_label *match;
41893 +
41894 + match = subj->obj_hash[index];
41895 +
41896 + while (match && (match->inode != ino || match->device != dev ||
41897 + (match->mode & GR_DELETED))) {
41898 + match = match->next;
41899 + }
41900 +
41901 + if (match && !(match->mode & GR_DELETED))
41902 + return match;
41903 + else
41904 + return NULL;
41905 +}
41906 +
41907 +static struct acl_object_label *
41908 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41909 + const struct acl_subject_label *subj)
41910 +{
41911 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41912 + struct acl_object_label *match;
41913 +
41914 + match = subj->obj_hash[index];
41915 +
41916 + while (match && (match->inode != ino || match->device != dev ||
41917 + !(match->mode & GR_DELETED))) {
41918 + match = match->next;
41919 + }
41920 +
41921 + if (match && (match->mode & GR_DELETED))
41922 + return match;
41923 +
41924 + match = subj->obj_hash[index];
41925 +
41926 + while (match && (match->inode != ino || match->device != dev ||
41927 + (match->mode & GR_DELETED))) {
41928 + match = match->next;
41929 + }
41930 +
41931 + if (match && !(match->mode & GR_DELETED))
41932 + return match;
41933 + else
41934 + return NULL;
41935 +}
41936 +
41937 +static struct name_entry *
41938 +lookup_name_entry(const char *name)
41939 +{
41940 + unsigned int len = strlen(name);
41941 + unsigned int key = full_name_hash(name, len);
41942 + unsigned int index = key % name_set.n_size;
41943 + struct name_entry *match;
41944 +
41945 + match = name_set.n_hash[index];
41946 +
41947 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41948 + match = match->next;
41949 +
41950 + return match;
41951 +}
41952 +
41953 +static struct name_entry *
41954 +lookup_name_entry_create(const char *name)
41955 +{
41956 + unsigned int len = strlen(name);
41957 + unsigned int key = full_name_hash(name, len);
41958 + unsigned int index = key % name_set.n_size;
41959 + struct name_entry *match;
41960 +
41961 + match = name_set.n_hash[index];
41962 +
41963 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41964 + !match->deleted))
41965 + match = match->next;
41966 +
41967 + if (match && match->deleted)
41968 + return match;
41969 +
41970 + match = name_set.n_hash[index];
41971 +
41972 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41973 + match->deleted))
41974 + match = match->next;
41975 +
41976 + if (match && !match->deleted)
41977 + return match;
41978 + else
41979 + return NULL;
41980 +}
41981 +
41982 +static struct inodev_entry *
41983 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
41984 +{
41985 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
41986 + struct inodev_entry *match;
41987 +
41988 + match = inodev_set.i_hash[index];
41989 +
41990 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
41991 + match = match->next;
41992 +
41993 + return match;
41994 +}
41995 +
41996 +static void
41997 +insert_inodev_entry(struct inodev_entry *entry)
41998 +{
41999 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42000 + inodev_set.i_size);
42001 + struct inodev_entry **curr;
42002 +
42003 + entry->prev = NULL;
42004 +
42005 + curr = &inodev_set.i_hash[index];
42006 + if (*curr != NULL)
42007 + (*curr)->prev = entry;
42008 +
42009 + entry->next = *curr;
42010 + *curr = entry;
42011 +
42012 + return;
42013 +}
42014 +
42015 +static void
42016 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42017 +{
42018 + unsigned int index =
42019 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42020 + struct acl_role_label **curr;
42021 + struct acl_role_label *tmp;
42022 +
42023 + curr = &acl_role_set.r_hash[index];
42024 +
42025 + /* if role was already inserted due to domains and already has
42026 + a role in the same bucket as it attached, then we need to
42027 + combine these two buckets
42028 + */
42029 + if (role->next) {
42030 + tmp = role->next;
42031 + while (tmp->next)
42032 + tmp = tmp->next;
42033 + tmp->next = *curr;
42034 + } else
42035 + role->next = *curr;
42036 + *curr = role;
42037 +
42038 + return;
42039 +}
42040 +
42041 +static void
42042 +insert_acl_role_label(struct acl_role_label *role)
42043 +{
42044 + int i;
42045 +
42046 + if (role_list == NULL) {
42047 + role_list = role;
42048 + role->prev = NULL;
42049 + } else {
42050 + role->prev = role_list;
42051 + role_list = role;
42052 + }
42053 +
42054 + /* used for hash chains */
42055 + role->next = NULL;
42056 +
42057 + if (role->roletype & GR_ROLE_DOMAIN) {
42058 + for (i = 0; i < role->domain_child_num; i++)
42059 + __insert_acl_role_label(role, role->domain_children[i]);
42060 + } else
42061 + __insert_acl_role_label(role, role->uidgid);
42062 +}
42063 +
42064 +static int
42065 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42066 +{
42067 + struct name_entry **curr, *nentry;
42068 + struct inodev_entry *ientry;
42069 + unsigned int len = strlen(name);
42070 + unsigned int key = full_name_hash(name, len);
42071 + unsigned int index = key % name_set.n_size;
42072 +
42073 + curr = &name_set.n_hash[index];
42074 +
42075 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42076 + curr = &((*curr)->next);
42077 +
42078 + if (*curr != NULL)
42079 + return 1;
42080 +
42081 + nentry = acl_alloc(sizeof (struct name_entry));
42082 + if (nentry == NULL)
42083 + return 0;
42084 + ientry = acl_alloc(sizeof (struct inodev_entry));
42085 + if (ientry == NULL)
42086 + return 0;
42087 + ientry->nentry = nentry;
42088 +
42089 + nentry->key = key;
42090 + nentry->name = name;
42091 + nentry->inode = inode;
42092 + nentry->device = device;
42093 + nentry->len = len;
42094 + nentry->deleted = deleted;
42095 +
42096 + nentry->prev = NULL;
42097 + curr = &name_set.n_hash[index];
42098 + if (*curr != NULL)
42099 + (*curr)->prev = nentry;
42100 + nentry->next = *curr;
42101 + *curr = nentry;
42102 +
42103 + /* insert us into the table searchable by inode/dev */
42104 + insert_inodev_entry(ientry);
42105 +
42106 + return 1;
42107 +}
42108 +
42109 +static void
42110 +insert_acl_obj_label(struct acl_object_label *obj,
42111 + struct acl_subject_label *subj)
42112 +{
42113 + unsigned int index =
42114 + fhash(obj->inode, obj->device, subj->obj_hash_size);
42115 + struct acl_object_label **curr;
42116 +
42117 +
42118 + obj->prev = NULL;
42119 +
42120 + curr = &subj->obj_hash[index];
42121 + if (*curr != NULL)
42122 + (*curr)->prev = obj;
42123 +
42124 + obj->next = *curr;
42125 + *curr = obj;
42126 +
42127 + return;
42128 +}
42129 +
42130 +static void
42131 +insert_acl_subj_label(struct acl_subject_label *obj,
42132 + struct acl_role_label *role)
42133 +{
42134 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42135 + struct acl_subject_label **curr;
42136 +
42137 + obj->prev = NULL;
42138 +
42139 + curr = &role->subj_hash[index];
42140 + if (*curr != NULL)
42141 + (*curr)->prev = obj;
42142 +
42143 + obj->next = *curr;
42144 + *curr = obj;
42145 +
42146 + return;
42147 +}
42148 +
42149 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42150 +
42151 +static void *
42152 +create_table(__u32 * len, int elementsize)
42153 +{
42154 + unsigned int table_sizes[] = {
42155 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42156 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42157 + 4194301, 8388593, 16777213, 33554393, 67108859
42158 + };
42159 + void *newtable = NULL;
42160 + unsigned int pwr = 0;
42161 +
42162 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42163 + table_sizes[pwr] <= *len)
42164 + pwr++;
42165 +
42166 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42167 + return newtable;
42168 +
42169 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42170 + newtable =
42171 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42172 + else
42173 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42174 +
42175 + *len = table_sizes[pwr];
42176 +
42177 + return newtable;
42178 +}
42179 +
42180 +static int
42181 +init_variables(const struct gr_arg *arg)
42182 +{
42183 + struct task_struct *reaper = &init_task;
42184 + unsigned int stacksize;
42185 +
42186 + subj_map_set.s_size = arg->role_db.num_subjects;
42187 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42188 + name_set.n_size = arg->role_db.num_objects;
42189 + inodev_set.i_size = arg->role_db.num_objects;
42190 +
42191 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42192 + !name_set.n_size || !inodev_set.i_size)
42193 + return 1;
42194 +
42195 + if (!gr_init_uidset())
42196 + return 1;
42197 +
42198 + /* set up the stack that holds allocation info */
42199 +
42200 + stacksize = arg->role_db.num_pointers + 5;
42201 +
42202 + if (!acl_alloc_stack_init(stacksize))
42203 + return 1;
42204 +
42205 + /* grab reference for the real root dentry and vfsmount */
42206 + get_fs_root(reaper->fs, &real_root);
42207 +
42208 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42209 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42210 +#endif
42211 +
42212 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42213 + if (fakefs_obj_rw == NULL)
42214 + return 1;
42215 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42216 +
42217 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42218 + if (fakefs_obj_rwx == NULL)
42219 + return 1;
42220 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42221 +
42222 + subj_map_set.s_hash =
42223 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42224 + acl_role_set.r_hash =
42225 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42226 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42227 + inodev_set.i_hash =
42228 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42229 +
42230 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42231 + !name_set.n_hash || !inodev_set.i_hash)
42232 + return 1;
42233 +
42234 + memset(subj_map_set.s_hash, 0,
42235 + sizeof(struct subject_map *) * subj_map_set.s_size);
42236 + memset(acl_role_set.r_hash, 0,
42237 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42238 + memset(name_set.n_hash, 0,
42239 + sizeof (struct name_entry *) * name_set.n_size);
42240 + memset(inodev_set.i_hash, 0,
42241 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42242 +
42243 + return 0;
42244 +}
42245 +
42246 +/* free information not needed after startup
42247 + currently contains user->kernel pointer mappings for subjects
42248 +*/
42249 +
42250 +static void
42251 +free_init_variables(void)
42252 +{
42253 + __u32 i;
42254 +
42255 + if (subj_map_set.s_hash) {
42256 + for (i = 0; i < subj_map_set.s_size; i++) {
42257 + if (subj_map_set.s_hash[i]) {
42258 + kfree(subj_map_set.s_hash[i]);
42259 + subj_map_set.s_hash[i] = NULL;
42260 + }
42261 + }
42262 +
42263 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42264 + PAGE_SIZE)
42265 + kfree(subj_map_set.s_hash);
42266 + else
42267 + vfree(subj_map_set.s_hash);
42268 + }
42269 +
42270 + return;
42271 +}
42272 +
42273 +static void
42274 +free_variables(void)
42275 +{
42276 + struct acl_subject_label *s;
42277 + struct acl_role_label *r;
42278 + struct task_struct *task, *task2;
42279 + unsigned int x;
42280 +
42281 + gr_clear_learn_entries();
42282 +
42283 + read_lock(&tasklist_lock);
42284 + do_each_thread(task2, task) {
42285 + task->acl_sp_role = 0;
42286 + task->acl_role_id = 0;
42287 + task->acl = NULL;
42288 + task->role = NULL;
42289 + } while_each_thread(task2, task);
42290 + read_unlock(&tasklist_lock);
42291 +
42292 + /* release the reference to the real root dentry and vfsmount */
42293 + path_put(&real_root);
42294 +
42295 + /* free all object hash tables */
42296 +
42297 + FOR_EACH_ROLE_START(r)
42298 + if (r->subj_hash == NULL)
42299 + goto next_role;
42300 + FOR_EACH_SUBJECT_START(r, s, x)
42301 + if (s->obj_hash == NULL)
42302 + break;
42303 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42304 + kfree(s->obj_hash);
42305 + else
42306 + vfree(s->obj_hash);
42307 + FOR_EACH_SUBJECT_END(s, x)
42308 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42309 + if (s->obj_hash == NULL)
42310 + break;
42311 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42312 + kfree(s->obj_hash);
42313 + else
42314 + vfree(s->obj_hash);
42315 + FOR_EACH_NESTED_SUBJECT_END(s)
42316 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42317 + kfree(r->subj_hash);
42318 + else
42319 + vfree(r->subj_hash);
42320 + r->subj_hash = NULL;
42321 +next_role:
42322 + FOR_EACH_ROLE_END(r)
42323 +
42324 + acl_free_all();
42325 +
42326 + if (acl_role_set.r_hash) {
42327 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42328 + PAGE_SIZE)
42329 + kfree(acl_role_set.r_hash);
42330 + else
42331 + vfree(acl_role_set.r_hash);
42332 + }
42333 + if (name_set.n_hash) {
42334 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42335 + PAGE_SIZE)
42336 + kfree(name_set.n_hash);
42337 + else
42338 + vfree(name_set.n_hash);
42339 + }
42340 +
42341 + if (inodev_set.i_hash) {
42342 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42343 + PAGE_SIZE)
42344 + kfree(inodev_set.i_hash);
42345 + else
42346 + vfree(inodev_set.i_hash);
42347 + }
42348 +
42349 + gr_free_uidset();
42350 +
42351 + memset(&name_set, 0, sizeof (struct name_db));
42352 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42353 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42354 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42355 +
42356 + default_role = NULL;
42357 + role_list = NULL;
42358 +
42359 + return;
42360 +}
42361 +
42362 +static __u32
42363 +count_user_objs(struct acl_object_label *userp)
42364 +{
42365 + struct acl_object_label o_tmp;
42366 + __u32 num = 0;
42367 +
42368 + while (userp) {
42369 + if (copy_from_user(&o_tmp, userp,
42370 + sizeof (struct acl_object_label)))
42371 + break;
42372 +
42373 + userp = o_tmp.prev;
42374 + num++;
42375 + }
42376 +
42377 + return num;
42378 +}
42379 +
42380 +static struct acl_subject_label *
42381 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42382 +
42383 +static int
42384 +copy_user_glob(struct acl_object_label *obj)
42385 +{
42386 + struct acl_object_label *g_tmp, **guser;
42387 + unsigned int len;
42388 + char *tmp;
42389 +
42390 + if (obj->globbed == NULL)
42391 + return 0;
42392 +
42393 + guser = &obj->globbed;
42394 + while (*guser) {
42395 + g_tmp = (struct acl_object_label *)
42396 + acl_alloc(sizeof (struct acl_object_label));
42397 + if (g_tmp == NULL)
42398 + return -ENOMEM;
42399 +
42400 + if (copy_from_user(g_tmp, *guser,
42401 + sizeof (struct acl_object_label)))
42402 + return -EFAULT;
42403 +
42404 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42405 +
42406 + if (!len || len >= PATH_MAX)
42407 + return -EINVAL;
42408 +
42409 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42410 + return -ENOMEM;
42411 +
42412 + if (copy_from_user(tmp, g_tmp->filename, len))
42413 + return -EFAULT;
42414 + tmp[len-1] = '\0';
42415 + g_tmp->filename = tmp;
42416 +
42417 + *guser = g_tmp;
42418 + guser = &(g_tmp->next);
42419 + }
42420 +
42421 + return 0;
42422 +}
42423 +
42424 +static int
42425 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42426 + struct acl_role_label *role)
42427 +{
42428 + struct acl_object_label *o_tmp;
42429 + unsigned int len;
42430 + int ret;
42431 + char *tmp;
42432 +
42433 + while (userp) {
42434 + if ((o_tmp = (struct acl_object_label *)
42435 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42436 + return -ENOMEM;
42437 +
42438 + if (copy_from_user(o_tmp, userp,
42439 + sizeof (struct acl_object_label)))
42440 + return -EFAULT;
42441 +
42442 + userp = o_tmp->prev;
42443 +
42444 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42445 +
42446 + if (!len || len >= PATH_MAX)
42447 + return -EINVAL;
42448 +
42449 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42450 + return -ENOMEM;
42451 +
42452 + if (copy_from_user(tmp, o_tmp->filename, len))
42453 + return -EFAULT;
42454 + tmp[len-1] = '\0';
42455 + o_tmp->filename = tmp;
42456 +
42457 + insert_acl_obj_label(o_tmp, subj);
42458 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42459 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42460 + return -ENOMEM;
42461 +
42462 + ret = copy_user_glob(o_tmp);
42463 + if (ret)
42464 + return ret;
42465 +
42466 + if (o_tmp->nested) {
42467 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42468 + if (IS_ERR(o_tmp->nested))
42469 + return PTR_ERR(o_tmp->nested);
42470 +
42471 + /* insert into nested subject list */
42472 + o_tmp->nested->next = role->hash->first;
42473 + role->hash->first = o_tmp->nested;
42474 + }
42475 + }
42476 +
42477 + return 0;
42478 +}
42479 +
42480 +static __u32
42481 +count_user_subjs(struct acl_subject_label *userp)
42482 +{
42483 + struct acl_subject_label s_tmp;
42484 + __u32 num = 0;
42485 +
42486 + while (userp) {
42487 + if (copy_from_user(&s_tmp, userp,
42488 + sizeof (struct acl_subject_label)))
42489 + break;
42490 +
42491 + userp = s_tmp.prev;
42492 + /* do not count nested subjects against this count, since
42493 + they are not included in the hash table, but are
42494 + attached to objects. We have already counted
42495 + the subjects in userspace for the allocation
42496 + stack
42497 + */
42498 + if (!(s_tmp.mode & GR_NESTED))
42499 + num++;
42500 + }
42501 +
42502 + return num;
42503 +}
42504 +
42505 +static int
42506 +copy_user_allowedips(struct acl_role_label *rolep)
42507 +{
42508 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42509 +
42510 + ruserip = rolep->allowed_ips;
42511 +
42512 + while (ruserip) {
42513 + rlast = rtmp;
42514 +
42515 + if ((rtmp = (struct role_allowed_ip *)
42516 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42517 + return -ENOMEM;
42518 +
42519 + if (copy_from_user(rtmp, ruserip,
42520 + sizeof (struct role_allowed_ip)))
42521 + return -EFAULT;
42522 +
42523 + ruserip = rtmp->prev;
42524 +
42525 + if (!rlast) {
42526 + rtmp->prev = NULL;
42527 + rolep->allowed_ips = rtmp;
42528 + } else {
42529 + rlast->next = rtmp;
42530 + rtmp->prev = rlast;
42531 + }
42532 +
42533 + if (!ruserip)
42534 + rtmp->next = NULL;
42535 + }
42536 +
42537 + return 0;
42538 +}
42539 +
42540 +static int
42541 +copy_user_transitions(struct acl_role_label *rolep)
42542 +{
42543 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42544 +
42545 + unsigned int len;
42546 + char *tmp;
42547 +
42548 + rusertp = rolep->transitions;
42549 +
42550 + while (rusertp) {
42551 + rlast = rtmp;
42552 +
42553 + if ((rtmp = (struct role_transition *)
42554 + acl_alloc(sizeof (struct role_transition))) == NULL)
42555 + return -ENOMEM;
42556 +
42557 + if (copy_from_user(rtmp, rusertp,
42558 + sizeof (struct role_transition)))
42559 + return -EFAULT;
42560 +
42561 + rusertp = rtmp->prev;
42562 +
42563 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42564 +
42565 + if (!len || len >= GR_SPROLE_LEN)
42566 + return -EINVAL;
42567 +
42568 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42569 + return -ENOMEM;
42570 +
42571 + if (copy_from_user(tmp, rtmp->rolename, len))
42572 + return -EFAULT;
42573 + tmp[len-1] = '\0';
42574 + rtmp->rolename = tmp;
42575 +
42576 + if (!rlast) {
42577 + rtmp->prev = NULL;
42578 + rolep->transitions = rtmp;
42579 + } else {
42580 + rlast->next = rtmp;
42581 + rtmp->prev = rlast;
42582 + }
42583 +
42584 + if (!rusertp)
42585 + rtmp->next = NULL;
42586 + }
42587 +
42588 + return 0;
42589 +}
42590 +
42591 +static struct acl_subject_label *
42592 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42593 +{
42594 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42595 + unsigned int len;
42596 + char *tmp;
42597 + __u32 num_objs;
42598 + struct acl_ip_label **i_tmp, *i_utmp2;
42599 + struct gr_hash_struct ghash;
42600 + struct subject_map *subjmap;
42601 + unsigned int i_num;
42602 + int err;
42603 +
42604 + s_tmp = lookup_subject_map(userp);
42605 +
42606 + /* we've already copied this subject into the kernel, just return
42607 + the reference to it, and don't copy it over again
42608 + */
42609 + if (s_tmp)
42610 + return(s_tmp);
42611 +
42612 + if ((s_tmp = (struct acl_subject_label *)
42613 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42614 + return ERR_PTR(-ENOMEM);
42615 +
42616 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42617 + if (subjmap == NULL)
42618 + return ERR_PTR(-ENOMEM);
42619 +
42620 + subjmap->user = userp;
42621 + subjmap->kernel = s_tmp;
42622 + insert_subj_map_entry(subjmap);
42623 +
42624 + if (copy_from_user(s_tmp, userp,
42625 + sizeof (struct acl_subject_label)))
42626 + return ERR_PTR(-EFAULT);
42627 +
42628 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42629 +
42630 + if (!len || len >= PATH_MAX)
42631 + return ERR_PTR(-EINVAL);
42632 +
42633 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42634 + return ERR_PTR(-ENOMEM);
42635 +
42636 + if (copy_from_user(tmp, s_tmp->filename, len))
42637 + return ERR_PTR(-EFAULT);
42638 + tmp[len-1] = '\0';
42639 + s_tmp->filename = tmp;
42640 +
42641 + if (!strcmp(s_tmp->filename, "/"))
42642 + role->root_label = s_tmp;
42643 +
42644 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42645 + return ERR_PTR(-EFAULT);
42646 +
42647 + /* copy user and group transition tables */
42648 +
42649 + if (s_tmp->user_trans_num) {
42650 + uid_t *uidlist;
42651 +
42652 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42653 + if (uidlist == NULL)
42654 + return ERR_PTR(-ENOMEM);
42655 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42656 + return ERR_PTR(-EFAULT);
42657 +
42658 + s_tmp->user_transitions = uidlist;
42659 + }
42660 +
42661 + if (s_tmp->group_trans_num) {
42662 + gid_t *gidlist;
42663 +
42664 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42665 + if (gidlist == NULL)
42666 + return ERR_PTR(-ENOMEM);
42667 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42668 + return ERR_PTR(-EFAULT);
42669 +
42670 + s_tmp->group_transitions = gidlist;
42671 + }
42672 +
42673 + /* set up object hash table */
42674 + num_objs = count_user_objs(ghash.first);
42675 +
42676 + s_tmp->obj_hash_size = num_objs;
42677 + s_tmp->obj_hash =
42678 + (struct acl_object_label **)
42679 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42680 +
42681 + if (!s_tmp->obj_hash)
42682 + return ERR_PTR(-ENOMEM);
42683 +
42684 + memset(s_tmp->obj_hash, 0,
42685 + s_tmp->obj_hash_size *
42686 + sizeof (struct acl_object_label *));
42687 +
42688 + /* add in objects */
42689 + err = copy_user_objs(ghash.first, s_tmp, role);
42690 +
42691 + if (err)
42692 + return ERR_PTR(err);
42693 +
42694 + /* set pointer for parent subject */
42695 + if (s_tmp->parent_subject) {
42696 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42697 +
42698 + if (IS_ERR(s_tmp2))
42699 + return s_tmp2;
42700 +
42701 + s_tmp->parent_subject = s_tmp2;
42702 + }
42703 +
42704 + /* add in ip acls */
42705 +
42706 + if (!s_tmp->ip_num) {
42707 + s_tmp->ips = NULL;
42708 + goto insert;
42709 + }
42710 +
42711 + i_tmp =
42712 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42713 + sizeof (struct acl_ip_label *));
42714 +
42715 + if (!i_tmp)
42716 + return ERR_PTR(-ENOMEM);
42717 +
42718 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42719 + *(i_tmp + i_num) =
42720 + (struct acl_ip_label *)
42721 + acl_alloc(sizeof (struct acl_ip_label));
42722 + if (!*(i_tmp + i_num))
42723 + return ERR_PTR(-ENOMEM);
42724 +
42725 + if (copy_from_user
42726 + (&i_utmp2, s_tmp->ips + i_num,
42727 + sizeof (struct acl_ip_label *)))
42728 + return ERR_PTR(-EFAULT);
42729 +
42730 + if (copy_from_user
42731 + (*(i_tmp + i_num), i_utmp2,
42732 + sizeof (struct acl_ip_label)))
42733 + return ERR_PTR(-EFAULT);
42734 +
42735 + if ((*(i_tmp + i_num))->iface == NULL)
42736 + continue;
42737 +
42738 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42739 + if (!len || len >= IFNAMSIZ)
42740 + return ERR_PTR(-EINVAL);
42741 + tmp = acl_alloc(len);
42742 + if (tmp == NULL)
42743 + return ERR_PTR(-ENOMEM);
42744 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42745 + return ERR_PTR(-EFAULT);
42746 + (*(i_tmp + i_num))->iface = tmp;
42747 + }
42748 +
42749 + s_tmp->ips = i_tmp;
42750 +
42751 +insert:
42752 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42753 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42754 + return ERR_PTR(-ENOMEM);
42755 +
42756 + return s_tmp;
42757 +}
42758 +
42759 +static int
42760 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42761 +{
42762 + struct acl_subject_label s_pre;
42763 + struct acl_subject_label * ret;
42764 + int err;
42765 +
42766 + while (userp) {
42767 + if (copy_from_user(&s_pre, userp,
42768 + sizeof (struct acl_subject_label)))
42769 + return -EFAULT;
42770 +
42771 + /* do not add nested subjects here, add
42772 + while parsing objects
42773 + */
42774 +
42775 + if (s_pre.mode & GR_NESTED) {
42776 + userp = s_pre.prev;
42777 + continue;
42778 + }
42779 +
42780 + ret = do_copy_user_subj(userp, role);
42781 +
42782 + err = PTR_ERR(ret);
42783 + if (IS_ERR(ret))
42784 + return err;
42785 +
42786 + insert_acl_subj_label(ret, role);
42787 +
42788 + userp = s_pre.prev;
42789 + }
42790 +
42791 + return 0;
42792 +}
42793 +
42794 +static int
42795 +copy_user_acl(struct gr_arg *arg)
42796 +{
42797 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42798 + struct sprole_pw *sptmp;
42799 + struct gr_hash_struct *ghash;
42800 + uid_t *domainlist;
42801 + unsigned int r_num;
42802 + unsigned int len;
42803 + char *tmp;
42804 + int err = 0;
42805 + __u16 i;
42806 + __u32 num_subjs;
42807 +
42808 + /* we need a default and kernel role */
42809 + if (arg->role_db.num_roles < 2)
42810 + return -EINVAL;
42811 +
42812 + /* copy special role authentication info from userspace */
42813 +
42814 + num_sprole_pws = arg->num_sprole_pws;
42815 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42816 +
42817 + if (!acl_special_roles) {
42818 + err = -ENOMEM;
42819 + goto cleanup;
42820 + }
42821 +
42822 + for (i = 0; i < num_sprole_pws; i++) {
42823 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42824 + if (!sptmp) {
42825 + err = -ENOMEM;
42826 + goto cleanup;
42827 + }
42828 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42829 + sizeof (struct sprole_pw))) {
42830 + err = -EFAULT;
42831 + goto cleanup;
42832 + }
42833 +
42834 + len =
42835 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42836 +
42837 + if (!len || len >= GR_SPROLE_LEN) {
42838 + err = -EINVAL;
42839 + goto cleanup;
42840 + }
42841 +
42842 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42843 + err = -ENOMEM;
42844 + goto cleanup;
42845 + }
42846 +
42847 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42848 + err = -EFAULT;
42849 + goto cleanup;
42850 + }
42851 + tmp[len-1] = '\0';
42852 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42853 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42854 +#endif
42855 + sptmp->rolename = tmp;
42856 + acl_special_roles[i] = sptmp;
42857 + }
42858 +
42859 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42860 +
42861 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42862 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42863 +
42864 + if (!r_tmp) {
42865 + err = -ENOMEM;
42866 + goto cleanup;
42867 + }
42868 +
42869 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42870 + sizeof (struct acl_role_label *))) {
42871 + err = -EFAULT;
42872 + goto cleanup;
42873 + }
42874 +
42875 + if (copy_from_user(r_tmp, r_utmp2,
42876 + sizeof (struct acl_role_label))) {
42877 + err = -EFAULT;
42878 + goto cleanup;
42879 + }
42880 +
42881 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42882 +
42883 + if (!len || len >= PATH_MAX) {
42884 + err = -EINVAL;
42885 + goto cleanup;
42886 + }
42887 +
42888 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42889 + err = -ENOMEM;
42890 + goto cleanup;
42891 + }
42892 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
42893 + err = -EFAULT;
42894 + goto cleanup;
42895 + }
42896 + tmp[len-1] = '\0';
42897 + r_tmp->rolename = tmp;
42898 +
42899 + if (!strcmp(r_tmp->rolename, "default")
42900 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42901 + default_role = r_tmp;
42902 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42903 + kernel_role = r_tmp;
42904 + }
42905 +
42906 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42907 + err = -ENOMEM;
42908 + goto cleanup;
42909 + }
42910 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42911 + err = -EFAULT;
42912 + goto cleanup;
42913 + }
42914 +
42915 + r_tmp->hash = ghash;
42916 +
42917 + num_subjs = count_user_subjs(r_tmp->hash->first);
42918 +
42919 + r_tmp->subj_hash_size = num_subjs;
42920 + r_tmp->subj_hash =
42921 + (struct acl_subject_label **)
42922 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42923 +
42924 + if (!r_tmp->subj_hash) {
42925 + err = -ENOMEM;
42926 + goto cleanup;
42927 + }
42928 +
42929 + err = copy_user_allowedips(r_tmp);
42930 + if (err)
42931 + goto cleanup;
42932 +
42933 + /* copy domain info */
42934 + if (r_tmp->domain_children != NULL) {
42935 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42936 + if (domainlist == NULL) {
42937 + err = -ENOMEM;
42938 + goto cleanup;
42939 + }
42940 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42941 + err = -EFAULT;
42942 + goto cleanup;
42943 + }
42944 + r_tmp->domain_children = domainlist;
42945 + }
42946 +
42947 + err = copy_user_transitions(r_tmp);
42948 + if (err)
42949 + goto cleanup;
42950 +
42951 + memset(r_tmp->subj_hash, 0,
42952 + r_tmp->subj_hash_size *
42953 + sizeof (struct acl_subject_label *));
42954 +
42955 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42956 +
42957 + if (err)
42958 + goto cleanup;
42959 +
42960 + /* set nested subject list to null */
42961 + r_tmp->hash->first = NULL;
42962 +
42963 + insert_acl_role_label(r_tmp);
42964 + }
42965 +
42966 + goto return_err;
42967 + cleanup:
42968 + free_variables();
42969 + return_err:
42970 + return err;
42971 +
42972 +}
42973 +
42974 +static int
42975 +gracl_init(struct gr_arg *args)
42976 +{
42977 + int error = 0;
42978 +
42979 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
42980 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
42981 +
42982 + if (init_variables(args)) {
42983 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
42984 + error = -ENOMEM;
42985 + free_variables();
42986 + goto out;
42987 + }
42988 +
42989 + error = copy_user_acl(args);
42990 + free_init_variables();
42991 + if (error) {
42992 + free_variables();
42993 + goto out;
42994 + }
42995 +
42996 + if ((error = gr_set_acls(0))) {
42997 + free_variables();
42998 + goto out;
42999 + }
43000 +
43001 + pax_open_kernel();
43002 + gr_status |= GR_READY;
43003 + pax_close_kernel();
43004 +
43005 + out:
43006 + return error;
43007 +}
43008 +
43009 +/* derived from glibc fnmatch() 0: match, 1: no match*/
43010 +
43011 +static int
43012 +glob_match(const char *p, const char *n)
43013 +{
43014 + char c;
43015 +
43016 + while ((c = *p++) != '\0') {
43017 + switch (c) {
43018 + case '?':
43019 + if (*n == '\0')
43020 + return 1;
43021 + else if (*n == '/')
43022 + return 1;
43023 + break;
43024 + case '\\':
43025 + if (*n != c)
43026 + return 1;
43027 + break;
43028 + case '*':
43029 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
43030 + if (*n == '/')
43031 + return 1;
43032 + else if (c == '?') {
43033 + if (*n == '\0')
43034 + return 1;
43035 + else
43036 + ++n;
43037 + }
43038 + }
43039 + if (c == '\0') {
43040 + return 0;
43041 + } else {
43042 + const char *endp;
43043 +
43044 + if ((endp = strchr(n, '/')) == NULL)
43045 + endp = n + strlen(n);
43046 +
43047 + if (c == '[') {
43048 + for (--p; n < endp; ++n)
43049 + if (!glob_match(p, n))
43050 + return 0;
43051 + } else if (c == '/') {
43052 + while (*n != '\0' && *n != '/')
43053 + ++n;
43054 + if (*n == '/' && !glob_match(p, n + 1))
43055 + return 0;
43056 + } else {
43057 + for (--p; n < endp; ++n)
43058 + if (*n == c && !glob_match(p, n))
43059 + return 0;
43060 + }
43061 +
43062 + return 1;
43063 + }
43064 + case '[':
43065 + {
43066 + int not;
43067 + char cold;
43068 +
43069 + if (*n == '\0' || *n == '/')
43070 + return 1;
43071 +
43072 + not = (*p == '!' || *p == '^');
43073 + if (not)
43074 + ++p;
43075 +
43076 + c = *p++;
43077 + for (;;) {
43078 + unsigned char fn = (unsigned char)*n;
43079 +
43080 + if (c == '\0')
43081 + return 1;
43082 + else {
43083 + if (c == fn)
43084 + goto matched;
43085 + cold = c;
43086 + c = *p++;
43087 +
43088 + if (c == '-' && *p != ']') {
43089 + unsigned char cend = *p++;
43090 +
43091 + if (cend == '\0')
43092 + return 1;
43093 +
43094 + if (cold <= fn && fn <= cend)
43095 + goto matched;
43096 +
43097 + c = *p++;
43098 + }
43099 + }
43100 +
43101 + if (c == ']')
43102 + break;
43103 + }
43104 + if (!not)
43105 + return 1;
43106 + break;
43107 + matched:
43108 + while (c != ']') {
43109 + if (c == '\0')
43110 + return 1;
43111 +
43112 + c = *p++;
43113 + }
43114 + if (not)
43115 + return 1;
43116 + }
43117 + break;
43118 + default:
43119 + if (c != *n)
43120 + return 1;
43121 + }
43122 +
43123 + ++n;
43124 + }
43125 +
43126 + if (*n == '\0')
43127 + return 0;
43128 +
43129 + if (*n == '/')
43130 + return 0;
43131 +
43132 + return 1;
43133 +}
43134 +
43135 +static struct acl_object_label *
43136 +chk_glob_label(struct acl_object_label *globbed,
43137 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43138 +{
43139 + struct acl_object_label *tmp;
43140 +
43141 + if (*path == NULL)
43142 + *path = gr_to_filename_nolock(dentry, mnt);
43143 +
43144 + tmp = globbed;
43145 +
43146 + while (tmp) {
43147 + if (!glob_match(tmp->filename, *path))
43148 + return tmp;
43149 + tmp = tmp->next;
43150 + }
43151 +
43152 + return NULL;
43153 +}
43154 +
43155 +static struct acl_object_label *
43156 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43157 + const ino_t curr_ino, const dev_t curr_dev,
43158 + const struct acl_subject_label *subj, char **path, const int checkglob)
43159 +{
43160 + struct acl_subject_label *tmpsubj;
43161 + struct acl_object_label *retval;
43162 + struct acl_object_label *retval2;
43163 +
43164 + tmpsubj = (struct acl_subject_label *) subj;
43165 + read_lock(&gr_inode_lock);
43166 + do {
43167 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43168 + if (retval) {
43169 + if (checkglob && retval->globbed) {
43170 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43171 + (struct vfsmount *)orig_mnt, path);
43172 + if (retval2)
43173 + retval = retval2;
43174 + }
43175 + break;
43176 + }
43177 + } while ((tmpsubj = tmpsubj->parent_subject));
43178 + read_unlock(&gr_inode_lock);
43179 +
43180 + return retval;
43181 +}
43182 +
43183 +static __inline__ struct acl_object_label *
43184 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43185 + struct dentry *curr_dentry,
43186 + const struct acl_subject_label *subj, char **path, const int checkglob)
43187 +{
43188 + int newglob = checkglob;
43189 + ino_t inode;
43190 + dev_t device;
43191 +
43192 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43193 + as we don't want a / * rule to match instead of the / object
43194 + don't do this for create lookups that call this function though, since they're looking up
43195 + on the parent and thus need globbing checks on all paths
43196 + */
43197 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43198 + newglob = GR_NO_GLOB;
43199 +
43200 + spin_lock(&curr_dentry->d_lock);
43201 + inode = curr_dentry->d_inode->i_ino;
43202 + device = __get_dev(curr_dentry);
43203 + spin_unlock(&curr_dentry->d_lock);
43204 +
43205 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43206 +}
43207 +
43208 +static struct acl_object_label *
43209 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43210 + const struct acl_subject_label *subj, char *path, const int checkglob)
43211 +{
43212 + struct dentry *dentry = (struct dentry *) l_dentry;
43213 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43214 + struct acl_object_label *retval;
43215 + struct dentry *parent;
43216 +
43217 + write_seqlock(&rename_lock);
43218 + br_read_lock(vfsmount_lock);
43219 +
43220 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43221 +#ifdef CONFIG_NET
43222 + mnt == sock_mnt ||
43223 +#endif
43224 +#ifdef CONFIG_HUGETLBFS
43225 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43226 +#endif
43227 + /* ignore Eric Biederman */
43228 + IS_PRIVATE(l_dentry->d_inode))) {
43229 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43230 + goto out;
43231 + }
43232 +
43233 + for (;;) {
43234 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43235 + break;
43236 +
43237 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43238 + if (mnt->mnt_parent == mnt)
43239 + break;
43240 +
43241 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43242 + if (retval != NULL)
43243 + goto out;
43244 +
43245 + dentry = mnt->mnt_mountpoint;
43246 + mnt = mnt->mnt_parent;
43247 + continue;
43248 + }
43249 +
43250 + parent = dentry->d_parent;
43251 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43252 + if (retval != NULL)
43253 + goto out;
43254 +
43255 + dentry = parent;
43256 + }
43257 +
43258 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43259 +
43260 + /* real_root is pinned so we don't have to hold a reference */
43261 + if (retval == NULL)
43262 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43263 +out:
43264 + br_read_unlock(vfsmount_lock);
43265 + write_sequnlock(&rename_lock);
43266 +
43267 + BUG_ON(retval == NULL);
43268 +
43269 + return retval;
43270 +}
43271 +
43272 +static __inline__ struct acl_object_label *
43273 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43274 + const struct acl_subject_label *subj)
43275 +{
43276 + char *path = NULL;
43277 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43278 +}
43279 +
43280 +static __inline__ struct acl_object_label *
43281 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43282 + const struct acl_subject_label *subj)
43283 +{
43284 + char *path = NULL;
43285 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43286 +}
43287 +
43288 +static __inline__ struct acl_object_label *
43289 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43290 + const struct acl_subject_label *subj, char *path)
43291 +{
43292 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43293 +}
43294 +
43295 +static struct acl_subject_label *
43296 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43297 + const struct acl_role_label *role)
43298 +{
43299 + struct dentry *dentry = (struct dentry *) l_dentry;
43300 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43301 + struct acl_subject_label *retval;
43302 + struct dentry *parent;
43303 +
43304 + write_seqlock(&rename_lock);
43305 + br_read_lock(vfsmount_lock);
43306 +
43307 + for (;;) {
43308 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43309 + break;
43310 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43311 + if (mnt->mnt_parent == mnt)
43312 + break;
43313 +
43314 + spin_lock(&dentry->d_lock);
43315 + read_lock(&gr_inode_lock);
43316 + retval =
43317 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43318 + __get_dev(dentry), role);
43319 + read_unlock(&gr_inode_lock);
43320 + spin_unlock(&dentry->d_lock);
43321 + if (retval != NULL)
43322 + goto out;
43323 +
43324 + dentry = mnt->mnt_mountpoint;
43325 + mnt = mnt->mnt_parent;
43326 + continue;
43327 + }
43328 +
43329 + spin_lock(&dentry->d_lock);
43330 + read_lock(&gr_inode_lock);
43331 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43332 + __get_dev(dentry), role);
43333 + read_unlock(&gr_inode_lock);
43334 + parent = dentry->d_parent;
43335 + spin_unlock(&dentry->d_lock);
43336 +
43337 + if (retval != NULL)
43338 + goto out;
43339 +
43340 + dentry = parent;
43341 + }
43342 +
43343 + spin_lock(&dentry->d_lock);
43344 + read_lock(&gr_inode_lock);
43345 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43346 + __get_dev(dentry), role);
43347 + read_unlock(&gr_inode_lock);
43348 + spin_unlock(&dentry->d_lock);
43349 +
43350 + if (unlikely(retval == NULL)) {
43351 + /* real_root is pinned, we don't need to hold a reference */
43352 + read_lock(&gr_inode_lock);
43353 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43354 + __get_dev(real_root.dentry), role);
43355 + read_unlock(&gr_inode_lock);
43356 + }
43357 +out:
43358 + br_read_unlock(vfsmount_lock);
43359 + write_sequnlock(&rename_lock);
43360 +
43361 + BUG_ON(retval == NULL);
43362 +
43363 + return retval;
43364 +}
43365 +
43366 +static void
43367 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43368 +{
43369 + struct task_struct *task = current;
43370 + const struct cred *cred = current_cred();
43371 +
43372 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43373 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43374 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43375 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43376 +
43377 + return;
43378 +}
43379 +
43380 +static void
43381 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43382 +{
43383 + struct task_struct *task = current;
43384 + const struct cred *cred = current_cred();
43385 +
43386 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43387 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43388 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43389 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43390 +
43391 + return;
43392 +}
43393 +
43394 +static void
43395 +gr_log_learn_id_change(const char type, const unsigned int real,
43396 + const unsigned int effective, const unsigned int fs)
43397 +{
43398 + struct task_struct *task = current;
43399 + const struct cred *cred = current_cred();
43400 +
43401 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43402 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43403 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43404 + type, real, effective, fs, &task->signal->saved_ip);
43405 +
43406 + return;
43407 +}
43408 +
43409 +__u32
43410 +gr_check_link(const struct dentry * new_dentry,
43411 + const struct dentry * parent_dentry,
43412 + const struct vfsmount * parent_mnt,
43413 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43414 +{
43415 + struct acl_object_label *obj;
43416 + __u32 oldmode, newmode;
43417 + __u32 needmode;
43418 +
43419 + if (unlikely(!(gr_status & GR_READY)))
43420 + return (GR_CREATE | GR_LINK);
43421 +
43422 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43423 + oldmode = obj->mode;
43424 +
43425 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43426 + oldmode |= (GR_CREATE | GR_LINK);
43427 +
43428 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43429 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43430 + needmode |= GR_SETID | GR_AUDIT_SETID;
43431 +
43432 + newmode =
43433 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43434 + oldmode | needmode);
43435 +
43436 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43437 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43438 + GR_INHERIT | GR_AUDIT_INHERIT);
43439 +
43440 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43441 + goto bad;
43442 +
43443 + if ((oldmode & needmode) != needmode)
43444 + goto bad;
43445 +
43446 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43447 + if ((newmode & needmode) != needmode)
43448 + goto bad;
43449 +
43450 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43451 + return newmode;
43452 +bad:
43453 + needmode = oldmode;
43454 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43455 + needmode |= GR_SETID;
43456 +
43457 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43458 + gr_log_learn(old_dentry, old_mnt, needmode);
43459 + return (GR_CREATE | GR_LINK);
43460 + } else if (newmode & GR_SUPPRESS)
43461 + return GR_SUPPRESS;
43462 + else
43463 + return 0;
43464 +}
43465 +
43466 +__u32
43467 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43468 + const struct vfsmount * mnt)
43469 +{
43470 + __u32 retval = mode;
43471 + struct acl_subject_label *curracl;
43472 + struct acl_object_label *currobj;
43473 +
43474 + if (unlikely(!(gr_status & GR_READY)))
43475 + return (mode & ~GR_AUDITS);
43476 +
43477 + curracl = current->acl;
43478 +
43479 + currobj = chk_obj_label(dentry, mnt, curracl);
43480 + retval = currobj->mode & mode;
43481 +
43482 + /* if we're opening a specified transfer file for writing
43483 + (e.g. /dev/initctl), then transfer our role to init
43484 + */
43485 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43486 + current->role->roletype & GR_ROLE_PERSIST)) {
43487 + struct task_struct *task = init_pid_ns.child_reaper;
43488 +
43489 + if (task->role != current->role) {
43490 + task->acl_sp_role = 0;
43491 + task->acl_role_id = current->acl_role_id;
43492 + task->role = current->role;
43493 + rcu_read_lock();
43494 + read_lock(&grsec_exec_file_lock);
43495 + gr_apply_subject_to_task(task);
43496 + read_unlock(&grsec_exec_file_lock);
43497 + rcu_read_unlock();
43498 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43499 + }
43500 + }
43501 +
43502 + if (unlikely
43503 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43504 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43505 + __u32 new_mode = mode;
43506 +
43507 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43508 +
43509 + retval = new_mode;
43510 +
43511 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43512 + new_mode |= GR_INHERIT;
43513 +
43514 + if (!(mode & GR_NOLEARN))
43515 + gr_log_learn(dentry, mnt, new_mode);
43516 + }
43517 +
43518 + return retval;
43519 +}
43520 +
43521 +__u32
43522 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43523 + const struct vfsmount * mnt, const __u32 mode)
43524 +{
43525 + struct name_entry *match;
43526 + struct acl_object_label *matchpo;
43527 + struct acl_subject_label *curracl;
43528 + char *path;
43529 + __u32 retval;
43530 +
43531 + if (unlikely(!(gr_status & GR_READY)))
43532 + return (mode & ~GR_AUDITS);
43533 +
43534 + preempt_disable();
43535 + path = gr_to_filename_rbac(new_dentry, mnt);
43536 + match = lookup_name_entry_create(path);
43537 +
43538 + if (!match)
43539 + goto check_parent;
43540 +
43541 + curracl = current->acl;
43542 +
43543 + read_lock(&gr_inode_lock);
43544 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43545 + read_unlock(&gr_inode_lock);
43546 +
43547 + if (matchpo) {
43548 + if ((matchpo->mode & mode) !=
43549 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43550 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43551 + __u32 new_mode = mode;
43552 +
43553 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43554 +
43555 + gr_log_learn(new_dentry, mnt, new_mode);
43556 +
43557 + preempt_enable();
43558 + return new_mode;
43559 + }
43560 + preempt_enable();
43561 + return (matchpo->mode & mode);
43562 + }
43563 +
43564 + check_parent:
43565 + curracl = current->acl;
43566 +
43567 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43568 + retval = matchpo->mode & mode;
43569 +
43570 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43571 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43572 + __u32 new_mode = mode;
43573 +
43574 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43575 +
43576 + gr_log_learn(new_dentry, mnt, new_mode);
43577 + preempt_enable();
43578 + return new_mode;
43579 + }
43580 +
43581 + preempt_enable();
43582 + return retval;
43583 +}
43584 +
43585 +int
43586 +gr_check_hidden_task(const struct task_struct *task)
43587 +{
43588 + if (unlikely(!(gr_status & GR_READY)))
43589 + return 0;
43590 +
43591 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43592 + return 1;
43593 +
43594 + return 0;
43595 +}
43596 +
43597 +int
43598 +gr_check_protected_task(const struct task_struct *task)
43599 +{
43600 + if (unlikely(!(gr_status & GR_READY) || !task))
43601 + return 0;
43602 +
43603 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43604 + task->acl != current->acl)
43605 + return 1;
43606 +
43607 + return 0;
43608 +}
43609 +
43610 +int
43611 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43612 +{
43613 + struct task_struct *p;
43614 + int ret = 0;
43615 +
43616 + if (unlikely(!(gr_status & GR_READY) || !pid))
43617 + return ret;
43618 +
43619 + read_lock(&tasklist_lock);
43620 + do_each_pid_task(pid, type, p) {
43621 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43622 + p->acl != current->acl) {
43623 + ret = 1;
43624 + goto out;
43625 + }
43626 + } while_each_pid_task(pid, type, p);
43627 +out:
43628 + read_unlock(&tasklist_lock);
43629 +
43630 + return ret;
43631 +}
43632 +
43633 +void
43634 +gr_copy_label(struct task_struct *tsk)
43635 +{
43636 + tsk->signal->used_accept = 0;
43637 + tsk->acl_sp_role = 0;
43638 + tsk->acl_role_id = current->acl_role_id;
43639 + tsk->acl = current->acl;
43640 + tsk->role = current->role;
43641 + tsk->signal->curr_ip = current->signal->curr_ip;
43642 + tsk->signal->saved_ip = current->signal->saved_ip;
43643 + if (current->exec_file)
43644 + get_file(current->exec_file);
43645 + tsk->exec_file = current->exec_file;
43646 + tsk->is_writable = current->is_writable;
43647 + if (unlikely(current->signal->used_accept)) {
43648 + current->signal->curr_ip = 0;
43649 + current->signal->saved_ip = 0;
43650 + }
43651 +
43652 + return;
43653 +}
43654 +
43655 +static void
43656 +gr_set_proc_res(struct task_struct *task)
43657 +{
43658 + struct acl_subject_label *proc;
43659 + unsigned short i;
43660 +
43661 + proc = task->acl;
43662 +
43663 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43664 + return;
43665 +
43666 + for (i = 0; i < RLIM_NLIMITS; i++) {
43667 + if (!(proc->resmask & (1 << i)))
43668 + continue;
43669 +
43670 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43671 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43672 + }
43673 +
43674 + return;
43675 +}
43676 +
43677 +extern int __gr_process_user_ban(struct user_struct *user);
43678 +
43679 +int
43680 +gr_check_user_change(int real, int effective, int fs)
43681 +{
43682 + unsigned int i;
43683 + __u16 num;
43684 + uid_t *uidlist;
43685 + int curuid;
43686 + int realok = 0;
43687 + int effectiveok = 0;
43688 + int fsok = 0;
43689 +
43690 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43691 + struct user_struct *user;
43692 +
43693 + if (real == -1)
43694 + goto skipit;
43695 +
43696 + user = find_user(real);
43697 + if (user == NULL)
43698 + goto skipit;
43699 +
43700 + if (__gr_process_user_ban(user)) {
43701 + /* for find_user */
43702 + free_uid(user);
43703 + return 1;
43704 + }
43705 +
43706 + /* for find_user */
43707 + free_uid(user);
43708 +
43709 +skipit:
43710 +#endif
43711 +
43712 + if (unlikely(!(gr_status & GR_READY)))
43713 + return 0;
43714 +
43715 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43716 + gr_log_learn_id_change('u', real, effective, fs);
43717 +
43718 + num = current->acl->user_trans_num;
43719 + uidlist = current->acl->user_transitions;
43720 +
43721 + if (uidlist == NULL)
43722 + return 0;
43723 +
43724 + if (real == -1)
43725 + realok = 1;
43726 + if (effective == -1)
43727 + effectiveok = 1;
43728 + if (fs == -1)
43729 + fsok = 1;
43730 +
43731 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43732 + for (i = 0; i < num; i++) {
43733 + curuid = (int)uidlist[i];
43734 + if (real == curuid)
43735 + realok = 1;
43736 + if (effective == curuid)
43737 + effectiveok = 1;
43738 + if (fs == curuid)
43739 + fsok = 1;
43740 + }
43741 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43742 + for (i = 0; i < num; i++) {
43743 + curuid = (int)uidlist[i];
43744 + if (real == curuid)
43745 + break;
43746 + if (effective == curuid)
43747 + break;
43748 + if (fs == curuid)
43749 + break;
43750 + }
43751 + /* not in deny list */
43752 + if (i == num) {
43753 + realok = 1;
43754 + effectiveok = 1;
43755 + fsok = 1;
43756 + }
43757 + }
43758 +
43759 + if (realok && effectiveok && fsok)
43760 + return 0;
43761 + else {
43762 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43763 + return 1;
43764 + }
43765 +}
43766 +
43767 +int
43768 +gr_check_group_change(int real, int effective, int fs)
43769 +{
43770 + unsigned int i;
43771 + __u16 num;
43772 + gid_t *gidlist;
43773 + int curgid;
43774 + int realok = 0;
43775 + int effectiveok = 0;
43776 + int fsok = 0;
43777 +
43778 + if (unlikely(!(gr_status & GR_READY)))
43779 + return 0;
43780 +
43781 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43782 + gr_log_learn_id_change('g', real, effective, fs);
43783 +
43784 + num = current->acl->group_trans_num;
43785 + gidlist = current->acl->group_transitions;
43786 +
43787 + if (gidlist == NULL)
43788 + return 0;
43789 +
43790 + if (real == -1)
43791 + realok = 1;
43792 + if (effective == -1)
43793 + effectiveok = 1;
43794 + if (fs == -1)
43795 + fsok = 1;
43796 +
43797 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43798 + for (i = 0; i < num; i++) {
43799 + curgid = (int)gidlist[i];
43800 + if (real == curgid)
43801 + realok = 1;
43802 + if (effective == curgid)
43803 + effectiveok = 1;
43804 + if (fs == curgid)
43805 + fsok = 1;
43806 + }
43807 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43808 + for (i = 0; i < num; i++) {
43809 + curgid = (int)gidlist[i];
43810 + if (real == curgid)
43811 + break;
43812 + if (effective == curgid)
43813 + break;
43814 + if (fs == curgid)
43815 + break;
43816 + }
43817 + /* not in deny list */
43818 + if (i == num) {
43819 + realok = 1;
43820 + effectiveok = 1;
43821 + fsok = 1;
43822 + }
43823 + }
43824 +
43825 + if (realok && effectiveok && fsok)
43826 + return 0;
43827 + else {
43828 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43829 + return 1;
43830 + }
43831 +}
43832 +
43833 +void
43834 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43835 +{
43836 + struct acl_role_label *role = task->role;
43837 + struct acl_subject_label *subj = NULL;
43838 + struct acl_object_label *obj;
43839 + struct file *filp;
43840 +
43841 + if (unlikely(!(gr_status & GR_READY)))
43842 + return;
43843 +
43844 + filp = task->exec_file;
43845 +
43846 + /* kernel process, we'll give them the kernel role */
43847 + if (unlikely(!filp)) {
43848 + task->role = kernel_role;
43849 + task->acl = kernel_role->root_label;
43850 + return;
43851 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43852 + role = lookup_acl_role_label(task, uid, gid);
43853 +
43854 + /* perform subject lookup in possibly new role
43855 + we can use this result below in the case where role == task->role
43856 + */
43857 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43858 +
43859 + /* if we changed uid/gid, but result in the same role
43860 + and are using inheritance, don't lose the inherited subject
43861 + if current subject is other than what normal lookup
43862 + would result in, we arrived via inheritance, don't
43863 + lose subject
43864 + */
43865 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43866 + (subj == task->acl)))
43867 + task->acl = subj;
43868 +
43869 + task->role = role;
43870 +
43871 + task->is_writable = 0;
43872 +
43873 + /* ignore additional mmap checks for processes that are writable
43874 + by the default ACL */
43875 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43876 + if (unlikely(obj->mode & GR_WRITE))
43877 + task->is_writable = 1;
43878 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43879 + if (unlikely(obj->mode & GR_WRITE))
43880 + task->is_writable = 1;
43881 +
43882 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43883 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43884 +#endif
43885 +
43886 + gr_set_proc_res(task);
43887 +
43888 + return;
43889 +}
43890 +
43891 +int
43892 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43893 + const int unsafe_share)
43894 +{
43895 + struct task_struct *task = current;
43896 + struct acl_subject_label *newacl;
43897 + struct acl_object_label *obj;
43898 + __u32 retmode;
43899 +
43900 + if (unlikely(!(gr_status & GR_READY)))
43901 + return 0;
43902 +
43903 + newacl = chk_subj_label(dentry, mnt, task->role);
43904 +
43905 + task_lock(task);
43906 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43907 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43908 + !(task->role->roletype & GR_ROLE_GOD) &&
43909 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43910 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43911 + task_unlock(task);
43912 + if (unsafe_share)
43913 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43914 + else
43915 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43916 + return -EACCES;
43917 + }
43918 + task_unlock(task);
43919 +
43920 + obj = chk_obj_label(dentry, mnt, task->acl);
43921 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43922 +
43923 + if (!(task->acl->mode & GR_INHERITLEARN) &&
43924 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43925 + if (obj->nested)
43926 + task->acl = obj->nested;
43927 + else
43928 + task->acl = newacl;
43929 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43930 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43931 +
43932 + task->is_writable = 0;
43933 +
43934 + /* ignore additional mmap checks for processes that are writable
43935 + by the default ACL */
43936 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
43937 + if (unlikely(obj->mode & GR_WRITE))
43938 + task->is_writable = 1;
43939 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
43940 + if (unlikely(obj->mode & GR_WRITE))
43941 + task->is_writable = 1;
43942 +
43943 + gr_set_proc_res(task);
43944 +
43945 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43946 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43947 +#endif
43948 + return 0;
43949 +}
43950 +
43951 +/* always called with valid inodev ptr */
43952 +static void
43953 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43954 +{
43955 + struct acl_object_label *matchpo;
43956 + struct acl_subject_label *matchps;
43957 + struct acl_subject_label *subj;
43958 + struct acl_role_label *role;
43959 + unsigned int x;
43960 +
43961 + FOR_EACH_ROLE_START(role)
43962 + FOR_EACH_SUBJECT_START(role, subj, x)
43963 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43964 + matchpo->mode |= GR_DELETED;
43965 + FOR_EACH_SUBJECT_END(subj,x)
43966 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
43967 + if (subj->inode == ino && subj->device == dev)
43968 + subj->mode |= GR_DELETED;
43969 + FOR_EACH_NESTED_SUBJECT_END(subj)
43970 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
43971 + matchps->mode |= GR_DELETED;
43972 + FOR_EACH_ROLE_END(role)
43973 +
43974 + inodev->nentry->deleted = 1;
43975 +
43976 + return;
43977 +}
43978 +
43979 +void
43980 +gr_handle_delete(const ino_t ino, const dev_t dev)
43981 +{
43982 + struct inodev_entry *inodev;
43983 +
43984 + if (unlikely(!(gr_status & GR_READY)))
43985 + return;
43986 +
43987 + write_lock(&gr_inode_lock);
43988 + inodev = lookup_inodev_entry(ino, dev);
43989 + if (inodev != NULL)
43990 + do_handle_delete(inodev, ino, dev);
43991 + write_unlock(&gr_inode_lock);
43992 +
43993 + return;
43994 +}
43995 +
43996 +static void
43997 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
43998 + const ino_t newinode, const dev_t newdevice,
43999 + struct acl_subject_label *subj)
44000 +{
44001 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44002 + struct acl_object_label *match;
44003 +
44004 + match = subj->obj_hash[index];
44005 +
44006 + while (match && (match->inode != oldinode ||
44007 + match->device != olddevice ||
44008 + !(match->mode & GR_DELETED)))
44009 + match = match->next;
44010 +
44011 + if (match && (match->inode == oldinode)
44012 + && (match->device == olddevice)
44013 + && (match->mode & GR_DELETED)) {
44014 + if (match->prev == NULL) {
44015 + subj->obj_hash[index] = match->next;
44016 + if (match->next != NULL)
44017 + match->next->prev = NULL;
44018 + } else {
44019 + match->prev->next = match->next;
44020 + if (match->next != NULL)
44021 + match->next->prev = match->prev;
44022 + }
44023 + match->prev = NULL;
44024 + match->next = NULL;
44025 + match->inode = newinode;
44026 + match->device = newdevice;
44027 + match->mode &= ~GR_DELETED;
44028 +
44029 + insert_acl_obj_label(match, subj);
44030 + }
44031 +
44032 + return;
44033 +}
44034 +
44035 +static void
44036 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44037 + const ino_t newinode, const dev_t newdevice,
44038 + struct acl_role_label *role)
44039 +{
44040 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44041 + struct acl_subject_label *match;
44042 +
44043 + match = role->subj_hash[index];
44044 +
44045 + while (match && (match->inode != oldinode ||
44046 + match->device != olddevice ||
44047 + !(match->mode & GR_DELETED)))
44048 + match = match->next;
44049 +
44050 + if (match && (match->inode == oldinode)
44051 + && (match->device == olddevice)
44052 + && (match->mode & GR_DELETED)) {
44053 + if (match->prev == NULL) {
44054 + role->subj_hash[index] = match->next;
44055 + if (match->next != NULL)
44056 + match->next->prev = NULL;
44057 + } else {
44058 + match->prev->next = match->next;
44059 + if (match->next != NULL)
44060 + match->next->prev = match->prev;
44061 + }
44062 + match->prev = NULL;
44063 + match->next = NULL;
44064 + match->inode = newinode;
44065 + match->device = newdevice;
44066 + match->mode &= ~GR_DELETED;
44067 +
44068 + insert_acl_subj_label(match, role);
44069 + }
44070 +
44071 + return;
44072 +}
44073 +
44074 +static void
44075 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44076 + const ino_t newinode, const dev_t newdevice)
44077 +{
44078 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44079 + struct inodev_entry *match;
44080 +
44081 + match = inodev_set.i_hash[index];
44082 +
44083 + while (match && (match->nentry->inode != oldinode ||
44084 + match->nentry->device != olddevice || !match->nentry->deleted))
44085 + match = match->next;
44086 +
44087 + if (match && (match->nentry->inode == oldinode)
44088 + && (match->nentry->device == olddevice) &&
44089 + match->nentry->deleted) {
44090 + if (match->prev == NULL) {
44091 + inodev_set.i_hash[index] = match->next;
44092 + if (match->next != NULL)
44093 + match->next->prev = NULL;
44094 + } else {
44095 + match->prev->next = match->next;
44096 + if (match->next != NULL)
44097 + match->next->prev = match->prev;
44098 + }
44099 + match->prev = NULL;
44100 + match->next = NULL;
44101 + match->nentry->inode = newinode;
44102 + match->nentry->device = newdevice;
44103 + match->nentry->deleted = 0;
44104 +
44105 + insert_inodev_entry(match);
44106 + }
44107 +
44108 + return;
44109 +}
44110 +
44111 +static void
44112 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44113 + const struct vfsmount *mnt)
44114 +{
44115 + struct acl_subject_label *subj;
44116 + struct acl_role_label *role;
44117 + unsigned int x;
44118 + ino_t ino = dentry->d_inode->i_ino;
44119 + dev_t dev = __get_dev(dentry);
44120 +
44121 + FOR_EACH_ROLE_START(role)
44122 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44123 +
44124 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44125 + if ((subj->inode == ino) && (subj->device == dev)) {
44126 + subj->inode = ino;
44127 + subj->device = dev;
44128 + }
44129 + FOR_EACH_NESTED_SUBJECT_END(subj)
44130 + FOR_EACH_SUBJECT_START(role, subj, x)
44131 + update_acl_obj_label(matchn->inode, matchn->device,
44132 + ino, dev, subj);
44133 + FOR_EACH_SUBJECT_END(subj,x)
44134 + FOR_EACH_ROLE_END(role)
44135 +
44136 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44137 +
44138 + return;
44139 +}
44140 +
44141 +void
44142 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44143 +{
44144 + struct name_entry *matchn;
44145 +
44146 + if (unlikely(!(gr_status & GR_READY)))
44147 + return;
44148 +
44149 + preempt_disable();
44150 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44151 +
44152 + if (unlikely((unsigned long)matchn)) {
44153 + write_lock(&gr_inode_lock);
44154 + do_handle_create(matchn, dentry, mnt);
44155 + write_unlock(&gr_inode_lock);
44156 + }
44157 + preempt_enable();
44158 +
44159 + return;
44160 +}
44161 +
44162 +void
44163 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44164 + struct dentry *old_dentry,
44165 + struct dentry *new_dentry,
44166 + struct vfsmount *mnt, const __u8 replace)
44167 +{
44168 + struct name_entry *matchn;
44169 + struct inodev_entry *inodev;
44170 + ino_t old_ino = old_dentry->d_inode->i_ino;
44171 + dev_t old_dev = __get_dev(old_dentry);
44172 +
44173 + /* vfs_rename swaps the name and parent link for old_dentry and
44174 + new_dentry
44175 + at this point, old_dentry has the new name, parent link, and inode
44176 + for the renamed file
44177 + if a file is being replaced by a rename, new_dentry has the inode
44178 + and name for the replaced file
44179 + */
44180 +
44181 + if (unlikely(!(gr_status & GR_READY)))
44182 + return;
44183 +
44184 + preempt_disable();
44185 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44186 +
44187 + /* we wouldn't have to check d_inode if it weren't for
44188 + NFS silly-renaming
44189 + */
44190 +
44191 + write_lock(&gr_inode_lock);
44192 + if (unlikely(replace && new_dentry->d_inode)) {
44193 + ino_t new_ino = new_dentry->d_inode->i_ino;
44194 + dev_t new_dev = __get_dev(new_dentry);
44195 +
44196 + inodev = lookup_inodev_entry(new_ino, new_dev);
44197 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44198 + do_handle_delete(inodev, new_ino, new_dev);
44199 + }
44200 +
44201 + inodev = lookup_inodev_entry(old_ino, old_dev);
44202 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44203 + do_handle_delete(inodev, old_ino, old_dev);
44204 +
44205 + if (unlikely((unsigned long)matchn))
44206 + do_handle_create(matchn, old_dentry, mnt);
44207 +
44208 + write_unlock(&gr_inode_lock);
44209 + preempt_enable();
44210 +
44211 + return;
44212 +}
44213 +
44214 +static int
44215 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44216 + unsigned char **sum)
44217 +{
44218 + struct acl_role_label *r;
44219 + struct role_allowed_ip *ipp;
44220 + struct role_transition *trans;
44221 + unsigned int i;
44222 + int found = 0;
44223 + u32 curr_ip = current->signal->curr_ip;
44224 +
44225 + current->signal->saved_ip = curr_ip;
44226 +
44227 + /* check transition table */
44228 +
44229 + for (trans = current->role->transitions; trans; trans = trans->next) {
44230 + if (!strcmp(rolename, trans->rolename)) {
44231 + found = 1;
44232 + break;
44233 + }
44234 + }
44235 +
44236 + if (!found)
44237 + return 0;
44238 +
44239 + /* handle special roles that do not require authentication
44240 + and check ip */
44241 +
44242 + FOR_EACH_ROLE_START(r)
44243 + if (!strcmp(rolename, r->rolename) &&
44244 + (r->roletype & GR_ROLE_SPECIAL)) {
44245 + found = 0;
44246 + if (r->allowed_ips != NULL) {
44247 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44248 + if ((ntohl(curr_ip) & ipp->netmask) ==
44249 + (ntohl(ipp->addr) & ipp->netmask))
44250 + found = 1;
44251 + }
44252 + } else
44253 + found = 2;
44254 + if (!found)
44255 + return 0;
44256 +
44257 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44258 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44259 + *salt = NULL;
44260 + *sum = NULL;
44261 + return 1;
44262 + }
44263 + }
44264 + FOR_EACH_ROLE_END(r)
44265 +
44266 + for (i = 0; i < num_sprole_pws; i++) {
44267 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44268 + *salt = acl_special_roles[i]->salt;
44269 + *sum = acl_special_roles[i]->sum;
44270 + return 1;
44271 + }
44272 + }
44273 +
44274 + return 0;
44275 +}
44276 +
44277 +static void
44278 +assign_special_role(char *rolename)
44279 +{
44280 + struct acl_object_label *obj;
44281 + struct acl_role_label *r;
44282 + struct acl_role_label *assigned = NULL;
44283 + struct task_struct *tsk;
44284 + struct file *filp;
44285 +
44286 + FOR_EACH_ROLE_START(r)
44287 + if (!strcmp(rolename, r->rolename) &&
44288 + (r->roletype & GR_ROLE_SPECIAL)) {
44289 + assigned = r;
44290 + break;
44291 + }
44292 + FOR_EACH_ROLE_END(r)
44293 +
44294 + if (!assigned)
44295 + return;
44296 +
44297 + read_lock(&tasklist_lock);
44298 + read_lock(&grsec_exec_file_lock);
44299 +
44300 + tsk = current->real_parent;
44301 + if (tsk == NULL)
44302 + goto out_unlock;
44303 +
44304 + filp = tsk->exec_file;
44305 + if (filp == NULL)
44306 + goto out_unlock;
44307 +
44308 + tsk->is_writable = 0;
44309 +
44310 + tsk->acl_sp_role = 1;
44311 + tsk->acl_role_id = ++acl_sp_role_value;
44312 + tsk->role = assigned;
44313 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44314 +
44315 + /* ignore additional mmap checks for processes that are writable
44316 + by the default ACL */
44317 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44318 + if (unlikely(obj->mode & GR_WRITE))
44319 + tsk->is_writable = 1;
44320 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44321 + if (unlikely(obj->mode & GR_WRITE))
44322 + tsk->is_writable = 1;
44323 +
44324 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44325 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44326 +#endif
44327 +
44328 +out_unlock:
44329 + read_unlock(&grsec_exec_file_lock);
44330 + read_unlock(&tasklist_lock);
44331 + return;
44332 +}
44333 +
44334 +int gr_check_secure_terminal(struct task_struct *task)
44335 +{
44336 + struct task_struct *p, *p2, *p3;
44337 + struct files_struct *files;
44338 + struct fdtable *fdt;
44339 + struct file *our_file = NULL, *file;
44340 + int i;
44341 +
44342 + if (task->signal->tty == NULL)
44343 + return 1;
44344 +
44345 + files = get_files_struct(task);
44346 + if (files != NULL) {
44347 + rcu_read_lock();
44348 + fdt = files_fdtable(files);
44349 + for (i=0; i < fdt->max_fds; i++) {
44350 + file = fcheck_files(files, i);
44351 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44352 + get_file(file);
44353 + our_file = file;
44354 + }
44355 + }
44356 + rcu_read_unlock();
44357 + put_files_struct(files);
44358 + }
44359 +
44360 + if (our_file == NULL)
44361 + return 1;
44362 +
44363 + read_lock(&tasklist_lock);
44364 + do_each_thread(p2, p) {
44365 + files = get_files_struct(p);
44366 + if (files == NULL ||
44367 + (p->signal && p->signal->tty == task->signal->tty)) {
44368 + if (files != NULL)
44369 + put_files_struct(files);
44370 + continue;
44371 + }
44372 + rcu_read_lock();
44373 + fdt = files_fdtable(files);
44374 + for (i=0; i < fdt->max_fds; i++) {
44375 + file = fcheck_files(files, i);
44376 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44377 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44378 + p3 = task;
44379 + while (p3->pid > 0) {
44380 + if (p3 == p)
44381 + break;
44382 + p3 = p3->real_parent;
44383 + }
44384 + if (p3 == p)
44385 + break;
44386 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44387 + gr_handle_alertkill(p);
44388 + rcu_read_unlock();
44389 + put_files_struct(files);
44390 + read_unlock(&tasklist_lock);
44391 + fput(our_file);
44392 + return 0;
44393 + }
44394 + }
44395 + rcu_read_unlock();
44396 + put_files_struct(files);
44397 + } while_each_thread(p2, p);
44398 + read_unlock(&tasklist_lock);
44399 +
44400 + fput(our_file);
44401 + return 1;
44402 +}
44403 +
44404 +ssize_t
44405 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44406 +{
44407 + struct gr_arg_wrapper uwrap;
44408 + unsigned char *sprole_salt = NULL;
44409 + unsigned char *sprole_sum = NULL;
44410 + int error = sizeof (struct gr_arg_wrapper);
44411 + int error2 = 0;
44412 +
44413 + mutex_lock(&gr_dev_mutex);
44414 +
44415 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44416 + error = -EPERM;
44417 + goto out;
44418 + }
44419 +
44420 + if (count != sizeof (struct gr_arg_wrapper)) {
44421 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44422 + error = -EINVAL;
44423 + goto out;
44424 + }
44425 +
44426 +
44427 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44428 + gr_auth_expires = 0;
44429 + gr_auth_attempts = 0;
44430 + }
44431 +
44432 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44433 + error = -EFAULT;
44434 + goto out;
44435 + }
44436 +
44437 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44438 + error = -EINVAL;
44439 + goto out;
44440 + }
44441 +
44442 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44443 + error = -EFAULT;
44444 + goto out;
44445 + }
44446 +
44447 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44448 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44449 + time_after(gr_auth_expires, get_seconds())) {
44450 + error = -EBUSY;
44451 + goto out;
44452 + }
44453 +
44454 + /* if non-root trying to do anything other than use a special role,
44455 + do not attempt authentication, do not count towards authentication
44456 + locking
44457 + */
44458 +
44459 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44460 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44461 + current_uid()) {
44462 + error = -EPERM;
44463 + goto out;
44464 + }
44465 +
44466 + /* ensure pw and special role name are null terminated */
44467 +
44468 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44469 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44470 +
44471 + /* Okay.
44472 + * We have our enough of the argument structure..(we have yet
44473 + * to copy_from_user the tables themselves) . Copy the tables
44474 + * only if we need them, i.e. for loading operations. */
44475 +
44476 + switch (gr_usermode->mode) {
44477 + case GR_STATUS:
44478 + if (gr_status & GR_READY) {
44479 + error = 1;
44480 + if (!gr_check_secure_terminal(current))
44481 + error = 3;
44482 + } else
44483 + error = 2;
44484 + goto out;
44485 + case GR_SHUTDOWN:
44486 + if ((gr_status & GR_READY)
44487 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44488 + pax_open_kernel();
44489 + gr_status &= ~GR_READY;
44490 + pax_close_kernel();
44491 +
44492 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44493 + free_variables();
44494 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44495 + memset(gr_system_salt, 0, GR_SALT_LEN);
44496 + memset(gr_system_sum, 0, GR_SHA_LEN);
44497 + } else if (gr_status & GR_READY) {
44498 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44499 + error = -EPERM;
44500 + } else {
44501 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44502 + error = -EAGAIN;
44503 + }
44504 + break;
44505 + case GR_ENABLE:
44506 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44507 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44508 + else {
44509 + if (gr_status & GR_READY)
44510 + error = -EAGAIN;
44511 + else
44512 + error = error2;
44513 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44514 + }
44515 + break;
44516 + case GR_RELOAD:
44517 + if (!(gr_status & GR_READY)) {
44518 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44519 + error = -EAGAIN;
44520 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44521 + preempt_disable();
44522 +
44523 + pax_open_kernel();
44524 + gr_status &= ~GR_READY;
44525 + pax_close_kernel();
44526 +
44527 + free_variables();
44528 + if (!(error2 = gracl_init(gr_usermode))) {
44529 + preempt_enable();
44530 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44531 + } else {
44532 + preempt_enable();
44533 + error = error2;
44534 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44535 + }
44536 + } else {
44537 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44538 + error = -EPERM;
44539 + }
44540 + break;
44541 + case GR_SEGVMOD:
44542 + if (unlikely(!(gr_status & GR_READY))) {
44543 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44544 + error = -EAGAIN;
44545 + break;
44546 + }
44547 +
44548 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44549 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44550 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44551 + struct acl_subject_label *segvacl;
44552 + segvacl =
44553 + lookup_acl_subj_label(gr_usermode->segv_inode,
44554 + gr_usermode->segv_device,
44555 + current->role);
44556 + if (segvacl) {
44557 + segvacl->crashes = 0;
44558 + segvacl->expires = 0;
44559 + }
44560 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44561 + gr_remove_uid(gr_usermode->segv_uid);
44562 + }
44563 + } else {
44564 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44565 + error = -EPERM;
44566 + }
44567 + break;
44568 + case GR_SPROLE:
44569 + case GR_SPROLEPAM:
44570 + if (unlikely(!(gr_status & GR_READY))) {
44571 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44572 + error = -EAGAIN;
44573 + break;
44574 + }
44575 +
44576 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44577 + current->role->expires = 0;
44578 + current->role->auth_attempts = 0;
44579 + }
44580 +
44581 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44582 + time_after(current->role->expires, get_seconds())) {
44583 + error = -EBUSY;
44584 + goto out;
44585 + }
44586 +
44587 + if (lookup_special_role_auth
44588 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44589 + && ((!sprole_salt && !sprole_sum)
44590 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44591 + char *p = "";
44592 + assign_special_role(gr_usermode->sp_role);
44593 + read_lock(&tasklist_lock);
44594 + if (current->real_parent)
44595 + p = current->real_parent->role->rolename;
44596 + read_unlock(&tasklist_lock);
44597 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44598 + p, acl_sp_role_value);
44599 + } else {
44600 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44601 + error = -EPERM;
44602 + if(!(current->role->auth_attempts++))
44603 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44604 +
44605 + goto out;
44606 + }
44607 + break;
44608 + case GR_UNSPROLE:
44609 + if (unlikely(!(gr_status & GR_READY))) {
44610 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44611 + error = -EAGAIN;
44612 + break;
44613 + }
44614 +
44615 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44616 + char *p = "";
44617 + int i = 0;
44618 +
44619 + read_lock(&tasklist_lock);
44620 + if (current->real_parent) {
44621 + p = current->real_parent->role->rolename;
44622 + i = current->real_parent->acl_role_id;
44623 + }
44624 + read_unlock(&tasklist_lock);
44625 +
44626 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44627 + gr_set_acls(1);
44628 + } else {
44629 + error = -EPERM;
44630 + goto out;
44631 + }
44632 + break;
44633 + default:
44634 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44635 + error = -EINVAL;
44636 + break;
44637 + }
44638 +
44639 + if (error != -EPERM)
44640 + goto out;
44641 +
44642 + if(!(gr_auth_attempts++))
44643 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44644 +
44645 + out:
44646 + mutex_unlock(&gr_dev_mutex);
44647 + return error;
44648 +}
44649 +
44650 +/* must be called with
44651 + rcu_read_lock();
44652 + read_lock(&tasklist_lock);
44653 + read_lock(&grsec_exec_file_lock);
44654 +*/
44655 +int gr_apply_subject_to_task(struct task_struct *task)
44656 +{
44657 + struct acl_object_label *obj;
44658 + char *tmpname;
44659 + struct acl_subject_label *tmpsubj;
44660 + struct file *filp;
44661 + struct name_entry *nmatch;
44662 +
44663 + filp = task->exec_file;
44664 + if (filp == NULL)
44665 + return 0;
44666 +
44667 + /* the following is to apply the correct subject
44668 + on binaries running when the RBAC system
44669 + is enabled, when the binaries have been
44670 + replaced or deleted since their execution
44671 + -----
44672 + when the RBAC system starts, the inode/dev
44673 + from exec_file will be one the RBAC system
44674 + is unaware of. It only knows the inode/dev
44675 + of the present file on disk, or the absence
44676 + of it.
44677 + */
44678 + preempt_disable();
44679 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44680 +
44681 + nmatch = lookup_name_entry(tmpname);
44682 + preempt_enable();
44683 + tmpsubj = NULL;
44684 + if (nmatch) {
44685 + if (nmatch->deleted)
44686 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44687 + else
44688 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44689 + if (tmpsubj != NULL)
44690 + task->acl = tmpsubj;
44691 + }
44692 + if (tmpsubj == NULL)
44693 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44694 + task->role);
44695 + if (task->acl) {
44696 + task->is_writable = 0;
44697 + /* ignore additional mmap checks for processes that are writable
44698 + by the default ACL */
44699 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44700 + if (unlikely(obj->mode & GR_WRITE))
44701 + task->is_writable = 1;
44702 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44703 + if (unlikely(obj->mode & GR_WRITE))
44704 + task->is_writable = 1;
44705 +
44706 + gr_set_proc_res(task);
44707 +
44708 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44709 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44710 +#endif
44711 + } else {
44712 + return 1;
44713 + }
44714 +
44715 + return 0;
44716 +}
44717 +
44718 +int
44719 +gr_set_acls(const int type)
44720 +{
44721 + struct task_struct *task, *task2;
44722 + struct acl_role_label *role = current->role;
44723 + __u16 acl_role_id = current->acl_role_id;
44724 + const struct cred *cred;
44725 + int ret;
44726 +
44727 + rcu_read_lock();
44728 + read_lock(&tasklist_lock);
44729 + read_lock(&grsec_exec_file_lock);
44730 + do_each_thread(task2, task) {
44731 + /* check to see if we're called from the exit handler,
44732 + if so, only replace ACLs that have inherited the admin
44733 + ACL */
44734 +
44735 + if (type && (task->role != role ||
44736 + task->acl_role_id != acl_role_id))
44737 + continue;
44738 +
44739 + task->acl_role_id = 0;
44740 + task->acl_sp_role = 0;
44741 +
44742 + if (task->exec_file) {
44743 + cred = __task_cred(task);
44744 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44745 + ret = gr_apply_subject_to_task(task);
44746 + if (ret) {
44747 + read_unlock(&grsec_exec_file_lock);
44748 + read_unlock(&tasklist_lock);
44749 + rcu_read_unlock();
44750 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44751 + return ret;
44752 + }
44753 + } else {
44754 + // it's a kernel process
44755 + task->role = kernel_role;
44756 + task->acl = kernel_role->root_label;
44757 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44758 + task->acl->mode &= ~GR_PROCFIND;
44759 +#endif
44760 + }
44761 + } while_each_thread(task2, task);
44762 + read_unlock(&grsec_exec_file_lock);
44763 + read_unlock(&tasklist_lock);
44764 + rcu_read_unlock();
44765 +
44766 + return 0;
44767 +}
44768 +
44769 +void
44770 +gr_learn_resource(const struct task_struct *task,
44771 + const int res, const unsigned long wanted, const int gt)
44772 +{
44773 + struct acl_subject_label *acl;
44774 + const struct cred *cred;
44775 +
44776 + if (unlikely((gr_status & GR_READY) &&
44777 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44778 + goto skip_reslog;
44779 +
44780 +#ifdef CONFIG_GRKERNSEC_RESLOG
44781 + gr_log_resource(task, res, wanted, gt);
44782 +#endif
44783 + skip_reslog:
44784 +
44785 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44786 + return;
44787 +
44788 + acl = task->acl;
44789 +
44790 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44791 + !(acl->resmask & (1 << (unsigned short) res))))
44792 + return;
44793 +
44794 + if (wanted >= acl->res[res].rlim_cur) {
44795 + unsigned long res_add;
44796 +
44797 + res_add = wanted;
44798 + switch (res) {
44799 + case RLIMIT_CPU:
44800 + res_add += GR_RLIM_CPU_BUMP;
44801 + break;
44802 + case RLIMIT_FSIZE:
44803 + res_add += GR_RLIM_FSIZE_BUMP;
44804 + break;
44805 + case RLIMIT_DATA:
44806 + res_add += GR_RLIM_DATA_BUMP;
44807 + break;
44808 + case RLIMIT_STACK:
44809 + res_add += GR_RLIM_STACK_BUMP;
44810 + break;
44811 + case RLIMIT_CORE:
44812 + res_add += GR_RLIM_CORE_BUMP;
44813 + break;
44814 + case RLIMIT_RSS:
44815 + res_add += GR_RLIM_RSS_BUMP;
44816 + break;
44817 + case RLIMIT_NPROC:
44818 + res_add += GR_RLIM_NPROC_BUMP;
44819 + break;
44820 + case RLIMIT_NOFILE:
44821 + res_add += GR_RLIM_NOFILE_BUMP;
44822 + break;
44823 + case RLIMIT_MEMLOCK:
44824 + res_add += GR_RLIM_MEMLOCK_BUMP;
44825 + break;
44826 + case RLIMIT_AS:
44827 + res_add += GR_RLIM_AS_BUMP;
44828 + break;
44829 + case RLIMIT_LOCKS:
44830 + res_add += GR_RLIM_LOCKS_BUMP;
44831 + break;
44832 + case RLIMIT_SIGPENDING:
44833 + res_add += GR_RLIM_SIGPENDING_BUMP;
44834 + break;
44835 + case RLIMIT_MSGQUEUE:
44836 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44837 + break;
44838 + case RLIMIT_NICE:
44839 + res_add += GR_RLIM_NICE_BUMP;
44840 + break;
44841 + case RLIMIT_RTPRIO:
44842 + res_add += GR_RLIM_RTPRIO_BUMP;
44843 + break;
44844 + case RLIMIT_RTTIME:
44845 + res_add += GR_RLIM_RTTIME_BUMP;
44846 + break;
44847 + }
44848 +
44849 + acl->res[res].rlim_cur = res_add;
44850 +
44851 + if (wanted > acl->res[res].rlim_max)
44852 + acl->res[res].rlim_max = res_add;
44853 +
44854 + /* only log the subject filename, since resource logging is supported for
44855 + single-subject learning only */
44856 + rcu_read_lock();
44857 + cred = __task_cred(task);
44858 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44859 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44860 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44861 + "", (unsigned long) res, &task->signal->saved_ip);
44862 + rcu_read_unlock();
44863 + }
44864 +
44865 + return;
44866 +}
44867 +
44868 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44869 +void
44870 +pax_set_initial_flags(struct linux_binprm *bprm)
44871 +{
44872 + struct task_struct *task = current;
44873 + struct acl_subject_label *proc;
44874 + unsigned long flags;
44875 +
44876 + if (unlikely(!(gr_status & GR_READY)))
44877 + return;
44878 +
44879 + flags = pax_get_flags(task);
44880 +
44881 + proc = task->acl;
44882 +
44883 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44884 + flags &= ~MF_PAX_PAGEEXEC;
44885 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44886 + flags &= ~MF_PAX_SEGMEXEC;
44887 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44888 + flags &= ~MF_PAX_RANDMMAP;
44889 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44890 + flags &= ~MF_PAX_EMUTRAMP;
44891 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44892 + flags &= ~MF_PAX_MPROTECT;
44893 +
44894 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44895 + flags |= MF_PAX_PAGEEXEC;
44896 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44897 + flags |= MF_PAX_SEGMEXEC;
44898 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44899 + flags |= MF_PAX_RANDMMAP;
44900 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44901 + flags |= MF_PAX_EMUTRAMP;
44902 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44903 + flags |= MF_PAX_MPROTECT;
44904 +
44905 + pax_set_flags(task, flags);
44906 +
44907 + return;
44908 +}
44909 +#endif
44910 +
44911 +#ifdef CONFIG_SYSCTL
44912 +/* Eric Biederman likes breaking userland ABI and every inode-based security
44913 + system to save 35kb of memory */
44914 +
44915 +/* we modify the passed in filename, but adjust it back before returning */
44916 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44917 +{
44918 + struct name_entry *nmatch;
44919 + char *p, *lastp = NULL;
44920 + struct acl_object_label *obj = NULL, *tmp;
44921 + struct acl_subject_label *tmpsubj;
44922 + char c = '\0';
44923 +
44924 + read_lock(&gr_inode_lock);
44925 +
44926 + p = name + len - 1;
44927 + do {
44928 + nmatch = lookup_name_entry(name);
44929 + if (lastp != NULL)
44930 + *lastp = c;
44931 +
44932 + if (nmatch == NULL)
44933 + goto next_component;
44934 + tmpsubj = current->acl;
44935 + do {
44936 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44937 + if (obj != NULL) {
44938 + tmp = obj->globbed;
44939 + while (tmp) {
44940 + if (!glob_match(tmp->filename, name)) {
44941 + obj = tmp;
44942 + goto found_obj;
44943 + }
44944 + tmp = tmp->next;
44945 + }
44946 + goto found_obj;
44947 + }
44948 + } while ((tmpsubj = tmpsubj->parent_subject));
44949 +next_component:
44950 + /* end case */
44951 + if (p == name)
44952 + break;
44953 +
44954 + while (*p != '/')
44955 + p--;
44956 + if (p == name)
44957 + lastp = p + 1;
44958 + else {
44959 + lastp = p;
44960 + p--;
44961 + }
44962 + c = *lastp;
44963 + *lastp = '\0';
44964 + } while (1);
44965 +found_obj:
44966 + read_unlock(&gr_inode_lock);
44967 + /* obj returned will always be non-null */
44968 + return obj;
44969 +}
44970 +
44971 +/* returns 0 when allowing, non-zero on error
44972 + op of 0 is used for readdir, so we don't log the names of hidden files
44973 +*/
44974 +__u32
44975 +gr_handle_sysctl(const struct ctl_table *table, const int op)
44976 +{
44977 + struct ctl_table *tmp;
44978 + const char *proc_sys = "/proc/sys";
44979 + char *path;
44980 + struct acl_object_label *obj;
44981 + unsigned short len = 0, pos = 0, depth = 0, i;
44982 + __u32 err = 0;
44983 + __u32 mode = 0;
44984 +
44985 + if (unlikely(!(gr_status & GR_READY)))
44986 + return 0;
44987 +
44988 + /* for now, ignore operations on non-sysctl entries if it's not a
44989 + readdir*/
44990 + if (table->child != NULL && op != 0)
44991 + return 0;
44992 +
44993 + mode |= GR_FIND;
44994 + /* it's only a read if it's an entry, read on dirs is for readdir */
44995 + if (op & MAY_READ)
44996 + mode |= GR_READ;
44997 + if (op & MAY_WRITE)
44998 + mode |= GR_WRITE;
44999 +
45000 + preempt_disable();
45001 +
45002 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45003 +
45004 + /* it's only a read/write if it's an actual entry, not a dir
45005 + (which are opened for readdir)
45006 + */
45007 +
45008 + /* convert the requested sysctl entry into a pathname */
45009 +
45010 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45011 + len += strlen(tmp->procname);
45012 + len++;
45013 + depth++;
45014 + }
45015 +
45016 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45017 + /* deny */
45018 + goto out;
45019 + }
45020 +
45021 + memset(path, 0, PAGE_SIZE);
45022 +
45023 + memcpy(path, proc_sys, strlen(proc_sys));
45024 +
45025 + pos += strlen(proc_sys);
45026 +
45027 + for (; depth > 0; depth--) {
45028 + path[pos] = '/';
45029 + pos++;
45030 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45031 + if (depth == i) {
45032 + memcpy(path + pos, tmp->procname,
45033 + strlen(tmp->procname));
45034 + pos += strlen(tmp->procname);
45035 + }
45036 + i++;
45037 + }
45038 + }
45039 +
45040 + obj = gr_lookup_by_name(path, pos);
45041 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45042 +
45043 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45044 + ((err & mode) != mode))) {
45045 + __u32 new_mode = mode;
45046 +
45047 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45048 +
45049 + err = 0;
45050 + gr_log_learn_sysctl(path, new_mode);
45051 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45052 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45053 + err = -ENOENT;
45054 + } else if (!(err & GR_FIND)) {
45055 + err = -ENOENT;
45056 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45057 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45058 + path, (mode & GR_READ) ? " reading" : "",
45059 + (mode & GR_WRITE) ? " writing" : "");
45060 + err = -EACCES;
45061 + } else if ((err & mode) != mode) {
45062 + err = -EACCES;
45063 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45064 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45065 + path, (mode & GR_READ) ? " reading" : "",
45066 + (mode & GR_WRITE) ? " writing" : "");
45067 + err = 0;
45068 + } else
45069 + err = 0;
45070 +
45071 + out:
45072 + preempt_enable();
45073 +
45074 + return err;
45075 +}
45076 +#endif
45077 +
45078 +int
45079 +gr_handle_proc_ptrace(struct task_struct *task)
45080 +{
45081 + struct file *filp;
45082 + struct task_struct *tmp = task;
45083 + struct task_struct *curtemp = current;
45084 + __u32 retmode;
45085 +
45086 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45087 + if (unlikely(!(gr_status & GR_READY)))
45088 + return 0;
45089 +#endif
45090 +
45091 + read_lock(&tasklist_lock);
45092 + read_lock(&grsec_exec_file_lock);
45093 + filp = task->exec_file;
45094 +
45095 + while (tmp->pid > 0) {
45096 + if (tmp == curtemp)
45097 + break;
45098 + tmp = tmp->real_parent;
45099 + }
45100 +
45101 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45102 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45103 + read_unlock(&grsec_exec_file_lock);
45104 + read_unlock(&tasklist_lock);
45105 + return 1;
45106 + }
45107 +
45108 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45109 + if (!(gr_status & GR_READY)) {
45110 + read_unlock(&grsec_exec_file_lock);
45111 + read_unlock(&tasklist_lock);
45112 + return 0;
45113 + }
45114 +#endif
45115 +
45116 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45117 + read_unlock(&grsec_exec_file_lock);
45118 + read_unlock(&tasklist_lock);
45119 +
45120 + if (retmode & GR_NOPTRACE)
45121 + return 1;
45122 +
45123 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45124 + && (current->acl != task->acl || (current->acl != current->role->root_label
45125 + && current->pid != task->pid)))
45126 + return 1;
45127 +
45128 + return 0;
45129 +}
45130 +
45131 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45132 +{
45133 + if (unlikely(!(gr_status & GR_READY)))
45134 + return;
45135 +
45136 + if (!(current->role->roletype & GR_ROLE_GOD))
45137 + return;
45138 +
45139 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45140 + p->role->rolename, gr_task_roletype_to_char(p),
45141 + p->acl->filename);
45142 +}
45143 +
45144 +int
45145 +gr_handle_ptrace(struct task_struct *task, const long request)
45146 +{
45147 + struct task_struct *tmp = task;
45148 + struct task_struct *curtemp = current;
45149 + __u32 retmode;
45150 +
45151 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45152 + if (unlikely(!(gr_status & GR_READY)))
45153 + return 0;
45154 +#endif
45155 +
45156 + read_lock(&tasklist_lock);
45157 + while (tmp->pid > 0) {
45158 + if (tmp == curtemp)
45159 + break;
45160 + tmp = tmp->real_parent;
45161 + }
45162 +
45163 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45164 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45165 + read_unlock(&tasklist_lock);
45166 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45167 + return 1;
45168 + }
45169 + read_unlock(&tasklist_lock);
45170 +
45171 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45172 + if (!(gr_status & GR_READY))
45173 + return 0;
45174 +#endif
45175 +
45176 + read_lock(&grsec_exec_file_lock);
45177 + if (unlikely(!task->exec_file)) {
45178 + read_unlock(&grsec_exec_file_lock);
45179 + return 0;
45180 + }
45181 +
45182 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45183 + read_unlock(&grsec_exec_file_lock);
45184 +
45185 + if (retmode & GR_NOPTRACE) {
45186 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45187 + return 1;
45188 + }
45189 +
45190 + if (retmode & GR_PTRACERD) {
45191 + switch (request) {
45192 + case PTRACE_POKETEXT:
45193 + case PTRACE_POKEDATA:
45194 + case PTRACE_POKEUSR:
45195 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45196 + case PTRACE_SETREGS:
45197 + case PTRACE_SETFPREGS:
45198 +#endif
45199 +#ifdef CONFIG_X86
45200 + case PTRACE_SETFPXREGS:
45201 +#endif
45202 +#ifdef CONFIG_ALTIVEC
45203 + case PTRACE_SETVRREGS:
45204 +#endif
45205 + return 1;
45206 + default:
45207 + return 0;
45208 + }
45209 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45210 + !(current->role->roletype & GR_ROLE_GOD) &&
45211 + (current->acl != task->acl)) {
45212 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45213 + return 1;
45214 + }
45215 +
45216 + return 0;
45217 +}
45218 +
45219 +static int is_writable_mmap(const struct file *filp)
45220 +{
45221 + struct task_struct *task = current;
45222 + struct acl_object_label *obj, *obj2;
45223 +
45224 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45225 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45226 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45227 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45228 + task->role->root_label);
45229 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45230 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45231 + return 1;
45232 + }
45233 + }
45234 + return 0;
45235 +}
45236 +
45237 +int
45238 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45239 +{
45240 + __u32 mode;
45241 +
45242 + if (unlikely(!file || !(prot & PROT_EXEC)))
45243 + return 1;
45244 +
45245 + if (is_writable_mmap(file))
45246 + return 0;
45247 +
45248 + mode =
45249 + gr_search_file(file->f_path.dentry,
45250 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45251 + file->f_path.mnt);
45252 +
45253 + if (!gr_tpe_allow(file))
45254 + return 0;
45255 +
45256 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45257 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45258 + return 0;
45259 + } else if (unlikely(!(mode & GR_EXEC))) {
45260 + return 0;
45261 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45262 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45263 + return 1;
45264 + }
45265 +
45266 + return 1;
45267 +}
45268 +
45269 +int
45270 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45271 +{
45272 + __u32 mode;
45273 +
45274 + if (unlikely(!file || !(prot & PROT_EXEC)))
45275 + return 1;
45276 +
45277 + if (is_writable_mmap(file))
45278 + return 0;
45279 +
45280 + mode =
45281 + gr_search_file(file->f_path.dentry,
45282 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45283 + file->f_path.mnt);
45284 +
45285 + if (!gr_tpe_allow(file))
45286 + return 0;
45287 +
45288 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45289 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45290 + return 0;
45291 + } else if (unlikely(!(mode & GR_EXEC))) {
45292 + return 0;
45293 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45294 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45295 + return 1;
45296 + }
45297 +
45298 + return 1;
45299 +}
45300 +
45301 +void
45302 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45303 +{
45304 + unsigned long runtime;
45305 + unsigned long cputime;
45306 + unsigned int wday, cday;
45307 + __u8 whr, chr;
45308 + __u8 wmin, cmin;
45309 + __u8 wsec, csec;
45310 + struct timespec timeval;
45311 +
45312 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45313 + !(task->acl->mode & GR_PROCACCT)))
45314 + return;
45315 +
45316 + do_posix_clock_monotonic_gettime(&timeval);
45317 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45318 + wday = runtime / (3600 * 24);
45319 + runtime -= wday * (3600 * 24);
45320 + whr = runtime / 3600;
45321 + runtime -= whr * 3600;
45322 + wmin = runtime / 60;
45323 + runtime -= wmin * 60;
45324 + wsec = runtime;
45325 +
45326 + cputime = (task->utime + task->stime) / HZ;
45327 + cday = cputime / (3600 * 24);
45328 + cputime -= cday * (3600 * 24);
45329 + chr = cputime / 3600;
45330 + cputime -= chr * 3600;
45331 + cmin = cputime / 60;
45332 + cputime -= cmin * 60;
45333 + csec = cputime;
45334 +
45335 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45336 +
45337 + return;
45338 +}
45339 +
45340 +void gr_set_kernel_label(struct task_struct *task)
45341 +{
45342 + if (gr_status & GR_READY) {
45343 + task->role = kernel_role;
45344 + task->acl = kernel_role->root_label;
45345 + }
45346 + return;
45347 +}
45348 +
45349 +#ifdef CONFIG_TASKSTATS
45350 +int gr_is_taskstats_denied(int pid)
45351 +{
45352 + struct task_struct *task;
45353 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45354 + const struct cred *cred;
45355 +#endif
45356 + int ret = 0;
45357 +
45358 + /* restrict taskstats viewing to un-chrooted root users
45359 + who have the 'view' subject flag if the RBAC system is enabled
45360 + */
45361 +
45362 + rcu_read_lock();
45363 + read_lock(&tasklist_lock);
45364 + task = find_task_by_vpid(pid);
45365 + if (task) {
45366 +#ifdef CONFIG_GRKERNSEC_CHROOT
45367 + if (proc_is_chrooted(task))
45368 + ret = -EACCES;
45369 +#endif
45370 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45371 + cred = __task_cred(task);
45372 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45373 + if (cred->uid != 0)
45374 + ret = -EACCES;
45375 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45376 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45377 + ret = -EACCES;
45378 +#endif
45379 +#endif
45380 + if (gr_status & GR_READY) {
45381 + if (!(task->acl->mode & GR_VIEW))
45382 + ret = -EACCES;
45383 + }
45384 + } else
45385 + ret = -ENOENT;
45386 +
45387 + read_unlock(&tasklist_lock);
45388 + rcu_read_unlock();
45389 +
45390 + return ret;
45391 +}
45392 +#endif
45393 +
45394 +/* AUXV entries are filled via a descendant of search_binary_handler
45395 + after we've already applied the subject for the target
45396 +*/
45397 +int gr_acl_enable_at_secure(void)
45398 +{
45399 + if (unlikely(!(gr_status & GR_READY)))
45400 + return 0;
45401 +
45402 + if (current->acl->mode & GR_ATSECURE)
45403 + return 1;
45404 +
45405 + return 0;
45406 +}
45407 +
45408 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45409 +{
45410 + struct task_struct *task = current;
45411 + struct dentry *dentry = file->f_path.dentry;
45412 + struct vfsmount *mnt = file->f_path.mnt;
45413 + struct acl_object_label *obj, *tmp;
45414 + struct acl_subject_label *subj;
45415 + unsigned int bufsize;
45416 + int is_not_root;
45417 + char *path;
45418 + dev_t dev = __get_dev(dentry);
45419 +
45420 + if (unlikely(!(gr_status & GR_READY)))
45421 + return 1;
45422 +
45423 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45424 + return 1;
45425 +
45426 + /* ignore Eric Biederman */
45427 + if (IS_PRIVATE(dentry->d_inode))
45428 + return 1;
45429 +
45430 + subj = task->acl;
45431 + do {
45432 + obj = lookup_acl_obj_label(ino, dev, subj);
45433 + if (obj != NULL)
45434 + return (obj->mode & GR_FIND) ? 1 : 0;
45435 + } while ((subj = subj->parent_subject));
45436 +
45437 + /* this is purely an optimization since we're looking for an object
45438 + for the directory we're doing a readdir on
45439 + if it's possible for any globbed object to match the entry we're
45440 + filling into the directory, then the object we find here will be
45441 + an anchor point with attached globbed objects
45442 + */
45443 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45444 + if (obj->globbed == NULL)
45445 + return (obj->mode & GR_FIND) ? 1 : 0;
45446 +
45447 + is_not_root = ((obj->filename[0] == '/') &&
45448 + (obj->filename[1] == '\0')) ? 0 : 1;
45449 + bufsize = PAGE_SIZE - namelen - is_not_root;
45450 +
45451 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45452 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45453 + return 1;
45454 +
45455 + preempt_disable();
45456 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45457 + bufsize);
45458 +
45459 + bufsize = strlen(path);
45460 +
45461 + /* if base is "/", don't append an additional slash */
45462 + if (is_not_root)
45463 + *(path + bufsize) = '/';
45464 + memcpy(path + bufsize + is_not_root, name, namelen);
45465 + *(path + bufsize + namelen + is_not_root) = '\0';
45466 +
45467 + tmp = obj->globbed;
45468 + while (tmp) {
45469 + if (!glob_match(tmp->filename, path)) {
45470 + preempt_enable();
45471 + return (tmp->mode & GR_FIND) ? 1 : 0;
45472 + }
45473 + tmp = tmp->next;
45474 + }
45475 + preempt_enable();
45476 + return (obj->mode & GR_FIND) ? 1 : 0;
45477 +}
45478 +
45479 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45480 +EXPORT_SYMBOL(gr_acl_is_enabled);
45481 +#endif
45482 +EXPORT_SYMBOL(gr_learn_resource);
45483 +EXPORT_SYMBOL(gr_set_kernel_label);
45484 +#ifdef CONFIG_SECURITY
45485 +EXPORT_SYMBOL(gr_check_user_change);
45486 +EXPORT_SYMBOL(gr_check_group_change);
45487 +#endif
45488 +
45489 diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
45490 --- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45491 +++ linux-3.0.4/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
45492 @@ -0,0 +1,139 @@
45493 +#include <linux/kernel.h>
45494 +#include <linux/module.h>
45495 +#include <linux/sched.h>
45496 +#include <linux/gracl.h>
45497 +#include <linux/grsecurity.h>
45498 +#include <linux/grinternal.h>
45499 +
45500 +static const char *captab_log[] = {
45501 + "CAP_CHOWN",
45502 + "CAP_DAC_OVERRIDE",
45503 + "CAP_DAC_READ_SEARCH",
45504 + "CAP_FOWNER",
45505 + "CAP_FSETID",
45506 + "CAP_KILL",
45507 + "CAP_SETGID",
45508 + "CAP_SETUID",
45509 + "CAP_SETPCAP",
45510 + "CAP_LINUX_IMMUTABLE",
45511 + "CAP_NET_BIND_SERVICE",
45512 + "CAP_NET_BROADCAST",
45513 + "CAP_NET_ADMIN",
45514 + "CAP_NET_RAW",
45515 + "CAP_IPC_LOCK",
45516 + "CAP_IPC_OWNER",
45517 + "CAP_SYS_MODULE",
45518 + "CAP_SYS_RAWIO",
45519 + "CAP_SYS_CHROOT",
45520 + "CAP_SYS_PTRACE",
45521 + "CAP_SYS_PACCT",
45522 + "CAP_SYS_ADMIN",
45523 + "CAP_SYS_BOOT",
45524 + "CAP_SYS_NICE",
45525 + "CAP_SYS_RESOURCE",
45526 + "CAP_SYS_TIME",
45527 + "CAP_SYS_TTY_CONFIG",
45528 + "CAP_MKNOD",
45529 + "CAP_LEASE",
45530 + "CAP_AUDIT_WRITE",
45531 + "CAP_AUDIT_CONTROL",
45532 + "CAP_SETFCAP",
45533 + "CAP_MAC_OVERRIDE",
45534 + "CAP_MAC_ADMIN",
45535 + "CAP_SYSLOG"
45536 +};
45537 +
45538 +EXPORT_SYMBOL(gr_is_capable);
45539 +EXPORT_SYMBOL(gr_is_capable_nolog);
45540 +
45541 +int
45542 +gr_is_capable(const int cap)
45543 +{
45544 + struct task_struct *task = current;
45545 + const struct cred *cred = current_cred();
45546 + struct acl_subject_label *curracl;
45547 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45548 + kernel_cap_t cap_audit = __cap_empty_set;
45549 +
45550 + if (!gr_acl_is_enabled())
45551 + return 1;
45552 +
45553 + curracl = task->acl;
45554 +
45555 + cap_drop = curracl->cap_lower;
45556 + cap_mask = curracl->cap_mask;
45557 + cap_audit = curracl->cap_invert_audit;
45558 +
45559 + while ((curracl = curracl->parent_subject)) {
45560 + /* if the cap isn't specified in the current computed mask but is specified in the
45561 + current level subject, and is lowered in the current level subject, then add
45562 + it to the set of dropped capabilities
45563 + otherwise, add the current level subject's mask to the current computed mask
45564 + */
45565 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45566 + cap_raise(cap_mask, cap);
45567 + if (cap_raised(curracl->cap_lower, cap))
45568 + cap_raise(cap_drop, cap);
45569 + if (cap_raised(curracl->cap_invert_audit, cap))
45570 + cap_raise(cap_audit, cap);
45571 + }
45572 + }
45573 +
45574 + if (!cap_raised(cap_drop, cap)) {
45575 + if (cap_raised(cap_audit, cap))
45576 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45577 + return 1;
45578 + }
45579 +
45580 + curracl = task->acl;
45581 +
45582 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45583 + && cap_raised(cred->cap_effective, cap)) {
45584 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45585 + task->role->roletype, cred->uid,
45586 + cred->gid, task->exec_file ?
45587 + gr_to_filename(task->exec_file->f_path.dentry,
45588 + task->exec_file->f_path.mnt) : curracl->filename,
45589 + curracl->filename, 0UL,
45590 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45591 + return 1;
45592 + }
45593 +
45594 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45595 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45596 + return 0;
45597 +}
45598 +
45599 +int
45600 +gr_is_capable_nolog(const int cap)
45601 +{
45602 + struct acl_subject_label *curracl;
45603 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45604 +
45605 + if (!gr_acl_is_enabled())
45606 + return 1;
45607 +
45608 + curracl = current->acl;
45609 +
45610 + cap_drop = curracl->cap_lower;
45611 + cap_mask = curracl->cap_mask;
45612 +
45613 + while ((curracl = curracl->parent_subject)) {
45614 + /* if the cap isn't specified in the current computed mask but is specified in the
45615 + current level subject, and is lowered in the current level subject, then add
45616 + it to the set of dropped capabilities
45617 + otherwise, add the current level subject's mask to the current computed mask
45618 + */
45619 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45620 + cap_raise(cap_mask, cap);
45621 + if (cap_raised(curracl->cap_lower, cap))
45622 + cap_raise(cap_drop, cap);
45623 + }
45624 + }
45625 +
45626 + if (!cap_raised(cap_drop, cap))
45627 + return 1;
45628 +
45629 + return 0;
45630 +}
45631 +
45632 diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
45633 --- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45634 +++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
45635 @@ -0,0 +1,431 @@
45636 +#include <linux/kernel.h>
45637 +#include <linux/sched.h>
45638 +#include <linux/types.h>
45639 +#include <linux/fs.h>
45640 +#include <linux/file.h>
45641 +#include <linux/stat.h>
45642 +#include <linux/grsecurity.h>
45643 +#include <linux/grinternal.h>
45644 +#include <linux/gracl.h>
45645 +
45646 +__u32
45647 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45648 + const struct vfsmount * mnt)
45649 +{
45650 + __u32 mode;
45651 +
45652 + if (unlikely(!dentry->d_inode))
45653 + return GR_FIND;
45654 +
45655 + mode =
45656 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45657 +
45658 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45659 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45660 + return mode;
45661 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45662 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45663 + return 0;
45664 + } else if (unlikely(!(mode & GR_FIND)))
45665 + return 0;
45666 +
45667 + return GR_FIND;
45668 +}
45669 +
45670 +__u32
45671 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45672 + const int fmode)
45673 +{
45674 + __u32 reqmode = GR_FIND;
45675 + __u32 mode;
45676 +
45677 + if (unlikely(!dentry->d_inode))
45678 + return reqmode;
45679 +
45680 + if (unlikely(fmode & O_APPEND))
45681 + reqmode |= GR_APPEND;
45682 + else if (unlikely(fmode & FMODE_WRITE))
45683 + reqmode |= GR_WRITE;
45684 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45685 + reqmode |= GR_READ;
45686 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45687 + reqmode &= ~GR_READ;
45688 + mode =
45689 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45690 + mnt);
45691 +
45692 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45693 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45694 + reqmode & GR_READ ? " reading" : "",
45695 + reqmode & GR_WRITE ? " writing" : reqmode &
45696 + GR_APPEND ? " appending" : "");
45697 + return reqmode;
45698 + } else
45699 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45700 + {
45701 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45702 + reqmode & GR_READ ? " reading" : "",
45703 + reqmode & GR_WRITE ? " writing" : reqmode &
45704 + GR_APPEND ? " appending" : "");
45705 + return 0;
45706 + } else if (unlikely((mode & reqmode) != reqmode))
45707 + return 0;
45708 +
45709 + return reqmode;
45710 +}
45711 +
45712 +__u32
45713 +gr_acl_handle_creat(const struct dentry * dentry,
45714 + const struct dentry * p_dentry,
45715 + const struct vfsmount * p_mnt, const int fmode,
45716 + const int imode)
45717 +{
45718 + __u32 reqmode = GR_WRITE | GR_CREATE;
45719 + __u32 mode;
45720 +
45721 + if (unlikely(fmode & O_APPEND))
45722 + reqmode |= GR_APPEND;
45723 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45724 + reqmode |= GR_READ;
45725 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45726 + reqmode |= GR_SETID;
45727 +
45728 + mode =
45729 + gr_check_create(dentry, p_dentry, p_mnt,
45730 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45731 +
45732 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45733 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45734 + reqmode & GR_READ ? " reading" : "",
45735 + reqmode & GR_WRITE ? " writing" : reqmode &
45736 + GR_APPEND ? " appending" : "");
45737 + return reqmode;
45738 + } else
45739 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45740 + {
45741 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45742 + reqmode & GR_READ ? " reading" : "",
45743 + reqmode & GR_WRITE ? " writing" : reqmode &
45744 + GR_APPEND ? " appending" : "");
45745 + return 0;
45746 + } else if (unlikely((mode & reqmode) != reqmode))
45747 + return 0;
45748 +
45749 + return reqmode;
45750 +}
45751 +
45752 +__u32
45753 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45754 + const int fmode)
45755 +{
45756 + __u32 mode, reqmode = GR_FIND;
45757 +
45758 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45759 + reqmode |= GR_EXEC;
45760 + if (fmode & S_IWOTH)
45761 + reqmode |= GR_WRITE;
45762 + if (fmode & S_IROTH)
45763 + reqmode |= GR_READ;
45764 +
45765 + mode =
45766 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45767 + mnt);
45768 +
45769 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45770 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45771 + reqmode & GR_READ ? " reading" : "",
45772 + reqmode & GR_WRITE ? " writing" : "",
45773 + reqmode & GR_EXEC ? " executing" : "");
45774 + return reqmode;
45775 + } else
45776 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45777 + {
45778 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45779 + reqmode & GR_READ ? " reading" : "",
45780 + reqmode & GR_WRITE ? " writing" : "",
45781 + reqmode & GR_EXEC ? " executing" : "");
45782 + return 0;
45783 + } else if (unlikely((mode & reqmode) != reqmode))
45784 + return 0;
45785 +
45786 + return reqmode;
45787 +}
45788 +
45789 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45790 +{
45791 + __u32 mode;
45792 +
45793 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45794 +
45795 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45796 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45797 + return mode;
45798 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45799 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45800 + return 0;
45801 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45802 + return 0;
45803 +
45804 + return (reqmode);
45805 +}
45806 +
45807 +__u32
45808 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45809 +{
45810 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45811 +}
45812 +
45813 +__u32
45814 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45815 +{
45816 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45817 +}
45818 +
45819 +__u32
45820 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45821 +{
45822 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45823 +}
45824 +
45825 +__u32
45826 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45827 +{
45828 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45829 +}
45830 +
45831 +__u32
45832 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45833 + mode_t mode)
45834 +{
45835 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45836 + return 1;
45837 +
45838 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45839 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45840 + GR_FCHMOD_ACL_MSG);
45841 + } else {
45842 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45843 + }
45844 +}
45845 +
45846 +__u32
45847 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45848 + mode_t mode)
45849 +{
45850 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45851 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45852 + GR_CHMOD_ACL_MSG);
45853 + } else {
45854 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45855 + }
45856 +}
45857 +
45858 +__u32
45859 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45860 +{
45861 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45862 +}
45863 +
45864 +__u32
45865 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45866 +{
45867 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45868 +}
45869 +
45870 +__u32
45871 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45872 +{
45873 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45874 +}
45875 +
45876 +__u32
45877 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45878 +{
45879 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45880 + GR_UNIXCONNECT_ACL_MSG);
45881 +}
45882 +
45883 +/* hardlinks require at minimum create permission,
45884 + any additional privilege required is based on the
45885 + privilege of the file being linked to
45886 +*/
45887 +__u32
45888 +gr_acl_handle_link(const struct dentry * new_dentry,
45889 + const struct dentry * parent_dentry,
45890 + const struct vfsmount * parent_mnt,
45891 + const struct dentry * old_dentry,
45892 + const struct vfsmount * old_mnt, const char *to)
45893 +{
45894 + __u32 mode;
45895 + __u32 needmode = GR_CREATE | GR_LINK;
45896 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45897 +
45898 + mode =
45899 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45900 + old_mnt);
45901 +
45902 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45903 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45904 + return mode;
45905 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45906 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45907 + return 0;
45908 + } else if (unlikely((mode & needmode) != needmode))
45909 + return 0;
45910 +
45911 + return 1;
45912 +}
45913 +
45914 +__u32
45915 +gr_acl_handle_symlink(const struct dentry * new_dentry,
45916 + const struct dentry * parent_dentry,
45917 + const struct vfsmount * parent_mnt, const char *from)
45918 +{
45919 + __u32 needmode = GR_WRITE | GR_CREATE;
45920 + __u32 mode;
45921 +
45922 + mode =
45923 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45924 + GR_CREATE | GR_AUDIT_CREATE |
45925 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45926 +
45927 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45928 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45929 + return mode;
45930 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45931 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45932 + return 0;
45933 + } else if (unlikely((mode & needmode) != needmode))
45934 + return 0;
45935 +
45936 + return (GR_WRITE | GR_CREATE);
45937 +}
45938 +
45939 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45940 +{
45941 + __u32 mode;
45942 +
45943 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45944 +
45945 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45946 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45947 + return mode;
45948 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45949 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45950 + return 0;
45951 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45952 + return 0;
45953 +
45954 + return (reqmode);
45955 +}
45956 +
45957 +__u32
45958 +gr_acl_handle_mknod(const struct dentry * new_dentry,
45959 + const struct dentry * parent_dentry,
45960 + const struct vfsmount * parent_mnt,
45961 + const int mode)
45962 +{
45963 + __u32 reqmode = GR_WRITE | GR_CREATE;
45964 + if (unlikely(mode & (S_ISUID | S_ISGID)))
45965 + reqmode |= GR_SETID;
45966 +
45967 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45968 + reqmode, GR_MKNOD_ACL_MSG);
45969 +}
45970 +
45971 +__u32
45972 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
45973 + const struct dentry *parent_dentry,
45974 + const struct vfsmount *parent_mnt)
45975 +{
45976 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45977 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
45978 +}
45979 +
45980 +#define RENAME_CHECK_SUCCESS(old, new) \
45981 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
45982 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
45983 +
45984 +int
45985 +gr_acl_handle_rename(struct dentry *new_dentry,
45986 + struct dentry *parent_dentry,
45987 + const struct vfsmount *parent_mnt,
45988 + struct dentry *old_dentry,
45989 + struct inode *old_parent_inode,
45990 + struct vfsmount *old_mnt, const char *newname)
45991 +{
45992 + __u32 comp1, comp2;
45993 + int error = 0;
45994 +
45995 + if (unlikely(!gr_acl_is_enabled()))
45996 + return 0;
45997 +
45998 + if (!new_dentry->d_inode) {
45999 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46000 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46001 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46002 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46003 + GR_DELETE | GR_AUDIT_DELETE |
46004 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46005 + GR_SUPPRESS, old_mnt);
46006 + } else {
46007 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46008 + GR_CREATE | GR_DELETE |
46009 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46010 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46011 + GR_SUPPRESS, parent_mnt);
46012 + comp2 =
46013 + gr_search_file(old_dentry,
46014 + GR_READ | GR_WRITE | GR_AUDIT_READ |
46015 + GR_DELETE | GR_AUDIT_DELETE |
46016 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46017 + }
46018 +
46019 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46020 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46021 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46022 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46023 + && !(comp2 & GR_SUPPRESS)) {
46024 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46025 + error = -EACCES;
46026 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46027 + error = -EACCES;
46028 +
46029 + return error;
46030 +}
46031 +
46032 +void
46033 +gr_acl_handle_exit(void)
46034 +{
46035 + u16 id;
46036 + char *rolename;
46037 + struct file *exec_file;
46038 +
46039 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46040 + !(current->role->roletype & GR_ROLE_PERSIST))) {
46041 + id = current->acl_role_id;
46042 + rolename = current->role->rolename;
46043 + gr_set_acls(1);
46044 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46045 + }
46046 +
46047 + write_lock(&grsec_exec_file_lock);
46048 + exec_file = current->exec_file;
46049 + current->exec_file = NULL;
46050 + write_unlock(&grsec_exec_file_lock);
46051 +
46052 + if (exec_file)
46053 + fput(exec_file);
46054 +}
46055 +
46056 +int
46057 +gr_acl_handle_procpidmem(const struct task_struct *task)
46058 +{
46059 + if (unlikely(!gr_acl_is_enabled()))
46060 + return 0;
46061 +
46062 + if (task != current && task->acl->mode & GR_PROTPROCFD)
46063 + return -EACCES;
46064 +
46065 + return 0;
46066 +}
46067 diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
46068 --- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46069 +++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
46070 @@ -0,0 +1,381 @@
46071 +#include <linux/kernel.h>
46072 +#include <asm/uaccess.h>
46073 +#include <asm/errno.h>
46074 +#include <net/sock.h>
46075 +#include <linux/file.h>
46076 +#include <linux/fs.h>
46077 +#include <linux/net.h>
46078 +#include <linux/in.h>
46079 +#include <linux/skbuff.h>
46080 +#include <linux/ip.h>
46081 +#include <linux/udp.h>
46082 +#include <linux/types.h>
46083 +#include <linux/sched.h>
46084 +#include <linux/netdevice.h>
46085 +#include <linux/inetdevice.h>
46086 +#include <linux/gracl.h>
46087 +#include <linux/grsecurity.h>
46088 +#include <linux/grinternal.h>
46089 +
46090 +#define GR_BIND 0x01
46091 +#define GR_CONNECT 0x02
46092 +#define GR_INVERT 0x04
46093 +#define GR_BINDOVERRIDE 0x08
46094 +#define GR_CONNECTOVERRIDE 0x10
46095 +#define GR_SOCK_FAMILY 0x20
46096 +
46097 +static const char * gr_protocols[IPPROTO_MAX] = {
46098 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46099 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46100 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46101 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46102 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46103 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46104 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46105 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46106 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46107 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46108 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46109 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46110 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46111 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46112 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46113 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46114 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46115 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46116 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46117 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46118 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46119 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46120 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46121 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46122 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46123 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46124 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46125 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46126 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46127 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46128 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46129 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46130 + };
46131 +
46132 +static const char * gr_socktypes[SOCK_MAX] = {
46133 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46134 + "unknown:7", "unknown:8", "unknown:9", "packet"
46135 + };
46136 +
46137 +static const char * gr_sockfamilies[AF_MAX+1] = {
46138 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46139 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46140 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46141 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46142 + };
46143 +
46144 +const char *
46145 +gr_proto_to_name(unsigned char proto)
46146 +{
46147 + return gr_protocols[proto];
46148 +}
46149 +
46150 +const char *
46151 +gr_socktype_to_name(unsigned char type)
46152 +{
46153 + return gr_socktypes[type];
46154 +}
46155 +
46156 +const char *
46157 +gr_sockfamily_to_name(unsigned char family)
46158 +{
46159 + return gr_sockfamilies[family];
46160 +}
46161 +
46162 +int
46163 +gr_search_socket(const int domain, const int type, const int protocol)
46164 +{
46165 + struct acl_subject_label *curr;
46166 + const struct cred *cred = current_cred();
46167 +
46168 + if (unlikely(!gr_acl_is_enabled()))
46169 + goto exit;
46170 +
46171 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46172 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46173 + goto exit; // let the kernel handle it
46174 +
46175 + curr = current->acl;
46176 +
46177 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46178 + /* the family is allowed, if this is PF_INET allow it only if
46179 + the extra sock type/protocol checks pass */
46180 + if (domain == PF_INET)
46181 + goto inet_check;
46182 + goto exit;
46183 + } else {
46184 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46185 + __u32 fakeip = 0;
46186 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46187 + current->role->roletype, cred->uid,
46188 + cred->gid, current->exec_file ?
46189 + gr_to_filename(current->exec_file->f_path.dentry,
46190 + current->exec_file->f_path.mnt) :
46191 + curr->filename, curr->filename,
46192 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46193 + &current->signal->saved_ip);
46194 + goto exit;
46195 + }
46196 + goto exit_fail;
46197 + }
46198 +
46199 +inet_check:
46200 + /* the rest of this checking is for IPv4 only */
46201 + if (!curr->ips)
46202 + goto exit;
46203 +
46204 + if ((curr->ip_type & (1 << type)) &&
46205 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46206 + goto exit;
46207 +
46208 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46209 + /* we don't place acls on raw sockets , and sometimes
46210 + dgram/ip sockets are opened for ioctl and not
46211 + bind/connect, so we'll fake a bind learn log */
46212 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46213 + __u32 fakeip = 0;
46214 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46215 + current->role->roletype, cred->uid,
46216 + cred->gid, current->exec_file ?
46217 + gr_to_filename(current->exec_file->f_path.dentry,
46218 + current->exec_file->f_path.mnt) :
46219 + curr->filename, curr->filename,
46220 + &fakeip, 0, type,
46221 + protocol, GR_CONNECT, &current->signal->saved_ip);
46222 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46223 + __u32 fakeip = 0;
46224 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46225 + current->role->roletype, cred->uid,
46226 + cred->gid, current->exec_file ?
46227 + gr_to_filename(current->exec_file->f_path.dentry,
46228 + current->exec_file->f_path.mnt) :
46229 + curr->filename, curr->filename,
46230 + &fakeip, 0, type,
46231 + protocol, GR_BIND, &current->signal->saved_ip);
46232 + }
46233 + /* we'll log when they use connect or bind */
46234 + goto exit;
46235 + }
46236 +
46237 +exit_fail:
46238 + if (domain == PF_INET)
46239 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46240 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46241 + else
46242 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46243 + gr_socktype_to_name(type), protocol);
46244 +
46245 + return 0;
46246 +exit:
46247 + return 1;
46248 +}
46249 +
46250 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46251 +{
46252 + if ((ip->mode & mode) &&
46253 + (ip_port >= ip->low) &&
46254 + (ip_port <= ip->high) &&
46255 + ((ntohl(ip_addr) & our_netmask) ==
46256 + (ntohl(our_addr) & our_netmask))
46257 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46258 + && (ip->type & (1 << type))) {
46259 + if (ip->mode & GR_INVERT)
46260 + return 2; // specifically denied
46261 + else
46262 + return 1; // allowed
46263 + }
46264 +
46265 + return 0; // not specifically allowed, may continue parsing
46266 +}
46267 +
46268 +static int
46269 +gr_search_connectbind(const int full_mode, struct sock *sk,
46270 + struct sockaddr_in *addr, const int type)
46271 +{
46272 + char iface[IFNAMSIZ] = {0};
46273 + struct acl_subject_label *curr;
46274 + struct acl_ip_label *ip;
46275 + struct inet_sock *isk;
46276 + struct net_device *dev;
46277 + struct in_device *idev;
46278 + unsigned long i;
46279 + int ret;
46280 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46281 + __u32 ip_addr = 0;
46282 + __u32 our_addr;
46283 + __u32 our_netmask;
46284 + char *p;
46285 + __u16 ip_port = 0;
46286 + const struct cred *cred = current_cred();
46287 +
46288 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46289 + return 0;
46290 +
46291 + curr = current->acl;
46292 + isk = inet_sk(sk);
46293 +
46294 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46295 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46296 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46297 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46298 + struct sockaddr_in saddr;
46299 + int err;
46300 +
46301 + saddr.sin_family = AF_INET;
46302 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46303 + saddr.sin_port = isk->inet_sport;
46304 +
46305 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46306 + if (err)
46307 + return err;
46308 +
46309 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46310 + if (err)
46311 + return err;
46312 + }
46313 +
46314 + if (!curr->ips)
46315 + return 0;
46316 +
46317 + ip_addr = addr->sin_addr.s_addr;
46318 + ip_port = ntohs(addr->sin_port);
46319 +
46320 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46321 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46322 + current->role->roletype, cred->uid,
46323 + cred->gid, current->exec_file ?
46324 + gr_to_filename(current->exec_file->f_path.dentry,
46325 + current->exec_file->f_path.mnt) :
46326 + curr->filename, curr->filename,
46327 + &ip_addr, ip_port, type,
46328 + sk->sk_protocol, mode, &current->signal->saved_ip);
46329 + return 0;
46330 + }
46331 +
46332 + for (i = 0; i < curr->ip_num; i++) {
46333 + ip = *(curr->ips + i);
46334 + if (ip->iface != NULL) {
46335 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46336 + p = strchr(iface, ':');
46337 + if (p != NULL)
46338 + *p = '\0';
46339 + dev = dev_get_by_name(sock_net(sk), iface);
46340 + if (dev == NULL)
46341 + continue;
46342 + idev = in_dev_get(dev);
46343 + if (idev == NULL) {
46344 + dev_put(dev);
46345 + continue;
46346 + }
46347 + rcu_read_lock();
46348 + for_ifa(idev) {
46349 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46350 + our_addr = ifa->ifa_address;
46351 + our_netmask = 0xffffffff;
46352 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46353 + if (ret == 1) {
46354 + rcu_read_unlock();
46355 + in_dev_put(idev);
46356 + dev_put(dev);
46357 + return 0;
46358 + } else if (ret == 2) {
46359 + rcu_read_unlock();
46360 + in_dev_put(idev);
46361 + dev_put(dev);
46362 + goto denied;
46363 + }
46364 + }
46365 + } endfor_ifa(idev);
46366 + rcu_read_unlock();
46367 + in_dev_put(idev);
46368 + dev_put(dev);
46369 + } else {
46370 + our_addr = ip->addr;
46371 + our_netmask = ip->netmask;
46372 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46373 + if (ret == 1)
46374 + return 0;
46375 + else if (ret == 2)
46376 + goto denied;
46377 + }
46378 + }
46379 +
46380 +denied:
46381 + if (mode == GR_BIND)
46382 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46383 + else if (mode == GR_CONNECT)
46384 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46385 +
46386 + return -EACCES;
46387 +}
46388 +
46389 +int
46390 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46391 +{
46392 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46393 +}
46394 +
46395 +int
46396 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46397 +{
46398 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46399 +}
46400 +
46401 +int gr_search_listen(struct socket *sock)
46402 +{
46403 + struct sock *sk = sock->sk;
46404 + struct sockaddr_in addr;
46405 +
46406 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46407 + addr.sin_port = inet_sk(sk)->inet_sport;
46408 +
46409 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46410 +}
46411 +
46412 +int gr_search_accept(struct socket *sock)
46413 +{
46414 + struct sock *sk = sock->sk;
46415 + struct sockaddr_in addr;
46416 +
46417 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46418 + addr.sin_port = inet_sk(sk)->inet_sport;
46419 +
46420 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46421 +}
46422 +
46423 +int
46424 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46425 +{
46426 + if (addr)
46427 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46428 + else {
46429 + struct sockaddr_in sin;
46430 + const struct inet_sock *inet = inet_sk(sk);
46431 +
46432 + sin.sin_addr.s_addr = inet->inet_daddr;
46433 + sin.sin_port = inet->inet_dport;
46434 +
46435 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46436 + }
46437 +}
46438 +
46439 +int
46440 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46441 +{
46442 + struct sockaddr_in sin;
46443 +
46444 + if (unlikely(skb->len < sizeof (struct udphdr)))
46445 + return 0; // skip this packet
46446 +
46447 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46448 + sin.sin_port = udp_hdr(skb)->source;
46449 +
46450 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46451 +}
46452 diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
46453 --- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46454 +++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
46455 @@ -0,0 +1,207 @@
46456 +#include <linux/kernel.h>
46457 +#include <linux/mm.h>
46458 +#include <linux/sched.h>
46459 +#include <linux/poll.h>
46460 +#include <linux/string.h>
46461 +#include <linux/file.h>
46462 +#include <linux/types.h>
46463 +#include <linux/vmalloc.h>
46464 +#include <linux/grinternal.h>
46465 +
46466 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46467 + size_t count, loff_t *ppos);
46468 +extern int gr_acl_is_enabled(void);
46469 +
46470 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46471 +static int gr_learn_attached;
46472 +
46473 +/* use a 512k buffer */
46474 +#define LEARN_BUFFER_SIZE (512 * 1024)
46475 +
46476 +static DEFINE_SPINLOCK(gr_learn_lock);
46477 +static DEFINE_MUTEX(gr_learn_user_mutex);
46478 +
46479 +/* we need to maintain two buffers, so that the kernel context of grlearn
46480 + uses a semaphore around the userspace copying, and the other kernel contexts
46481 + use a spinlock when copying into the buffer, since they cannot sleep
46482 +*/
46483 +static char *learn_buffer;
46484 +static char *learn_buffer_user;
46485 +static int learn_buffer_len;
46486 +static int learn_buffer_user_len;
46487 +
46488 +static ssize_t
46489 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46490 +{
46491 + DECLARE_WAITQUEUE(wait, current);
46492 + ssize_t retval = 0;
46493 +
46494 + add_wait_queue(&learn_wait, &wait);
46495 + set_current_state(TASK_INTERRUPTIBLE);
46496 + do {
46497 + mutex_lock(&gr_learn_user_mutex);
46498 + spin_lock(&gr_learn_lock);
46499 + if (learn_buffer_len)
46500 + break;
46501 + spin_unlock(&gr_learn_lock);
46502 + mutex_unlock(&gr_learn_user_mutex);
46503 + if (file->f_flags & O_NONBLOCK) {
46504 + retval = -EAGAIN;
46505 + goto out;
46506 + }
46507 + if (signal_pending(current)) {
46508 + retval = -ERESTARTSYS;
46509 + goto out;
46510 + }
46511 +
46512 + schedule();
46513 + } while (1);
46514 +
46515 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46516 + learn_buffer_user_len = learn_buffer_len;
46517 + retval = learn_buffer_len;
46518 + learn_buffer_len = 0;
46519 +
46520 + spin_unlock(&gr_learn_lock);
46521 +
46522 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46523 + retval = -EFAULT;
46524 +
46525 + mutex_unlock(&gr_learn_user_mutex);
46526 +out:
46527 + set_current_state(TASK_RUNNING);
46528 + remove_wait_queue(&learn_wait, &wait);
46529 + return retval;
46530 +}
46531 +
46532 +static unsigned int
46533 +poll_learn(struct file * file, poll_table * wait)
46534 +{
46535 + poll_wait(file, &learn_wait, wait);
46536 +
46537 + if (learn_buffer_len)
46538 + return (POLLIN | POLLRDNORM);
46539 +
46540 + return 0;
46541 +}
46542 +
46543 +void
46544 +gr_clear_learn_entries(void)
46545 +{
46546 + char *tmp;
46547 +
46548 + mutex_lock(&gr_learn_user_mutex);
46549 + spin_lock(&gr_learn_lock);
46550 + tmp = learn_buffer;
46551 + learn_buffer = NULL;
46552 + spin_unlock(&gr_learn_lock);
46553 + if (tmp)
46554 + vfree(tmp);
46555 + if (learn_buffer_user != NULL) {
46556 + vfree(learn_buffer_user);
46557 + learn_buffer_user = NULL;
46558 + }
46559 + learn_buffer_len = 0;
46560 + mutex_unlock(&gr_learn_user_mutex);
46561 +
46562 + return;
46563 +}
46564 +
46565 +void
46566 +gr_add_learn_entry(const char *fmt, ...)
46567 +{
46568 + va_list args;
46569 + unsigned int len;
46570 +
46571 + if (!gr_learn_attached)
46572 + return;
46573 +
46574 + spin_lock(&gr_learn_lock);
46575 +
46576 + /* leave a gap at the end so we know when it's "full" but don't have to
46577 + compute the exact length of the string we're trying to append
46578 + */
46579 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46580 + spin_unlock(&gr_learn_lock);
46581 + wake_up_interruptible(&learn_wait);
46582 + return;
46583 + }
46584 + if (learn_buffer == NULL) {
46585 + spin_unlock(&gr_learn_lock);
46586 + return;
46587 + }
46588 +
46589 + va_start(args, fmt);
46590 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46591 + va_end(args);
46592 +
46593 + learn_buffer_len += len + 1;
46594 +
46595 + spin_unlock(&gr_learn_lock);
46596 + wake_up_interruptible(&learn_wait);
46597 +
46598 + return;
46599 +}
46600 +
46601 +static int
46602 +open_learn(struct inode *inode, struct file *file)
46603 +{
46604 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46605 + return -EBUSY;
46606 + if (file->f_mode & FMODE_READ) {
46607 + int retval = 0;
46608 + mutex_lock(&gr_learn_user_mutex);
46609 + if (learn_buffer == NULL)
46610 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46611 + if (learn_buffer_user == NULL)
46612 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46613 + if (learn_buffer == NULL) {
46614 + retval = -ENOMEM;
46615 + goto out_error;
46616 + }
46617 + if (learn_buffer_user == NULL) {
46618 + retval = -ENOMEM;
46619 + goto out_error;
46620 + }
46621 + learn_buffer_len = 0;
46622 + learn_buffer_user_len = 0;
46623 + gr_learn_attached = 1;
46624 +out_error:
46625 + mutex_unlock(&gr_learn_user_mutex);
46626 + return retval;
46627 + }
46628 + return 0;
46629 +}
46630 +
46631 +static int
46632 +close_learn(struct inode *inode, struct file *file)
46633 +{
46634 + if (file->f_mode & FMODE_READ) {
46635 + char *tmp = NULL;
46636 + mutex_lock(&gr_learn_user_mutex);
46637 + spin_lock(&gr_learn_lock);
46638 + tmp = learn_buffer;
46639 + learn_buffer = NULL;
46640 + spin_unlock(&gr_learn_lock);
46641 + if (tmp)
46642 + vfree(tmp);
46643 + if (learn_buffer_user != NULL) {
46644 + vfree(learn_buffer_user);
46645 + learn_buffer_user = NULL;
46646 + }
46647 + learn_buffer_len = 0;
46648 + learn_buffer_user_len = 0;
46649 + gr_learn_attached = 0;
46650 + mutex_unlock(&gr_learn_user_mutex);
46651 + }
46652 +
46653 + return 0;
46654 +}
46655 +
46656 +const struct file_operations grsec_fops = {
46657 + .read = read_learn,
46658 + .write = write_grsec_handler,
46659 + .open = open_learn,
46660 + .release = close_learn,
46661 + .poll = poll_learn,
46662 +};
46663 diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
46664 --- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46665 +++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
46666 @@ -0,0 +1,68 @@
46667 +#include <linux/kernel.h>
46668 +#include <linux/sched.h>
46669 +#include <linux/gracl.h>
46670 +#include <linux/grinternal.h>
46671 +
46672 +static const char *restab_log[] = {
46673 + [RLIMIT_CPU] = "RLIMIT_CPU",
46674 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46675 + [RLIMIT_DATA] = "RLIMIT_DATA",
46676 + [RLIMIT_STACK] = "RLIMIT_STACK",
46677 + [RLIMIT_CORE] = "RLIMIT_CORE",
46678 + [RLIMIT_RSS] = "RLIMIT_RSS",
46679 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46680 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46681 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46682 + [RLIMIT_AS] = "RLIMIT_AS",
46683 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46684 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46685 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46686 + [RLIMIT_NICE] = "RLIMIT_NICE",
46687 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46688 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46689 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46690 +};
46691 +
46692 +void
46693 +gr_log_resource(const struct task_struct *task,
46694 + const int res, const unsigned long wanted, const int gt)
46695 +{
46696 + const struct cred *cred;
46697 + unsigned long rlim;
46698 +
46699 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46700 + return;
46701 +
46702 + // not yet supported resource
46703 + if (unlikely(!restab_log[res]))
46704 + return;
46705 +
46706 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46707 + rlim = task_rlimit_max(task, res);
46708 + else
46709 + rlim = task_rlimit(task, res);
46710 +
46711 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46712 + return;
46713 +
46714 + rcu_read_lock();
46715 + cred = __task_cred(task);
46716 +
46717 + if (res == RLIMIT_NPROC &&
46718 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46719 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46720 + goto out_rcu_unlock;
46721 + else if (res == RLIMIT_MEMLOCK &&
46722 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46723 + goto out_rcu_unlock;
46724 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46725 + goto out_rcu_unlock;
46726 + rcu_read_unlock();
46727 +
46728 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46729 +
46730 + return;
46731 +out_rcu_unlock:
46732 + rcu_read_unlock();
46733 + return;
46734 +}
46735 diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
46736 --- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46737 +++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
46738 @@ -0,0 +1,299 @@
46739 +#include <linux/kernel.h>
46740 +#include <linux/mm.h>
46741 +#include <asm/uaccess.h>
46742 +#include <asm/errno.h>
46743 +#include <asm/mman.h>
46744 +#include <net/sock.h>
46745 +#include <linux/file.h>
46746 +#include <linux/fs.h>
46747 +#include <linux/net.h>
46748 +#include <linux/in.h>
46749 +#include <linux/slab.h>
46750 +#include <linux/types.h>
46751 +#include <linux/sched.h>
46752 +#include <linux/timer.h>
46753 +#include <linux/gracl.h>
46754 +#include <linux/grsecurity.h>
46755 +#include <linux/grinternal.h>
46756 +
46757 +static struct crash_uid *uid_set;
46758 +static unsigned short uid_used;
46759 +static DEFINE_SPINLOCK(gr_uid_lock);
46760 +extern rwlock_t gr_inode_lock;
46761 +extern struct acl_subject_label *
46762 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46763 + struct acl_role_label *role);
46764 +
46765 +#ifdef CONFIG_BTRFS_FS
46766 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46767 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46768 +#endif
46769 +
46770 +static inline dev_t __get_dev(const struct dentry *dentry)
46771 +{
46772 +#ifdef CONFIG_BTRFS_FS
46773 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46774 + return get_btrfs_dev_from_inode(dentry->d_inode);
46775 + else
46776 +#endif
46777 + return dentry->d_inode->i_sb->s_dev;
46778 +}
46779 +
46780 +int
46781 +gr_init_uidset(void)
46782 +{
46783 + uid_set =
46784 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46785 + uid_used = 0;
46786 +
46787 + return uid_set ? 1 : 0;
46788 +}
46789 +
46790 +void
46791 +gr_free_uidset(void)
46792 +{
46793 + if (uid_set)
46794 + kfree(uid_set);
46795 +
46796 + return;
46797 +}
46798 +
46799 +int
46800 +gr_find_uid(const uid_t uid)
46801 +{
46802 + struct crash_uid *tmp = uid_set;
46803 + uid_t buid;
46804 + int low = 0, high = uid_used - 1, mid;
46805 +
46806 + while (high >= low) {
46807 + mid = (low + high) >> 1;
46808 + buid = tmp[mid].uid;
46809 + if (buid == uid)
46810 + return mid;
46811 + if (buid > uid)
46812 + high = mid - 1;
46813 + if (buid < uid)
46814 + low = mid + 1;
46815 + }
46816 +
46817 + return -1;
46818 +}
46819 +
46820 +static __inline__ void
46821 +gr_insertsort(void)
46822 +{
46823 + unsigned short i, j;
46824 + struct crash_uid index;
46825 +
46826 + for (i = 1; i < uid_used; i++) {
46827 + index = uid_set[i];
46828 + j = i;
46829 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46830 + uid_set[j] = uid_set[j - 1];
46831 + j--;
46832 + }
46833 + uid_set[j] = index;
46834 + }
46835 +
46836 + return;
46837 +}
46838 +
46839 +static __inline__ void
46840 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46841 +{
46842 + int loc;
46843 +
46844 + if (uid_used == GR_UIDTABLE_MAX)
46845 + return;
46846 +
46847 + loc = gr_find_uid(uid);
46848 +
46849 + if (loc >= 0) {
46850 + uid_set[loc].expires = expires;
46851 + return;
46852 + }
46853 +
46854 + uid_set[uid_used].uid = uid;
46855 + uid_set[uid_used].expires = expires;
46856 + uid_used++;
46857 +
46858 + gr_insertsort();
46859 +
46860 + return;
46861 +}
46862 +
46863 +void
46864 +gr_remove_uid(const unsigned short loc)
46865 +{
46866 + unsigned short i;
46867 +
46868 + for (i = loc + 1; i < uid_used; i++)
46869 + uid_set[i - 1] = uid_set[i];
46870 +
46871 + uid_used--;
46872 +
46873 + return;
46874 +}
46875 +
46876 +int
46877 +gr_check_crash_uid(const uid_t uid)
46878 +{
46879 + int loc;
46880 + int ret = 0;
46881 +
46882 + if (unlikely(!gr_acl_is_enabled()))
46883 + return 0;
46884 +
46885 + spin_lock(&gr_uid_lock);
46886 + loc = gr_find_uid(uid);
46887 +
46888 + if (loc < 0)
46889 + goto out_unlock;
46890 +
46891 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
46892 + gr_remove_uid(loc);
46893 + else
46894 + ret = 1;
46895 +
46896 +out_unlock:
46897 + spin_unlock(&gr_uid_lock);
46898 + return ret;
46899 +}
46900 +
46901 +static __inline__ int
46902 +proc_is_setxid(const struct cred *cred)
46903 +{
46904 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
46905 + cred->uid != cred->fsuid)
46906 + return 1;
46907 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46908 + cred->gid != cred->fsgid)
46909 + return 1;
46910 +
46911 + return 0;
46912 +}
46913 +
46914 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
46915 +
46916 +void
46917 +gr_handle_crash(struct task_struct *task, const int sig)
46918 +{
46919 + struct acl_subject_label *curr;
46920 + struct acl_subject_label *curr2;
46921 + struct task_struct *tsk, *tsk2;
46922 + const struct cred *cred;
46923 + const struct cred *cred2;
46924 +
46925 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46926 + return;
46927 +
46928 + if (unlikely(!gr_acl_is_enabled()))
46929 + return;
46930 +
46931 + curr = task->acl;
46932 +
46933 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
46934 + return;
46935 +
46936 + if (time_before_eq(curr->expires, get_seconds())) {
46937 + curr->expires = 0;
46938 + curr->crashes = 0;
46939 + }
46940 +
46941 + curr->crashes++;
46942 +
46943 + if (!curr->expires)
46944 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46945 +
46946 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46947 + time_after(curr->expires, get_seconds())) {
46948 + rcu_read_lock();
46949 + cred = __task_cred(task);
46950 + if (cred->uid && proc_is_setxid(cred)) {
46951 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46952 + spin_lock(&gr_uid_lock);
46953 + gr_insert_uid(cred->uid, curr->expires);
46954 + spin_unlock(&gr_uid_lock);
46955 + curr->expires = 0;
46956 + curr->crashes = 0;
46957 + read_lock(&tasklist_lock);
46958 + do_each_thread(tsk2, tsk) {
46959 + cred2 = __task_cred(tsk);
46960 + if (tsk != task && cred2->uid == cred->uid)
46961 + gr_fake_force_sig(SIGKILL, tsk);
46962 + } while_each_thread(tsk2, tsk);
46963 + read_unlock(&tasklist_lock);
46964 + } else {
46965 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46966 + read_lock(&tasklist_lock);
46967 + do_each_thread(tsk2, tsk) {
46968 + if (likely(tsk != task)) {
46969 + curr2 = tsk->acl;
46970 +
46971 + if (curr2->device == curr->device &&
46972 + curr2->inode == curr->inode)
46973 + gr_fake_force_sig(SIGKILL, tsk);
46974 + }
46975 + } while_each_thread(tsk2, tsk);
46976 + read_unlock(&tasklist_lock);
46977 + }
46978 + rcu_read_unlock();
46979 + }
46980 +
46981 + return;
46982 +}
46983 +
46984 +int
46985 +gr_check_crash_exec(const struct file *filp)
46986 +{
46987 + struct acl_subject_label *curr;
46988 +
46989 + if (unlikely(!gr_acl_is_enabled()))
46990 + return 0;
46991 +
46992 + read_lock(&gr_inode_lock);
46993 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
46994 + __get_dev(filp->f_path.dentry),
46995 + current->role);
46996 + read_unlock(&gr_inode_lock);
46997 +
46998 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
46999 + (!curr->crashes && !curr->expires))
47000 + return 0;
47001 +
47002 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47003 + time_after(curr->expires, get_seconds()))
47004 + return 1;
47005 + else if (time_before_eq(curr->expires, get_seconds())) {
47006 + curr->crashes = 0;
47007 + curr->expires = 0;
47008 + }
47009 +
47010 + return 0;
47011 +}
47012 +
47013 +void
47014 +gr_handle_alertkill(struct task_struct *task)
47015 +{
47016 + struct acl_subject_label *curracl;
47017 + __u32 curr_ip;
47018 + struct task_struct *p, *p2;
47019 +
47020 + if (unlikely(!gr_acl_is_enabled()))
47021 + return;
47022 +
47023 + curracl = task->acl;
47024 + curr_ip = task->signal->curr_ip;
47025 +
47026 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47027 + read_lock(&tasklist_lock);
47028 + do_each_thread(p2, p) {
47029 + if (p->signal->curr_ip == curr_ip)
47030 + gr_fake_force_sig(SIGKILL, p);
47031 + } while_each_thread(p2, p);
47032 + read_unlock(&tasklist_lock);
47033 + } else if (curracl->mode & GR_KILLPROC)
47034 + gr_fake_force_sig(SIGKILL, task);
47035 +
47036 + return;
47037 +}
47038 diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
47039 --- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47040 +++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
47041 @@ -0,0 +1,40 @@
47042 +#include <linux/kernel.h>
47043 +#include <linux/mm.h>
47044 +#include <linux/sched.h>
47045 +#include <linux/file.h>
47046 +#include <linux/ipc.h>
47047 +#include <linux/gracl.h>
47048 +#include <linux/grsecurity.h>
47049 +#include <linux/grinternal.h>
47050 +
47051 +int
47052 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47053 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47054 +{
47055 + struct task_struct *task;
47056 +
47057 + if (!gr_acl_is_enabled())
47058 + return 1;
47059 +
47060 + rcu_read_lock();
47061 + read_lock(&tasklist_lock);
47062 +
47063 + task = find_task_by_vpid(shm_cprid);
47064 +
47065 + if (unlikely(!task))
47066 + task = find_task_by_vpid(shm_lapid);
47067 +
47068 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47069 + (task->pid == shm_lapid)) &&
47070 + (task->acl->mode & GR_PROTSHM) &&
47071 + (task->acl != current->acl))) {
47072 + read_unlock(&tasklist_lock);
47073 + rcu_read_unlock();
47074 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47075 + return 0;
47076 + }
47077 + read_unlock(&tasklist_lock);
47078 + rcu_read_unlock();
47079 +
47080 + return 1;
47081 +}
47082 diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
47083 --- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47084 +++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
47085 @@ -0,0 +1,19 @@
47086 +#include <linux/kernel.h>
47087 +#include <linux/sched.h>
47088 +#include <linux/fs.h>
47089 +#include <linux/file.h>
47090 +#include <linux/grsecurity.h>
47091 +#include <linux/grinternal.h>
47092 +
47093 +void
47094 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47095 +{
47096 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47097 + if ((grsec_enable_chdir && grsec_enable_group &&
47098 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47099 + !grsec_enable_group)) {
47100 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47101 + }
47102 +#endif
47103 + return;
47104 +}
47105 diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
47106 --- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47107 +++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
47108 @@ -0,0 +1,349 @@
47109 +#include <linux/kernel.h>
47110 +#include <linux/module.h>
47111 +#include <linux/sched.h>
47112 +#include <linux/file.h>
47113 +#include <linux/fs.h>
47114 +#include <linux/mount.h>
47115 +#include <linux/types.h>
47116 +#include <linux/pid_namespace.h>
47117 +#include <linux/grsecurity.h>
47118 +#include <linux/grinternal.h>
47119 +
47120 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47121 +{
47122 +#ifdef CONFIG_GRKERNSEC
47123 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47124 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47125 + task->gr_is_chrooted = 1;
47126 + else
47127 + task->gr_is_chrooted = 0;
47128 +
47129 + task->gr_chroot_dentry = path->dentry;
47130 +#endif
47131 + return;
47132 +}
47133 +
47134 +void gr_clear_chroot_entries(struct task_struct *task)
47135 +{
47136 +#ifdef CONFIG_GRKERNSEC
47137 + task->gr_is_chrooted = 0;
47138 + task->gr_chroot_dentry = NULL;
47139 +#endif
47140 + return;
47141 +}
47142 +
47143 +int
47144 +gr_handle_chroot_unix(const pid_t pid)
47145 +{
47146 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47147 + struct task_struct *p;
47148 +
47149 + if (unlikely(!grsec_enable_chroot_unix))
47150 + return 1;
47151 +
47152 + if (likely(!proc_is_chrooted(current)))
47153 + return 1;
47154 +
47155 + rcu_read_lock();
47156 + read_lock(&tasklist_lock);
47157 + p = find_task_by_vpid_unrestricted(pid);
47158 + if (unlikely(p && !have_same_root(current, p))) {
47159 + read_unlock(&tasklist_lock);
47160 + rcu_read_unlock();
47161 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47162 + return 0;
47163 + }
47164 + read_unlock(&tasklist_lock);
47165 + rcu_read_unlock();
47166 +#endif
47167 + return 1;
47168 +}
47169 +
47170 +int
47171 +gr_handle_chroot_nice(void)
47172 +{
47173 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47174 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47175 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47176 + return -EPERM;
47177 + }
47178 +#endif
47179 + return 0;
47180 +}
47181 +
47182 +int
47183 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47184 +{
47185 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47186 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47187 + && proc_is_chrooted(current)) {
47188 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47189 + return -EACCES;
47190 + }
47191 +#endif
47192 + return 0;
47193 +}
47194 +
47195 +int
47196 +gr_handle_chroot_rawio(const struct inode *inode)
47197 +{
47198 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47199 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47200 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47201 + return 1;
47202 +#endif
47203 + return 0;
47204 +}
47205 +
47206 +int
47207 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47208 +{
47209 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47210 + struct task_struct *p;
47211 + int ret = 0;
47212 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47213 + return ret;
47214 +
47215 + read_lock(&tasklist_lock);
47216 + do_each_pid_task(pid, type, p) {
47217 + if (!have_same_root(current, p)) {
47218 + ret = 1;
47219 + goto out;
47220 + }
47221 + } while_each_pid_task(pid, type, p);
47222 +out:
47223 + read_unlock(&tasklist_lock);
47224 + return ret;
47225 +#endif
47226 + return 0;
47227 +}
47228 +
47229 +int
47230 +gr_pid_is_chrooted(struct task_struct *p)
47231 +{
47232 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47233 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47234 + return 0;
47235 +
47236 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47237 + !have_same_root(current, p)) {
47238 + return 1;
47239 + }
47240 +#endif
47241 + return 0;
47242 +}
47243 +
47244 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47245 +
47246 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47247 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47248 +{
47249 + struct path path, currentroot;
47250 + int ret = 0;
47251 +
47252 + path.dentry = (struct dentry *)u_dentry;
47253 + path.mnt = (struct vfsmount *)u_mnt;
47254 + get_fs_root(current->fs, &currentroot);
47255 + if (path_is_under(&path, &currentroot))
47256 + ret = 1;
47257 + path_put(&currentroot);
47258 +
47259 + return ret;
47260 +}
47261 +#endif
47262 +
47263 +int
47264 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47265 +{
47266 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47267 + if (!grsec_enable_chroot_fchdir)
47268 + return 1;
47269 +
47270 + if (!proc_is_chrooted(current))
47271 + return 1;
47272 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47273 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47274 + return 0;
47275 + }
47276 +#endif
47277 + return 1;
47278 +}
47279 +
47280 +int
47281 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47282 + const time_t shm_createtime)
47283 +{
47284 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47285 + struct task_struct *p;
47286 + time_t starttime;
47287 +
47288 + if (unlikely(!grsec_enable_chroot_shmat))
47289 + return 1;
47290 +
47291 + if (likely(!proc_is_chrooted(current)))
47292 + return 1;
47293 +
47294 + rcu_read_lock();
47295 + read_lock(&tasklist_lock);
47296 +
47297 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47298 + starttime = p->start_time.tv_sec;
47299 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47300 + if (have_same_root(current, p)) {
47301 + goto allow;
47302 + } else {
47303 + read_unlock(&tasklist_lock);
47304 + rcu_read_unlock();
47305 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47306 + return 0;
47307 + }
47308 + }
47309 + /* creator exited, pid reuse, fall through to next check */
47310 + }
47311 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47312 + if (unlikely(!have_same_root(current, p))) {
47313 + read_unlock(&tasklist_lock);
47314 + rcu_read_unlock();
47315 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47316 + return 0;
47317 + }
47318 + }
47319 +
47320 +allow:
47321 + read_unlock(&tasklist_lock);
47322 + rcu_read_unlock();
47323 +#endif
47324 + return 1;
47325 +}
47326 +
47327 +void
47328 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47329 +{
47330 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47331 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47332 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47333 +#endif
47334 + return;
47335 +}
47336 +
47337 +int
47338 +gr_handle_chroot_mknod(const struct dentry *dentry,
47339 + const struct vfsmount *mnt, const int mode)
47340 +{
47341 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47342 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47343 + proc_is_chrooted(current)) {
47344 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47345 + return -EPERM;
47346 + }
47347 +#endif
47348 + return 0;
47349 +}
47350 +
47351 +int
47352 +gr_handle_chroot_mount(const struct dentry *dentry,
47353 + const struct vfsmount *mnt, const char *dev_name)
47354 +{
47355 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47356 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47357 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47358 + return -EPERM;
47359 + }
47360 +#endif
47361 + return 0;
47362 +}
47363 +
47364 +int
47365 +gr_handle_chroot_pivot(void)
47366 +{
47367 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47368 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47369 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47370 + return -EPERM;
47371 + }
47372 +#endif
47373 + return 0;
47374 +}
47375 +
47376 +int
47377 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47378 +{
47379 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47380 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47381 + !gr_is_outside_chroot(dentry, mnt)) {
47382 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47383 + return -EPERM;
47384 + }
47385 +#endif
47386 + return 0;
47387 +}
47388 +
47389 +int
47390 +gr_handle_chroot_caps(struct path *path)
47391 +{
47392 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47393 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47394 + (init_task.fs->root.dentry != path->dentry) &&
47395 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47396 +
47397 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47398 + const struct cred *old = current_cred();
47399 + struct cred *new = prepare_creds();
47400 + if (new == NULL)
47401 + return 1;
47402 +
47403 + new->cap_permitted = cap_drop(old->cap_permitted,
47404 + chroot_caps);
47405 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47406 + chroot_caps);
47407 + new->cap_effective = cap_drop(old->cap_effective,
47408 + chroot_caps);
47409 +
47410 + commit_creds(new);
47411 +
47412 + return 0;
47413 + }
47414 +#endif
47415 + return 0;
47416 +}
47417 +
47418 +int
47419 +gr_handle_chroot_sysctl(const int op)
47420 +{
47421 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47422 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47423 + proc_is_chrooted(current))
47424 + return -EACCES;
47425 +#endif
47426 + return 0;
47427 +}
47428 +
47429 +void
47430 +gr_handle_chroot_chdir(struct path *path)
47431 +{
47432 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47433 + if (grsec_enable_chroot_chdir)
47434 + set_fs_pwd(current->fs, path);
47435 +#endif
47436 + return;
47437 +}
47438 +
47439 +int
47440 +gr_handle_chroot_chmod(const struct dentry *dentry,
47441 + const struct vfsmount *mnt, const int mode)
47442 +{
47443 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47444 + /* allow chmod +s on directories, but not files */
47445 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47446 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47447 + proc_is_chrooted(current)) {
47448 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47449 + return -EPERM;
47450 + }
47451 +#endif
47452 + return 0;
47453 +}
47454 +
47455 +#ifdef CONFIG_SECURITY
47456 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47457 +#endif
47458 diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
47459 --- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47460 +++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
47461 @@ -0,0 +1,447 @@
47462 +#include <linux/kernel.h>
47463 +#include <linux/module.h>
47464 +#include <linux/sched.h>
47465 +#include <linux/file.h>
47466 +#include <linux/fs.h>
47467 +#include <linux/kdev_t.h>
47468 +#include <linux/net.h>
47469 +#include <linux/in.h>
47470 +#include <linux/ip.h>
47471 +#include <linux/skbuff.h>
47472 +#include <linux/sysctl.h>
47473 +
47474 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47475 +void
47476 +pax_set_initial_flags(struct linux_binprm *bprm)
47477 +{
47478 + return;
47479 +}
47480 +#endif
47481 +
47482 +#ifdef CONFIG_SYSCTL
47483 +__u32
47484 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47485 +{
47486 + return 0;
47487 +}
47488 +#endif
47489 +
47490 +#ifdef CONFIG_TASKSTATS
47491 +int gr_is_taskstats_denied(int pid)
47492 +{
47493 + return 0;
47494 +}
47495 +#endif
47496 +
47497 +int
47498 +gr_acl_is_enabled(void)
47499 +{
47500 + return 0;
47501 +}
47502 +
47503 +int
47504 +gr_handle_rawio(const struct inode *inode)
47505 +{
47506 + return 0;
47507 +}
47508 +
47509 +void
47510 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47511 +{
47512 + return;
47513 +}
47514 +
47515 +int
47516 +gr_handle_ptrace(struct task_struct *task, const long request)
47517 +{
47518 + return 0;
47519 +}
47520 +
47521 +int
47522 +gr_handle_proc_ptrace(struct task_struct *task)
47523 +{
47524 + return 0;
47525 +}
47526 +
47527 +void
47528 +gr_learn_resource(const struct task_struct *task,
47529 + const int res, const unsigned long wanted, const int gt)
47530 +{
47531 + return;
47532 +}
47533 +
47534 +int
47535 +gr_set_acls(const int type)
47536 +{
47537 + return 0;
47538 +}
47539 +
47540 +int
47541 +gr_check_hidden_task(const struct task_struct *tsk)
47542 +{
47543 + return 0;
47544 +}
47545 +
47546 +int
47547 +gr_check_protected_task(const struct task_struct *task)
47548 +{
47549 + return 0;
47550 +}
47551 +
47552 +int
47553 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47554 +{
47555 + return 0;
47556 +}
47557 +
47558 +void
47559 +gr_copy_label(struct task_struct *tsk)
47560 +{
47561 + return;
47562 +}
47563 +
47564 +void
47565 +gr_set_pax_flags(struct task_struct *task)
47566 +{
47567 + return;
47568 +}
47569 +
47570 +int
47571 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47572 + const int unsafe_share)
47573 +{
47574 + return 0;
47575 +}
47576 +
47577 +void
47578 +gr_handle_delete(const ino_t ino, const dev_t dev)
47579 +{
47580 + return;
47581 +}
47582 +
47583 +void
47584 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47585 +{
47586 + return;
47587 +}
47588 +
47589 +void
47590 +gr_handle_crash(struct task_struct *task, const int sig)
47591 +{
47592 + return;
47593 +}
47594 +
47595 +int
47596 +gr_check_crash_exec(const struct file *filp)
47597 +{
47598 + return 0;
47599 +}
47600 +
47601 +int
47602 +gr_check_crash_uid(const uid_t uid)
47603 +{
47604 + return 0;
47605 +}
47606 +
47607 +void
47608 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47609 + struct dentry *old_dentry,
47610 + struct dentry *new_dentry,
47611 + struct vfsmount *mnt, const __u8 replace)
47612 +{
47613 + return;
47614 +}
47615 +
47616 +int
47617 +gr_search_socket(const int family, const int type, const int protocol)
47618 +{
47619 + return 1;
47620 +}
47621 +
47622 +int
47623 +gr_search_connectbind(const int mode, const struct socket *sock,
47624 + const struct sockaddr_in *addr)
47625 +{
47626 + return 0;
47627 +}
47628 +
47629 +int
47630 +gr_is_capable(const int cap)
47631 +{
47632 + return 1;
47633 +}
47634 +
47635 +int
47636 +gr_is_capable_nolog(const int cap)
47637 +{
47638 + return 1;
47639 +}
47640 +
47641 +void
47642 +gr_handle_alertkill(struct task_struct *task)
47643 +{
47644 + return;
47645 +}
47646 +
47647 +__u32
47648 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47649 +{
47650 + return 1;
47651 +}
47652 +
47653 +__u32
47654 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47655 + const struct vfsmount * mnt)
47656 +{
47657 + return 1;
47658 +}
47659 +
47660 +__u32
47661 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47662 + const int fmode)
47663 +{
47664 + return 1;
47665 +}
47666 +
47667 +__u32
47668 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47669 +{
47670 + return 1;
47671 +}
47672 +
47673 +__u32
47674 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47675 +{
47676 + return 1;
47677 +}
47678 +
47679 +int
47680 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47681 + unsigned int *vm_flags)
47682 +{
47683 + return 1;
47684 +}
47685 +
47686 +__u32
47687 +gr_acl_handle_truncate(const struct dentry * dentry,
47688 + const struct vfsmount * mnt)
47689 +{
47690 + return 1;
47691 +}
47692 +
47693 +__u32
47694 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47695 +{
47696 + return 1;
47697 +}
47698 +
47699 +__u32
47700 +gr_acl_handle_access(const struct dentry * dentry,
47701 + const struct vfsmount * mnt, const int fmode)
47702 +{
47703 + return 1;
47704 +}
47705 +
47706 +__u32
47707 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47708 + mode_t mode)
47709 +{
47710 + return 1;
47711 +}
47712 +
47713 +__u32
47714 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47715 + mode_t mode)
47716 +{
47717 + return 1;
47718 +}
47719 +
47720 +__u32
47721 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47722 +{
47723 + return 1;
47724 +}
47725 +
47726 +__u32
47727 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47728 +{
47729 + return 1;
47730 +}
47731 +
47732 +void
47733 +grsecurity_init(void)
47734 +{
47735 + return;
47736 +}
47737 +
47738 +__u32
47739 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47740 + const struct dentry * parent_dentry,
47741 + const struct vfsmount * parent_mnt,
47742 + const int mode)
47743 +{
47744 + return 1;
47745 +}
47746 +
47747 +__u32
47748 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47749 + const struct dentry * parent_dentry,
47750 + const struct vfsmount * parent_mnt)
47751 +{
47752 + return 1;
47753 +}
47754 +
47755 +__u32
47756 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47757 + const struct dentry * parent_dentry,
47758 + const struct vfsmount * parent_mnt, const char *from)
47759 +{
47760 + return 1;
47761 +}
47762 +
47763 +__u32
47764 +gr_acl_handle_link(const struct dentry * new_dentry,
47765 + const struct dentry * parent_dentry,
47766 + const struct vfsmount * parent_mnt,
47767 + const struct dentry * old_dentry,
47768 + const struct vfsmount * old_mnt, const char *to)
47769 +{
47770 + return 1;
47771 +}
47772 +
47773 +int
47774 +gr_acl_handle_rename(const struct dentry *new_dentry,
47775 + const struct dentry *parent_dentry,
47776 + const struct vfsmount *parent_mnt,
47777 + const struct dentry *old_dentry,
47778 + const struct inode *old_parent_inode,
47779 + const struct vfsmount *old_mnt, const char *newname)
47780 +{
47781 + return 0;
47782 +}
47783 +
47784 +int
47785 +gr_acl_handle_filldir(const struct file *file, const char *name,
47786 + const int namelen, const ino_t ino)
47787 +{
47788 + return 1;
47789 +}
47790 +
47791 +int
47792 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47793 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47794 +{
47795 + return 1;
47796 +}
47797 +
47798 +int
47799 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47800 +{
47801 + return 0;
47802 +}
47803 +
47804 +int
47805 +gr_search_accept(const struct socket *sock)
47806 +{
47807 + return 0;
47808 +}
47809 +
47810 +int
47811 +gr_search_listen(const struct socket *sock)
47812 +{
47813 + return 0;
47814 +}
47815 +
47816 +int
47817 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47818 +{
47819 + return 0;
47820 +}
47821 +
47822 +__u32
47823 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47824 +{
47825 + return 1;
47826 +}
47827 +
47828 +__u32
47829 +gr_acl_handle_creat(const struct dentry * dentry,
47830 + const struct dentry * p_dentry,
47831 + const struct vfsmount * p_mnt, const int fmode,
47832 + const int imode)
47833 +{
47834 + return 1;
47835 +}
47836 +
47837 +void
47838 +gr_acl_handle_exit(void)
47839 +{
47840 + return;
47841 +}
47842 +
47843 +int
47844 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47845 +{
47846 + return 1;
47847 +}
47848 +
47849 +void
47850 +gr_set_role_label(const uid_t uid, const gid_t gid)
47851 +{
47852 + return;
47853 +}
47854 +
47855 +int
47856 +gr_acl_handle_procpidmem(const struct task_struct *task)
47857 +{
47858 + return 0;
47859 +}
47860 +
47861 +int
47862 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47863 +{
47864 + return 0;
47865 +}
47866 +
47867 +int
47868 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47869 +{
47870 + return 0;
47871 +}
47872 +
47873 +void
47874 +gr_set_kernel_label(struct task_struct *task)
47875 +{
47876 + return;
47877 +}
47878 +
47879 +int
47880 +gr_check_user_change(int real, int effective, int fs)
47881 +{
47882 + return 0;
47883 +}
47884 +
47885 +int
47886 +gr_check_group_change(int real, int effective, int fs)
47887 +{
47888 + return 0;
47889 +}
47890 +
47891 +int gr_acl_enable_at_secure(void)
47892 +{
47893 + return 0;
47894 +}
47895 +
47896 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47897 +{
47898 + return dentry->d_inode->i_sb->s_dev;
47899 +}
47900 +
47901 +EXPORT_SYMBOL(gr_is_capable);
47902 +EXPORT_SYMBOL(gr_is_capable_nolog);
47903 +EXPORT_SYMBOL(gr_learn_resource);
47904 +EXPORT_SYMBOL(gr_set_kernel_label);
47905 +#ifdef CONFIG_SECURITY
47906 +EXPORT_SYMBOL(gr_check_user_change);
47907 +EXPORT_SYMBOL(gr_check_group_change);
47908 +#endif
47909 diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
47910 --- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47911 +++ linux-3.0.4/grsecurity/grsec_exec.c 2011-08-25 17:25:59.000000000 -0400
47912 @@ -0,0 +1,72 @@
47913 +#include <linux/kernel.h>
47914 +#include <linux/sched.h>
47915 +#include <linux/file.h>
47916 +#include <linux/binfmts.h>
47917 +#include <linux/fs.h>
47918 +#include <linux/types.h>
47919 +#include <linux/grdefs.h>
47920 +#include <linux/grsecurity.h>
47921 +#include <linux/grinternal.h>
47922 +#include <linux/capability.h>
47923 +
47924 +#include <asm/uaccess.h>
47925 +
47926 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47927 +static char gr_exec_arg_buf[132];
47928 +static DEFINE_MUTEX(gr_exec_arg_mutex);
47929 +#endif
47930 +
47931 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
47932 +
47933 +void
47934 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
47935 +{
47936 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47937 + char *grarg = gr_exec_arg_buf;
47938 + unsigned int i, x, execlen = 0;
47939 + char c;
47940 +
47941 + if (!((grsec_enable_execlog && grsec_enable_group &&
47942 + in_group_p(grsec_audit_gid))
47943 + || (grsec_enable_execlog && !grsec_enable_group)))
47944 + return;
47945 +
47946 + mutex_lock(&gr_exec_arg_mutex);
47947 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
47948 +
47949 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
47950 + const char __user *p;
47951 + unsigned int len;
47952 +
47953 + p = get_user_arg_ptr(argv, i);
47954 + if (IS_ERR(p))
47955 + goto log;
47956 +
47957 + len = strnlen_user(p, 128 - execlen);
47958 + if (len > 128 - execlen)
47959 + len = 128 - execlen;
47960 + else if (len > 0)
47961 + len--;
47962 + if (copy_from_user(grarg + execlen, p, len))
47963 + goto log;
47964 +
47965 + /* rewrite unprintable characters */
47966 + for (x = 0; x < len; x++) {
47967 + c = *(grarg + execlen + x);
47968 + if (c < 32 || c > 126)
47969 + *(grarg + execlen + x) = ' ';
47970 + }
47971 +
47972 + execlen += len;
47973 + *(grarg + execlen) = ' ';
47974 + *(grarg + execlen + 1) = '\0';
47975 + execlen++;
47976 + }
47977 +
47978 + log:
47979 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
47980 + bprm->file->f_path.mnt, grarg);
47981 + mutex_unlock(&gr_exec_arg_mutex);
47982 +#endif
47983 + return;
47984 +}
47985 diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
47986 --- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
47987 +++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
47988 @@ -0,0 +1,24 @@
47989 +#include <linux/kernel.h>
47990 +#include <linux/sched.h>
47991 +#include <linux/fs.h>
47992 +#include <linux/file.h>
47993 +#include <linux/grinternal.h>
47994 +
47995 +int
47996 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
47997 + const struct dentry *dir, const int flag, const int acc_mode)
47998 +{
47999 +#ifdef CONFIG_GRKERNSEC_FIFO
48000 + const struct cred *cred = current_cred();
48001 +
48002 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48003 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48004 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48005 + (cred->fsuid != dentry->d_inode->i_uid)) {
48006 + if (!inode_permission(dentry->d_inode, acc_mode))
48007 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48008 + return -EACCES;
48009 + }
48010 +#endif
48011 + return 0;
48012 +}
48013 diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
48014 --- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48015 +++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
48016 @@ -0,0 +1,23 @@
48017 +#include <linux/kernel.h>
48018 +#include <linux/sched.h>
48019 +#include <linux/grsecurity.h>
48020 +#include <linux/grinternal.h>
48021 +#include <linux/errno.h>
48022 +
48023 +void
48024 +gr_log_forkfail(const int retval)
48025 +{
48026 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48027 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48028 + switch (retval) {
48029 + case -EAGAIN:
48030 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48031 + break;
48032 + case -ENOMEM:
48033 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48034 + break;
48035 + }
48036 + }
48037 +#endif
48038 + return;
48039 +}
48040 diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
48041 --- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48042 +++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
48043 @@ -0,0 +1,269 @@
48044 +#include <linux/kernel.h>
48045 +#include <linux/sched.h>
48046 +#include <linux/mm.h>
48047 +#include <linux/gracl.h>
48048 +#include <linux/slab.h>
48049 +#include <linux/vmalloc.h>
48050 +#include <linux/percpu.h>
48051 +#include <linux/module.h>
48052 +
48053 +int grsec_enable_brute;
48054 +int grsec_enable_link;
48055 +int grsec_enable_dmesg;
48056 +int grsec_enable_harden_ptrace;
48057 +int grsec_enable_fifo;
48058 +int grsec_enable_execlog;
48059 +int grsec_enable_signal;
48060 +int grsec_enable_forkfail;
48061 +int grsec_enable_audit_ptrace;
48062 +int grsec_enable_time;
48063 +int grsec_enable_audit_textrel;
48064 +int grsec_enable_group;
48065 +int grsec_audit_gid;
48066 +int grsec_enable_chdir;
48067 +int grsec_enable_mount;
48068 +int grsec_enable_rofs;
48069 +int grsec_enable_chroot_findtask;
48070 +int grsec_enable_chroot_mount;
48071 +int grsec_enable_chroot_shmat;
48072 +int grsec_enable_chroot_fchdir;
48073 +int grsec_enable_chroot_double;
48074 +int grsec_enable_chroot_pivot;
48075 +int grsec_enable_chroot_chdir;
48076 +int grsec_enable_chroot_chmod;
48077 +int grsec_enable_chroot_mknod;
48078 +int grsec_enable_chroot_nice;
48079 +int grsec_enable_chroot_execlog;
48080 +int grsec_enable_chroot_caps;
48081 +int grsec_enable_chroot_sysctl;
48082 +int grsec_enable_chroot_unix;
48083 +int grsec_enable_tpe;
48084 +int grsec_tpe_gid;
48085 +int grsec_enable_blackhole;
48086 +#ifdef CONFIG_IPV6_MODULE
48087 +EXPORT_SYMBOL(grsec_enable_blackhole);
48088 +#endif
48089 +int grsec_lastack_retries;
48090 +int grsec_enable_tpe_all;
48091 +int grsec_enable_tpe_invert;
48092 +int grsec_enable_socket_all;
48093 +int grsec_socket_all_gid;
48094 +int grsec_enable_socket_client;
48095 +int grsec_socket_client_gid;
48096 +int grsec_enable_socket_server;
48097 +int grsec_socket_server_gid;
48098 +int grsec_resource_logging;
48099 +int grsec_disable_privio;
48100 +int grsec_enable_log_rwxmaps;
48101 +int grsec_lock;
48102 +
48103 +DEFINE_SPINLOCK(grsec_alert_lock);
48104 +unsigned long grsec_alert_wtime = 0;
48105 +unsigned long grsec_alert_fyet = 0;
48106 +
48107 +DEFINE_SPINLOCK(grsec_audit_lock);
48108 +
48109 +DEFINE_RWLOCK(grsec_exec_file_lock);
48110 +
48111 +char *gr_shared_page[4];
48112 +
48113 +char *gr_alert_log_fmt;
48114 +char *gr_audit_log_fmt;
48115 +char *gr_alert_log_buf;
48116 +char *gr_audit_log_buf;
48117 +
48118 +extern struct gr_arg *gr_usermode;
48119 +extern unsigned char *gr_system_salt;
48120 +extern unsigned char *gr_system_sum;
48121 +
48122 +void __init
48123 +grsecurity_init(void)
48124 +{
48125 + int j;
48126 + /* create the per-cpu shared pages */
48127 +
48128 +#ifdef CONFIG_X86
48129 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48130 +#endif
48131 +
48132 + for (j = 0; j < 4; j++) {
48133 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48134 + if (gr_shared_page[j] == NULL) {
48135 + panic("Unable to allocate grsecurity shared page");
48136 + return;
48137 + }
48138 + }
48139 +
48140 + /* allocate log buffers */
48141 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48142 + if (!gr_alert_log_fmt) {
48143 + panic("Unable to allocate grsecurity alert log format buffer");
48144 + return;
48145 + }
48146 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48147 + if (!gr_audit_log_fmt) {
48148 + panic("Unable to allocate grsecurity audit log format buffer");
48149 + return;
48150 + }
48151 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48152 + if (!gr_alert_log_buf) {
48153 + panic("Unable to allocate grsecurity alert log buffer");
48154 + return;
48155 + }
48156 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48157 + if (!gr_audit_log_buf) {
48158 + panic("Unable to allocate grsecurity audit log buffer");
48159 + return;
48160 + }
48161 +
48162 + /* allocate memory for authentication structure */
48163 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48164 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48165 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48166 +
48167 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48168 + panic("Unable to allocate grsecurity authentication structure");
48169 + return;
48170 + }
48171 +
48172 +
48173 +#ifdef CONFIG_GRKERNSEC_IO
48174 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48175 + grsec_disable_privio = 1;
48176 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48177 + grsec_disable_privio = 1;
48178 +#else
48179 + grsec_disable_privio = 0;
48180 +#endif
48181 +#endif
48182 +
48183 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48184 + /* for backward compatibility, tpe_invert always defaults to on if
48185 + enabled in the kernel
48186 + */
48187 + grsec_enable_tpe_invert = 1;
48188 +#endif
48189 +
48190 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48191 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48192 + grsec_lock = 1;
48193 +#endif
48194 +
48195 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48196 + grsec_enable_audit_textrel = 1;
48197 +#endif
48198 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48199 + grsec_enable_log_rwxmaps = 1;
48200 +#endif
48201 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48202 + grsec_enable_group = 1;
48203 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48204 +#endif
48205 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48206 + grsec_enable_chdir = 1;
48207 +#endif
48208 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48209 + grsec_enable_harden_ptrace = 1;
48210 +#endif
48211 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48212 + grsec_enable_mount = 1;
48213 +#endif
48214 +#ifdef CONFIG_GRKERNSEC_LINK
48215 + grsec_enable_link = 1;
48216 +#endif
48217 +#ifdef CONFIG_GRKERNSEC_BRUTE
48218 + grsec_enable_brute = 1;
48219 +#endif
48220 +#ifdef CONFIG_GRKERNSEC_DMESG
48221 + grsec_enable_dmesg = 1;
48222 +#endif
48223 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48224 + grsec_enable_blackhole = 1;
48225 + grsec_lastack_retries = 4;
48226 +#endif
48227 +#ifdef CONFIG_GRKERNSEC_FIFO
48228 + grsec_enable_fifo = 1;
48229 +#endif
48230 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48231 + grsec_enable_execlog = 1;
48232 +#endif
48233 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48234 + grsec_enable_signal = 1;
48235 +#endif
48236 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48237 + grsec_enable_forkfail = 1;
48238 +#endif
48239 +#ifdef CONFIG_GRKERNSEC_TIME
48240 + grsec_enable_time = 1;
48241 +#endif
48242 +#ifdef CONFIG_GRKERNSEC_RESLOG
48243 + grsec_resource_logging = 1;
48244 +#endif
48245 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48246 + grsec_enable_chroot_findtask = 1;
48247 +#endif
48248 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48249 + grsec_enable_chroot_unix = 1;
48250 +#endif
48251 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48252 + grsec_enable_chroot_mount = 1;
48253 +#endif
48254 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48255 + grsec_enable_chroot_fchdir = 1;
48256 +#endif
48257 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48258 + grsec_enable_chroot_shmat = 1;
48259 +#endif
48260 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48261 + grsec_enable_audit_ptrace = 1;
48262 +#endif
48263 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48264 + grsec_enable_chroot_double = 1;
48265 +#endif
48266 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48267 + grsec_enable_chroot_pivot = 1;
48268 +#endif
48269 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48270 + grsec_enable_chroot_chdir = 1;
48271 +#endif
48272 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48273 + grsec_enable_chroot_chmod = 1;
48274 +#endif
48275 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48276 + grsec_enable_chroot_mknod = 1;
48277 +#endif
48278 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48279 + grsec_enable_chroot_nice = 1;
48280 +#endif
48281 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48282 + grsec_enable_chroot_execlog = 1;
48283 +#endif
48284 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48285 + grsec_enable_chroot_caps = 1;
48286 +#endif
48287 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48288 + grsec_enable_chroot_sysctl = 1;
48289 +#endif
48290 +#ifdef CONFIG_GRKERNSEC_TPE
48291 + grsec_enable_tpe = 1;
48292 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48293 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48294 + grsec_enable_tpe_all = 1;
48295 +#endif
48296 +#endif
48297 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48298 + grsec_enable_socket_all = 1;
48299 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48300 +#endif
48301 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48302 + grsec_enable_socket_client = 1;
48303 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48304 +#endif
48305 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48306 + grsec_enable_socket_server = 1;
48307 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48308 +#endif
48309 +#endif
48310 +
48311 + return;
48312 +}
48313 diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
48314 --- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48315 +++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
48316 @@ -0,0 +1,43 @@
48317 +#include <linux/kernel.h>
48318 +#include <linux/sched.h>
48319 +#include <linux/fs.h>
48320 +#include <linux/file.h>
48321 +#include <linux/grinternal.h>
48322 +
48323 +int
48324 +gr_handle_follow_link(const struct inode *parent,
48325 + const struct inode *inode,
48326 + const struct dentry *dentry, const struct vfsmount *mnt)
48327 +{
48328 +#ifdef CONFIG_GRKERNSEC_LINK
48329 + const struct cred *cred = current_cred();
48330 +
48331 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48332 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48333 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48334 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48335 + return -EACCES;
48336 + }
48337 +#endif
48338 + return 0;
48339 +}
48340 +
48341 +int
48342 +gr_handle_hardlink(const struct dentry *dentry,
48343 + const struct vfsmount *mnt,
48344 + struct inode *inode, const int mode, const char *to)
48345 +{
48346 +#ifdef CONFIG_GRKERNSEC_LINK
48347 + const struct cred *cred = current_cred();
48348 +
48349 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48350 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48351 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48352 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48353 + !capable(CAP_FOWNER) && cred->uid) {
48354 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48355 + return -EPERM;
48356 + }
48357 +#endif
48358 + return 0;
48359 +}
48360 diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
48361 --- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48362 +++ linux-3.0.4/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
48363 @@ -0,0 +1,310 @@
48364 +#include <linux/kernel.h>
48365 +#include <linux/sched.h>
48366 +#include <linux/file.h>
48367 +#include <linux/tty.h>
48368 +#include <linux/fs.h>
48369 +#include <linux/grinternal.h>
48370 +
48371 +#ifdef CONFIG_TREE_PREEMPT_RCU
48372 +#define DISABLE_PREEMPT() preempt_disable()
48373 +#define ENABLE_PREEMPT() preempt_enable()
48374 +#else
48375 +#define DISABLE_PREEMPT()
48376 +#define ENABLE_PREEMPT()
48377 +#endif
48378 +
48379 +#define BEGIN_LOCKS(x) \
48380 + DISABLE_PREEMPT(); \
48381 + rcu_read_lock(); \
48382 + read_lock(&tasklist_lock); \
48383 + read_lock(&grsec_exec_file_lock); \
48384 + if (x != GR_DO_AUDIT) \
48385 + spin_lock(&grsec_alert_lock); \
48386 + else \
48387 + spin_lock(&grsec_audit_lock)
48388 +
48389 +#define END_LOCKS(x) \
48390 + if (x != GR_DO_AUDIT) \
48391 + spin_unlock(&grsec_alert_lock); \
48392 + else \
48393 + spin_unlock(&grsec_audit_lock); \
48394 + read_unlock(&grsec_exec_file_lock); \
48395 + read_unlock(&tasklist_lock); \
48396 + rcu_read_unlock(); \
48397 + ENABLE_PREEMPT(); \
48398 + if (x == GR_DONT_AUDIT) \
48399 + gr_handle_alertkill(current)
48400 +
48401 +enum {
48402 + FLOODING,
48403 + NO_FLOODING
48404 +};
48405 +
48406 +extern char *gr_alert_log_fmt;
48407 +extern char *gr_audit_log_fmt;
48408 +extern char *gr_alert_log_buf;
48409 +extern char *gr_audit_log_buf;
48410 +
48411 +static int gr_log_start(int audit)
48412 +{
48413 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48414 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48415 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48416 +
48417 + if (audit == GR_DO_AUDIT)
48418 + goto set_fmt;
48419 +
48420 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48421 + grsec_alert_wtime = jiffies;
48422 + grsec_alert_fyet = 0;
48423 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48424 + grsec_alert_fyet++;
48425 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48426 + grsec_alert_wtime = jiffies;
48427 + grsec_alert_fyet++;
48428 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48429 + return FLOODING;
48430 + } else return FLOODING;
48431 +
48432 +set_fmt:
48433 + memset(buf, 0, PAGE_SIZE);
48434 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48435 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48436 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48437 + } else if (current->signal->curr_ip) {
48438 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48439 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48440 + } else if (gr_acl_is_enabled()) {
48441 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48442 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48443 + } else {
48444 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48445 + strcpy(buf, fmt);
48446 + }
48447 +
48448 + return NO_FLOODING;
48449 +}
48450 +
48451 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48452 + __attribute__ ((format (printf, 2, 0)));
48453 +
48454 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48455 +{
48456 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48457 + unsigned int len = strlen(buf);
48458 +
48459 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48460 +
48461 + return;
48462 +}
48463 +
48464 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48465 + __attribute__ ((format (printf, 2, 3)));
48466 +
48467 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48468 +{
48469 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48470 + unsigned int len = strlen(buf);
48471 + va_list ap;
48472 +
48473 + va_start(ap, msg);
48474 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48475 + va_end(ap);
48476 +
48477 + return;
48478 +}
48479 +
48480 +static void gr_log_end(int audit)
48481 +{
48482 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48483 + unsigned int len = strlen(buf);
48484 +
48485 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48486 + printk("%s\n", buf);
48487 +
48488 + return;
48489 +}
48490 +
48491 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48492 +{
48493 + int logtype;
48494 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48495 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48496 + void *voidptr = NULL;
48497 + int num1 = 0, num2 = 0;
48498 + unsigned long ulong1 = 0, ulong2 = 0;
48499 + struct dentry *dentry = NULL;
48500 + struct vfsmount *mnt = NULL;
48501 + struct file *file = NULL;
48502 + struct task_struct *task = NULL;
48503 + const struct cred *cred, *pcred;
48504 + va_list ap;
48505 +
48506 + BEGIN_LOCKS(audit);
48507 + logtype = gr_log_start(audit);
48508 + if (logtype == FLOODING) {
48509 + END_LOCKS(audit);
48510 + return;
48511 + }
48512 + va_start(ap, argtypes);
48513 + switch (argtypes) {
48514 + case GR_TTYSNIFF:
48515 + task = va_arg(ap, struct task_struct *);
48516 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48517 + break;
48518 + case GR_SYSCTL_HIDDEN:
48519 + str1 = va_arg(ap, char *);
48520 + gr_log_middle_varargs(audit, msg, result, str1);
48521 + break;
48522 + case GR_RBAC:
48523 + dentry = va_arg(ap, struct dentry *);
48524 + mnt = va_arg(ap, struct vfsmount *);
48525 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48526 + break;
48527 + case GR_RBAC_STR:
48528 + dentry = va_arg(ap, struct dentry *);
48529 + mnt = va_arg(ap, struct vfsmount *);
48530 + str1 = va_arg(ap, char *);
48531 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48532 + break;
48533 + case GR_STR_RBAC:
48534 + str1 = va_arg(ap, char *);
48535 + dentry = va_arg(ap, struct dentry *);
48536 + mnt = va_arg(ap, struct vfsmount *);
48537 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48538 + break;
48539 + case GR_RBAC_MODE2:
48540 + dentry = va_arg(ap, struct dentry *);
48541 + mnt = va_arg(ap, struct vfsmount *);
48542 + str1 = va_arg(ap, char *);
48543 + str2 = va_arg(ap, char *);
48544 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48545 + break;
48546 + case GR_RBAC_MODE3:
48547 + dentry = va_arg(ap, struct dentry *);
48548 + mnt = va_arg(ap, struct vfsmount *);
48549 + str1 = va_arg(ap, char *);
48550 + str2 = va_arg(ap, char *);
48551 + str3 = va_arg(ap, char *);
48552 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48553 + break;
48554 + case GR_FILENAME:
48555 + dentry = va_arg(ap, struct dentry *);
48556 + mnt = va_arg(ap, struct vfsmount *);
48557 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48558 + break;
48559 + case GR_STR_FILENAME:
48560 + str1 = va_arg(ap, char *);
48561 + dentry = va_arg(ap, struct dentry *);
48562 + mnt = va_arg(ap, struct vfsmount *);
48563 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48564 + break;
48565 + case GR_FILENAME_STR:
48566 + dentry = va_arg(ap, struct dentry *);
48567 + mnt = va_arg(ap, struct vfsmount *);
48568 + str1 = va_arg(ap, char *);
48569 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48570 + break;
48571 + case GR_FILENAME_TWO_INT:
48572 + dentry = va_arg(ap, struct dentry *);
48573 + mnt = va_arg(ap, struct vfsmount *);
48574 + num1 = va_arg(ap, int);
48575 + num2 = va_arg(ap, int);
48576 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48577 + break;
48578 + case GR_FILENAME_TWO_INT_STR:
48579 + dentry = va_arg(ap, struct dentry *);
48580 + mnt = va_arg(ap, struct vfsmount *);
48581 + num1 = va_arg(ap, int);
48582 + num2 = va_arg(ap, int);
48583 + str1 = va_arg(ap, char *);
48584 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48585 + break;
48586 + case GR_TEXTREL:
48587 + file = va_arg(ap, struct file *);
48588 + ulong1 = va_arg(ap, unsigned long);
48589 + ulong2 = va_arg(ap, unsigned long);
48590 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48591 + break;
48592 + case GR_PTRACE:
48593 + task = va_arg(ap, struct task_struct *);
48594 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48595 + break;
48596 + case GR_RESOURCE:
48597 + task = va_arg(ap, struct task_struct *);
48598 + cred = __task_cred(task);
48599 + pcred = __task_cred(task->real_parent);
48600 + ulong1 = va_arg(ap, unsigned long);
48601 + str1 = va_arg(ap, char *);
48602 + ulong2 = va_arg(ap, unsigned long);
48603 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48604 + break;
48605 + case GR_CAP:
48606 + task = va_arg(ap, struct task_struct *);
48607 + cred = __task_cred(task);
48608 + pcred = __task_cred(task->real_parent);
48609 + str1 = va_arg(ap, char *);
48610 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48611 + break;
48612 + case GR_SIG:
48613 + str1 = va_arg(ap, char *);
48614 + voidptr = va_arg(ap, void *);
48615 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48616 + break;
48617 + case GR_SIG2:
48618 + task = va_arg(ap, struct task_struct *);
48619 + cred = __task_cred(task);
48620 + pcred = __task_cred(task->real_parent);
48621 + num1 = va_arg(ap, int);
48622 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48623 + break;
48624 + case GR_CRASH1:
48625 + task = va_arg(ap, struct task_struct *);
48626 + cred = __task_cred(task);
48627 + pcred = __task_cred(task->real_parent);
48628 + ulong1 = va_arg(ap, unsigned long);
48629 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48630 + break;
48631 + case GR_CRASH2:
48632 + task = va_arg(ap, struct task_struct *);
48633 + cred = __task_cred(task);
48634 + pcred = __task_cred(task->real_parent);
48635 + ulong1 = va_arg(ap, unsigned long);
48636 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48637 + break;
48638 + case GR_RWXMAP:
48639 + file = va_arg(ap, struct file *);
48640 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48641 + break;
48642 + case GR_PSACCT:
48643 + {
48644 + unsigned int wday, cday;
48645 + __u8 whr, chr;
48646 + __u8 wmin, cmin;
48647 + __u8 wsec, csec;
48648 + char cur_tty[64] = { 0 };
48649 + char parent_tty[64] = { 0 };
48650 +
48651 + task = va_arg(ap, struct task_struct *);
48652 + wday = va_arg(ap, unsigned int);
48653 + cday = va_arg(ap, unsigned int);
48654 + whr = va_arg(ap, int);
48655 + chr = va_arg(ap, int);
48656 + wmin = va_arg(ap, int);
48657 + cmin = va_arg(ap, int);
48658 + wsec = va_arg(ap, int);
48659 + csec = va_arg(ap, int);
48660 + ulong1 = va_arg(ap, unsigned long);
48661 + cred = __task_cred(task);
48662 + pcred = __task_cred(task->real_parent);
48663 +
48664 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48665 + }
48666 + break;
48667 + default:
48668 + gr_log_middle(audit, msg, ap);
48669 + }
48670 + va_end(ap);
48671 + gr_log_end(audit);
48672 + END_LOCKS(audit);
48673 +}
48674 diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
48675 --- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48676 +++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
48677 @@ -0,0 +1,33 @@
48678 +#include <linux/kernel.h>
48679 +#include <linux/sched.h>
48680 +#include <linux/mm.h>
48681 +#include <linux/mman.h>
48682 +#include <linux/grinternal.h>
48683 +
48684 +void
48685 +gr_handle_ioperm(void)
48686 +{
48687 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48688 + return;
48689 +}
48690 +
48691 +void
48692 +gr_handle_iopl(void)
48693 +{
48694 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48695 + return;
48696 +}
48697 +
48698 +void
48699 +gr_handle_mem_readwrite(u64 from, u64 to)
48700 +{
48701 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48702 + return;
48703 +}
48704 +
48705 +void
48706 +gr_handle_vm86(void)
48707 +{
48708 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48709 + return;
48710 +}
48711 diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
48712 --- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48713 +++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
48714 @@ -0,0 +1,62 @@
48715 +#include <linux/kernel.h>
48716 +#include <linux/sched.h>
48717 +#include <linux/mount.h>
48718 +#include <linux/grsecurity.h>
48719 +#include <linux/grinternal.h>
48720 +
48721 +void
48722 +gr_log_remount(const char *devname, const int retval)
48723 +{
48724 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48725 + if (grsec_enable_mount && (retval >= 0))
48726 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48727 +#endif
48728 + return;
48729 +}
48730 +
48731 +void
48732 +gr_log_unmount(const char *devname, const int retval)
48733 +{
48734 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48735 + if (grsec_enable_mount && (retval >= 0))
48736 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48737 +#endif
48738 + return;
48739 +}
48740 +
48741 +void
48742 +gr_log_mount(const char *from, const char *to, const int retval)
48743 +{
48744 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48745 + if (grsec_enable_mount && (retval >= 0))
48746 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48747 +#endif
48748 + return;
48749 +}
48750 +
48751 +int
48752 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48753 +{
48754 +#ifdef CONFIG_GRKERNSEC_ROFS
48755 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48756 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48757 + return -EPERM;
48758 + } else
48759 + return 0;
48760 +#endif
48761 + return 0;
48762 +}
48763 +
48764 +int
48765 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48766 +{
48767 +#ifdef CONFIG_GRKERNSEC_ROFS
48768 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48769 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48770 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48771 + return -EPERM;
48772 + } else
48773 + return 0;
48774 +#endif
48775 + return 0;
48776 +}
48777 diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
48778 --- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48779 +++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
48780 @@ -0,0 +1,36 @@
48781 +#include <linux/kernel.h>
48782 +#include <linux/sched.h>
48783 +#include <linux/mm.h>
48784 +#include <linux/file.h>
48785 +#include <linux/grinternal.h>
48786 +#include <linux/grsecurity.h>
48787 +
48788 +void
48789 +gr_log_textrel(struct vm_area_struct * vma)
48790 +{
48791 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48792 + if (grsec_enable_audit_textrel)
48793 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48794 +#endif
48795 + return;
48796 +}
48797 +
48798 +void
48799 +gr_log_rwxmmap(struct file *file)
48800 +{
48801 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48802 + if (grsec_enable_log_rwxmaps)
48803 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48804 +#endif
48805 + return;
48806 +}
48807 +
48808 +void
48809 +gr_log_rwxmprotect(struct file *file)
48810 +{
48811 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48812 + if (grsec_enable_log_rwxmaps)
48813 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48814 +#endif
48815 + return;
48816 +}
48817 diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
48818 --- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48819 +++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
48820 @@ -0,0 +1,14 @@
48821 +#include <linux/kernel.h>
48822 +#include <linux/sched.h>
48823 +#include <linux/grinternal.h>
48824 +#include <linux/grsecurity.h>
48825 +
48826 +void
48827 +gr_audit_ptrace(struct task_struct *task)
48828 +{
48829 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48830 + if (grsec_enable_audit_ptrace)
48831 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48832 +#endif
48833 + return;
48834 +}
48835 diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
48836 --- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48837 +++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
48838 @@ -0,0 +1,206 @@
48839 +#include <linux/kernel.h>
48840 +#include <linux/sched.h>
48841 +#include <linux/delay.h>
48842 +#include <linux/grsecurity.h>
48843 +#include <linux/grinternal.h>
48844 +#include <linux/hardirq.h>
48845 +
48846 +char *signames[] = {
48847 + [SIGSEGV] = "Segmentation fault",
48848 + [SIGILL] = "Illegal instruction",
48849 + [SIGABRT] = "Abort",
48850 + [SIGBUS] = "Invalid alignment/Bus error"
48851 +};
48852 +
48853 +void
48854 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48855 +{
48856 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48857 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48858 + (sig == SIGABRT) || (sig == SIGBUS))) {
48859 + if (t->pid == current->pid) {
48860 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48861 + } else {
48862 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48863 + }
48864 + }
48865 +#endif
48866 + return;
48867 +}
48868 +
48869 +int
48870 +gr_handle_signal(const struct task_struct *p, const int sig)
48871 +{
48872 +#ifdef CONFIG_GRKERNSEC
48873 + if (current->pid > 1 && gr_check_protected_task(p)) {
48874 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48875 + return -EPERM;
48876 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48877 + return -EPERM;
48878 + }
48879 +#endif
48880 + return 0;
48881 +}
48882 +
48883 +#ifdef CONFIG_GRKERNSEC
48884 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48885 +
48886 +int gr_fake_force_sig(int sig, struct task_struct *t)
48887 +{
48888 + unsigned long int flags;
48889 + int ret, blocked, ignored;
48890 + struct k_sigaction *action;
48891 +
48892 + spin_lock_irqsave(&t->sighand->siglock, flags);
48893 + action = &t->sighand->action[sig-1];
48894 + ignored = action->sa.sa_handler == SIG_IGN;
48895 + blocked = sigismember(&t->blocked, sig);
48896 + if (blocked || ignored) {
48897 + action->sa.sa_handler = SIG_DFL;
48898 + if (blocked) {
48899 + sigdelset(&t->blocked, sig);
48900 + recalc_sigpending_and_wake(t);
48901 + }
48902 + }
48903 + if (action->sa.sa_handler == SIG_DFL)
48904 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
48905 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
48906 +
48907 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
48908 +
48909 + return ret;
48910 +}
48911 +#endif
48912 +
48913 +#ifdef CONFIG_GRKERNSEC_BRUTE
48914 +#define GR_USER_BAN_TIME (15 * 60)
48915 +
48916 +static int __get_dumpable(unsigned long mm_flags)
48917 +{
48918 + int ret;
48919 +
48920 + ret = mm_flags & MMF_DUMPABLE_MASK;
48921 + return (ret >= 2) ? 2 : ret;
48922 +}
48923 +#endif
48924 +
48925 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
48926 +{
48927 +#ifdef CONFIG_GRKERNSEC_BRUTE
48928 + uid_t uid = 0;
48929 +
48930 + if (!grsec_enable_brute)
48931 + return;
48932 +
48933 + rcu_read_lock();
48934 + read_lock(&tasklist_lock);
48935 + read_lock(&grsec_exec_file_lock);
48936 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
48937 + p->real_parent->brute = 1;
48938 + else {
48939 + const struct cred *cred = __task_cred(p), *cred2;
48940 + struct task_struct *tsk, *tsk2;
48941 +
48942 + if (!__get_dumpable(mm_flags) && cred->uid) {
48943 + struct user_struct *user;
48944 +
48945 + uid = cred->uid;
48946 +
48947 + /* this is put upon execution past expiration */
48948 + user = find_user(uid);
48949 + if (user == NULL)
48950 + goto unlock;
48951 + user->banned = 1;
48952 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
48953 + if (user->ban_expires == ~0UL)
48954 + user->ban_expires--;
48955 +
48956 + do_each_thread(tsk2, tsk) {
48957 + cred2 = __task_cred(tsk);
48958 + if (tsk != p && cred2->uid == uid)
48959 + gr_fake_force_sig(SIGKILL, tsk);
48960 + } while_each_thread(tsk2, tsk);
48961 + }
48962 + }
48963 +unlock:
48964 + read_unlock(&grsec_exec_file_lock);
48965 + read_unlock(&tasklist_lock);
48966 + rcu_read_unlock();
48967 +
48968 + if (uid)
48969 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
48970 +
48971 +#endif
48972 + return;
48973 +}
48974 +
48975 +void gr_handle_brute_check(void)
48976 +{
48977 +#ifdef CONFIG_GRKERNSEC_BRUTE
48978 + if (current->brute)
48979 + msleep(30 * 1000);
48980 +#endif
48981 + return;
48982 +}
48983 +
48984 +void gr_handle_kernel_exploit(void)
48985 +{
48986 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
48987 + const struct cred *cred;
48988 + struct task_struct *tsk, *tsk2;
48989 + struct user_struct *user;
48990 + uid_t uid;
48991 +
48992 + if (in_irq() || in_serving_softirq() || in_nmi())
48993 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
48994 +
48995 + uid = current_uid();
48996 +
48997 + if (uid == 0)
48998 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
48999 + else {
49000 + /* kill all the processes of this user, hold a reference
49001 + to their creds struct, and prevent them from creating
49002 + another process until system reset
49003 + */
49004 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49005 + /* we intentionally leak this ref */
49006 + user = get_uid(current->cred->user);
49007 + if (user) {
49008 + user->banned = 1;
49009 + user->ban_expires = ~0UL;
49010 + }
49011 +
49012 + read_lock(&tasklist_lock);
49013 + do_each_thread(tsk2, tsk) {
49014 + cred = __task_cred(tsk);
49015 + if (cred->uid == uid)
49016 + gr_fake_force_sig(SIGKILL, tsk);
49017 + } while_each_thread(tsk2, tsk);
49018 + read_unlock(&tasklist_lock);
49019 + }
49020 +#endif
49021 +}
49022 +
49023 +int __gr_process_user_ban(struct user_struct *user)
49024 +{
49025 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49026 + if (unlikely(user->banned)) {
49027 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49028 + user->banned = 0;
49029 + user->ban_expires = 0;
49030 + free_uid(user);
49031 + } else
49032 + return -EPERM;
49033 + }
49034 +#endif
49035 + return 0;
49036 +}
49037 +
49038 +int gr_process_user_ban(void)
49039 +{
49040 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49041 + return __gr_process_user_ban(current->cred->user);
49042 +#endif
49043 + return 0;
49044 +}
49045 diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
49046 --- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49047 +++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
49048 @@ -0,0 +1,244 @@
49049 +#include <linux/kernel.h>
49050 +#include <linux/module.h>
49051 +#include <linux/sched.h>
49052 +#include <linux/file.h>
49053 +#include <linux/net.h>
49054 +#include <linux/in.h>
49055 +#include <linux/ip.h>
49056 +#include <net/sock.h>
49057 +#include <net/inet_sock.h>
49058 +#include <linux/grsecurity.h>
49059 +#include <linux/grinternal.h>
49060 +#include <linux/gracl.h>
49061 +
49062 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49063 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49064 +
49065 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
49066 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
49067 +
49068 +#ifdef CONFIG_UNIX_MODULE
49069 +EXPORT_SYMBOL(gr_acl_handle_unix);
49070 +EXPORT_SYMBOL(gr_acl_handle_mknod);
49071 +EXPORT_SYMBOL(gr_handle_chroot_unix);
49072 +EXPORT_SYMBOL(gr_handle_create);
49073 +#endif
49074 +
49075 +#ifdef CONFIG_GRKERNSEC
49076 +#define gr_conn_table_size 32749
49077 +struct conn_table_entry {
49078 + struct conn_table_entry *next;
49079 + struct signal_struct *sig;
49080 +};
49081 +
49082 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49083 +DEFINE_SPINLOCK(gr_conn_table_lock);
49084 +
49085 +extern const char * gr_socktype_to_name(unsigned char type);
49086 +extern const char * gr_proto_to_name(unsigned char proto);
49087 +extern const char * gr_sockfamily_to_name(unsigned char family);
49088 +
49089 +static __inline__ int
49090 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49091 +{
49092 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49093 +}
49094 +
49095 +static __inline__ int
49096 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49097 + __u16 sport, __u16 dport)
49098 +{
49099 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49100 + sig->gr_sport == sport && sig->gr_dport == dport))
49101 + return 1;
49102 + else
49103 + return 0;
49104 +}
49105 +
49106 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49107 +{
49108 + struct conn_table_entry **match;
49109 + unsigned int index;
49110 +
49111 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49112 + sig->gr_sport, sig->gr_dport,
49113 + gr_conn_table_size);
49114 +
49115 + newent->sig = sig;
49116 +
49117 + match = &gr_conn_table[index];
49118 + newent->next = *match;
49119 + *match = newent;
49120 +
49121 + return;
49122 +}
49123 +
49124 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49125 +{
49126 + struct conn_table_entry *match, *last = NULL;
49127 + unsigned int index;
49128 +
49129 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49130 + sig->gr_sport, sig->gr_dport,
49131 + gr_conn_table_size);
49132 +
49133 + match = gr_conn_table[index];
49134 + while (match && !conn_match(match->sig,
49135 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49136 + sig->gr_dport)) {
49137 + last = match;
49138 + match = match->next;
49139 + }
49140 +
49141 + if (match) {
49142 + if (last)
49143 + last->next = match->next;
49144 + else
49145 + gr_conn_table[index] = NULL;
49146 + kfree(match);
49147 + }
49148 +
49149 + return;
49150 +}
49151 +
49152 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49153 + __u16 sport, __u16 dport)
49154 +{
49155 + struct conn_table_entry *match;
49156 + unsigned int index;
49157 +
49158 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49159 +
49160 + match = gr_conn_table[index];
49161 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49162 + match = match->next;
49163 +
49164 + if (match)
49165 + return match->sig;
49166 + else
49167 + return NULL;
49168 +}
49169 +
49170 +#endif
49171 +
49172 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49173 +{
49174 +#ifdef CONFIG_GRKERNSEC
49175 + struct signal_struct *sig = task->signal;
49176 + struct conn_table_entry *newent;
49177 +
49178 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49179 + if (newent == NULL)
49180 + return;
49181 + /* no bh lock needed since we are called with bh disabled */
49182 + spin_lock(&gr_conn_table_lock);
49183 + gr_del_task_from_ip_table_nolock(sig);
49184 + sig->gr_saddr = inet->inet_rcv_saddr;
49185 + sig->gr_daddr = inet->inet_daddr;
49186 + sig->gr_sport = inet->inet_sport;
49187 + sig->gr_dport = inet->inet_dport;
49188 + gr_add_to_task_ip_table_nolock(sig, newent);
49189 + spin_unlock(&gr_conn_table_lock);
49190 +#endif
49191 + return;
49192 +}
49193 +
49194 +void gr_del_task_from_ip_table(struct task_struct *task)
49195 +{
49196 +#ifdef CONFIG_GRKERNSEC
49197 + spin_lock_bh(&gr_conn_table_lock);
49198 + gr_del_task_from_ip_table_nolock(task->signal);
49199 + spin_unlock_bh(&gr_conn_table_lock);
49200 +#endif
49201 + return;
49202 +}
49203 +
49204 +void
49205 +gr_attach_curr_ip(const struct sock *sk)
49206 +{
49207 +#ifdef CONFIG_GRKERNSEC
49208 + struct signal_struct *p, *set;
49209 + const struct inet_sock *inet = inet_sk(sk);
49210 +
49211 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49212 + return;
49213 +
49214 + set = current->signal;
49215 +
49216 + spin_lock_bh(&gr_conn_table_lock);
49217 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49218 + inet->inet_dport, inet->inet_sport);
49219 + if (unlikely(p != NULL)) {
49220 + set->curr_ip = p->curr_ip;
49221 + set->used_accept = 1;
49222 + gr_del_task_from_ip_table_nolock(p);
49223 + spin_unlock_bh(&gr_conn_table_lock);
49224 + return;
49225 + }
49226 + spin_unlock_bh(&gr_conn_table_lock);
49227 +
49228 + set->curr_ip = inet->inet_daddr;
49229 + set->used_accept = 1;
49230 +#endif
49231 + return;
49232 +}
49233 +
49234 +int
49235 +gr_handle_sock_all(const int family, const int type, const int protocol)
49236 +{
49237 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49238 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49239 + (family != AF_UNIX)) {
49240 + if (family == AF_INET)
49241 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49242 + else
49243 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49244 + return -EACCES;
49245 + }
49246 +#endif
49247 + return 0;
49248 +}
49249 +
49250 +int
49251 +gr_handle_sock_server(const struct sockaddr *sck)
49252 +{
49253 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49254 + if (grsec_enable_socket_server &&
49255 + in_group_p(grsec_socket_server_gid) &&
49256 + sck && (sck->sa_family != AF_UNIX) &&
49257 + (sck->sa_family != AF_LOCAL)) {
49258 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49259 + return -EACCES;
49260 + }
49261 +#endif
49262 + return 0;
49263 +}
49264 +
49265 +int
49266 +gr_handle_sock_server_other(const struct sock *sck)
49267 +{
49268 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49269 + if (grsec_enable_socket_server &&
49270 + in_group_p(grsec_socket_server_gid) &&
49271 + sck && (sck->sk_family != AF_UNIX) &&
49272 + (sck->sk_family != AF_LOCAL)) {
49273 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49274 + return -EACCES;
49275 + }
49276 +#endif
49277 + return 0;
49278 +}
49279 +
49280 +int
49281 +gr_handle_sock_client(const struct sockaddr *sck)
49282 +{
49283 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49284 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49285 + sck && (sck->sa_family != AF_UNIX) &&
49286 + (sck->sa_family != AF_LOCAL)) {
49287 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49288 + return -EACCES;
49289 + }
49290 +#endif
49291 + return 0;
49292 +}
49293 diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
49294 --- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49295 +++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
49296 @@ -0,0 +1,433 @@
49297 +#include <linux/kernel.h>
49298 +#include <linux/sched.h>
49299 +#include <linux/sysctl.h>
49300 +#include <linux/grsecurity.h>
49301 +#include <linux/grinternal.h>
49302 +
49303 +int
49304 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49305 +{
49306 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49307 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49308 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49309 + return -EACCES;
49310 + }
49311 +#endif
49312 + return 0;
49313 +}
49314 +
49315 +#ifdef CONFIG_GRKERNSEC_ROFS
49316 +static int __maybe_unused one = 1;
49317 +#endif
49318 +
49319 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49320 +struct ctl_table grsecurity_table[] = {
49321 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49322 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49323 +#ifdef CONFIG_GRKERNSEC_IO
49324 + {
49325 + .procname = "disable_priv_io",
49326 + .data = &grsec_disable_privio,
49327 + .maxlen = sizeof(int),
49328 + .mode = 0600,
49329 + .proc_handler = &proc_dointvec,
49330 + },
49331 +#endif
49332 +#endif
49333 +#ifdef CONFIG_GRKERNSEC_LINK
49334 + {
49335 + .procname = "linking_restrictions",
49336 + .data = &grsec_enable_link,
49337 + .maxlen = sizeof(int),
49338 + .mode = 0600,
49339 + .proc_handler = &proc_dointvec,
49340 + },
49341 +#endif
49342 +#ifdef CONFIG_GRKERNSEC_BRUTE
49343 + {
49344 + .procname = "deter_bruteforce",
49345 + .data = &grsec_enable_brute,
49346 + .maxlen = sizeof(int),
49347 + .mode = 0600,
49348 + .proc_handler = &proc_dointvec,
49349 + },
49350 +#endif
49351 +#ifdef CONFIG_GRKERNSEC_FIFO
49352 + {
49353 + .procname = "fifo_restrictions",
49354 + .data = &grsec_enable_fifo,
49355 + .maxlen = sizeof(int),
49356 + .mode = 0600,
49357 + .proc_handler = &proc_dointvec,
49358 + },
49359 +#endif
49360 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49361 + {
49362 + .procname = "ip_blackhole",
49363 + .data = &grsec_enable_blackhole,
49364 + .maxlen = sizeof(int),
49365 + .mode = 0600,
49366 + .proc_handler = &proc_dointvec,
49367 + },
49368 + {
49369 + .procname = "lastack_retries",
49370 + .data = &grsec_lastack_retries,
49371 + .maxlen = sizeof(int),
49372 + .mode = 0600,
49373 + .proc_handler = &proc_dointvec,
49374 + },
49375 +#endif
49376 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49377 + {
49378 + .procname = "exec_logging",
49379 + .data = &grsec_enable_execlog,
49380 + .maxlen = sizeof(int),
49381 + .mode = 0600,
49382 + .proc_handler = &proc_dointvec,
49383 + },
49384 +#endif
49385 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49386 + {
49387 + .procname = "rwxmap_logging",
49388 + .data = &grsec_enable_log_rwxmaps,
49389 + .maxlen = sizeof(int),
49390 + .mode = 0600,
49391 + .proc_handler = &proc_dointvec,
49392 + },
49393 +#endif
49394 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49395 + {
49396 + .procname = "signal_logging",
49397 + .data = &grsec_enable_signal,
49398 + .maxlen = sizeof(int),
49399 + .mode = 0600,
49400 + .proc_handler = &proc_dointvec,
49401 + },
49402 +#endif
49403 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49404 + {
49405 + .procname = "forkfail_logging",
49406 + .data = &grsec_enable_forkfail,
49407 + .maxlen = sizeof(int),
49408 + .mode = 0600,
49409 + .proc_handler = &proc_dointvec,
49410 + },
49411 +#endif
49412 +#ifdef CONFIG_GRKERNSEC_TIME
49413 + {
49414 + .procname = "timechange_logging",
49415 + .data = &grsec_enable_time,
49416 + .maxlen = sizeof(int),
49417 + .mode = 0600,
49418 + .proc_handler = &proc_dointvec,
49419 + },
49420 +#endif
49421 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49422 + {
49423 + .procname = "chroot_deny_shmat",
49424 + .data = &grsec_enable_chroot_shmat,
49425 + .maxlen = sizeof(int),
49426 + .mode = 0600,
49427 + .proc_handler = &proc_dointvec,
49428 + },
49429 +#endif
49430 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49431 + {
49432 + .procname = "chroot_deny_unix",
49433 + .data = &grsec_enable_chroot_unix,
49434 + .maxlen = sizeof(int),
49435 + .mode = 0600,
49436 + .proc_handler = &proc_dointvec,
49437 + },
49438 +#endif
49439 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49440 + {
49441 + .procname = "chroot_deny_mount",
49442 + .data = &grsec_enable_chroot_mount,
49443 + .maxlen = sizeof(int),
49444 + .mode = 0600,
49445 + .proc_handler = &proc_dointvec,
49446 + },
49447 +#endif
49448 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49449 + {
49450 + .procname = "chroot_deny_fchdir",
49451 + .data = &grsec_enable_chroot_fchdir,
49452 + .maxlen = sizeof(int),
49453 + .mode = 0600,
49454 + .proc_handler = &proc_dointvec,
49455 + },
49456 +#endif
49457 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49458 + {
49459 + .procname = "chroot_deny_chroot",
49460 + .data = &grsec_enable_chroot_double,
49461 + .maxlen = sizeof(int),
49462 + .mode = 0600,
49463 + .proc_handler = &proc_dointvec,
49464 + },
49465 +#endif
49466 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49467 + {
49468 + .procname = "chroot_deny_pivot",
49469 + .data = &grsec_enable_chroot_pivot,
49470 + .maxlen = sizeof(int),
49471 + .mode = 0600,
49472 + .proc_handler = &proc_dointvec,
49473 + },
49474 +#endif
49475 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49476 + {
49477 + .procname = "chroot_enforce_chdir",
49478 + .data = &grsec_enable_chroot_chdir,
49479 + .maxlen = sizeof(int),
49480 + .mode = 0600,
49481 + .proc_handler = &proc_dointvec,
49482 + },
49483 +#endif
49484 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49485 + {
49486 + .procname = "chroot_deny_chmod",
49487 + .data = &grsec_enable_chroot_chmod,
49488 + .maxlen = sizeof(int),
49489 + .mode = 0600,
49490 + .proc_handler = &proc_dointvec,
49491 + },
49492 +#endif
49493 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49494 + {
49495 + .procname = "chroot_deny_mknod",
49496 + .data = &grsec_enable_chroot_mknod,
49497 + .maxlen = sizeof(int),
49498 + .mode = 0600,
49499 + .proc_handler = &proc_dointvec,
49500 + },
49501 +#endif
49502 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49503 + {
49504 + .procname = "chroot_restrict_nice",
49505 + .data = &grsec_enable_chroot_nice,
49506 + .maxlen = sizeof(int),
49507 + .mode = 0600,
49508 + .proc_handler = &proc_dointvec,
49509 + },
49510 +#endif
49511 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49512 + {
49513 + .procname = "chroot_execlog",
49514 + .data = &grsec_enable_chroot_execlog,
49515 + .maxlen = sizeof(int),
49516 + .mode = 0600,
49517 + .proc_handler = &proc_dointvec,
49518 + },
49519 +#endif
49520 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49521 + {
49522 + .procname = "chroot_caps",
49523 + .data = &grsec_enable_chroot_caps,
49524 + .maxlen = sizeof(int),
49525 + .mode = 0600,
49526 + .proc_handler = &proc_dointvec,
49527 + },
49528 +#endif
49529 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49530 + {
49531 + .procname = "chroot_deny_sysctl",
49532 + .data = &grsec_enable_chroot_sysctl,
49533 + .maxlen = sizeof(int),
49534 + .mode = 0600,
49535 + .proc_handler = &proc_dointvec,
49536 + },
49537 +#endif
49538 +#ifdef CONFIG_GRKERNSEC_TPE
49539 + {
49540 + .procname = "tpe",
49541 + .data = &grsec_enable_tpe,
49542 + .maxlen = sizeof(int),
49543 + .mode = 0600,
49544 + .proc_handler = &proc_dointvec,
49545 + },
49546 + {
49547 + .procname = "tpe_gid",
49548 + .data = &grsec_tpe_gid,
49549 + .maxlen = sizeof(int),
49550 + .mode = 0600,
49551 + .proc_handler = &proc_dointvec,
49552 + },
49553 +#endif
49554 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49555 + {
49556 + .procname = "tpe_invert",
49557 + .data = &grsec_enable_tpe_invert,
49558 + .maxlen = sizeof(int),
49559 + .mode = 0600,
49560 + .proc_handler = &proc_dointvec,
49561 + },
49562 +#endif
49563 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49564 + {
49565 + .procname = "tpe_restrict_all",
49566 + .data = &grsec_enable_tpe_all,
49567 + .maxlen = sizeof(int),
49568 + .mode = 0600,
49569 + .proc_handler = &proc_dointvec,
49570 + },
49571 +#endif
49572 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49573 + {
49574 + .procname = "socket_all",
49575 + .data = &grsec_enable_socket_all,
49576 + .maxlen = sizeof(int),
49577 + .mode = 0600,
49578 + .proc_handler = &proc_dointvec,
49579 + },
49580 + {
49581 + .procname = "socket_all_gid",
49582 + .data = &grsec_socket_all_gid,
49583 + .maxlen = sizeof(int),
49584 + .mode = 0600,
49585 + .proc_handler = &proc_dointvec,
49586 + },
49587 +#endif
49588 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49589 + {
49590 + .procname = "socket_client",
49591 + .data = &grsec_enable_socket_client,
49592 + .maxlen = sizeof(int),
49593 + .mode = 0600,
49594 + .proc_handler = &proc_dointvec,
49595 + },
49596 + {
49597 + .procname = "socket_client_gid",
49598 + .data = &grsec_socket_client_gid,
49599 + .maxlen = sizeof(int),
49600 + .mode = 0600,
49601 + .proc_handler = &proc_dointvec,
49602 + },
49603 +#endif
49604 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49605 + {
49606 + .procname = "socket_server",
49607 + .data = &grsec_enable_socket_server,
49608 + .maxlen = sizeof(int),
49609 + .mode = 0600,
49610 + .proc_handler = &proc_dointvec,
49611 + },
49612 + {
49613 + .procname = "socket_server_gid",
49614 + .data = &grsec_socket_server_gid,
49615 + .maxlen = sizeof(int),
49616 + .mode = 0600,
49617 + .proc_handler = &proc_dointvec,
49618 + },
49619 +#endif
49620 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49621 + {
49622 + .procname = "audit_group",
49623 + .data = &grsec_enable_group,
49624 + .maxlen = sizeof(int),
49625 + .mode = 0600,
49626 + .proc_handler = &proc_dointvec,
49627 + },
49628 + {
49629 + .procname = "audit_gid",
49630 + .data = &grsec_audit_gid,
49631 + .maxlen = sizeof(int),
49632 + .mode = 0600,
49633 + .proc_handler = &proc_dointvec,
49634 + },
49635 +#endif
49636 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49637 + {
49638 + .procname = "audit_chdir",
49639 + .data = &grsec_enable_chdir,
49640 + .maxlen = sizeof(int),
49641 + .mode = 0600,
49642 + .proc_handler = &proc_dointvec,
49643 + },
49644 +#endif
49645 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49646 + {
49647 + .procname = "audit_mount",
49648 + .data = &grsec_enable_mount,
49649 + .maxlen = sizeof(int),
49650 + .mode = 0600,
49651 + .proc_handler = &proc_dointvec,
49652 + },
49653 +#endif
49654 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49655 + {
49656 + .procname = "audit_textrel",
49657 + .data = &grsec_enable_audit_textrel,
49658 + .maxlen = sizeof(int),
49659 + .mode = 0600,
49660 + .proc_handler = &proc_dointvec,
49661 + },
49662 +#endif
49663 +#ifdef CONFIG_GRKERNSEC_DMESG
49664 + {
49665 + .procname = "dmesg",
49666 + .data = &grsec_enable_dmesg,
49667 + .maxlen = sizeof(int),
49668 + .mode = 0600,
49669 + .proc_handler = &proc_dointvec,
49670 + },
49671 +#endif
49672 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49673 + {
49674 + .procname = "chroot_findtask",
49675 + .data = &grsec_enable_chroot_findtask,
49676 + .maxlen = sizeof(int),
49677 + .mode = 0600,
49678 + .proc_handler = &proc_dointvec,
49679 + },
49680 +#endif
49681 +#ifdef CONFIG_GRKERNSEC_RESLOG
49682 + {
49683 + .procname = "resource_logging",
49684 + .data = &grsec_resource_logging,
49685 + .maxlen = sizeof(int),
49686 + .mode = 0600,
49687 + .proc_handler = &proc_dointvec,
49688 + },
49689 +#endif
49690 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49691 + {
49692 + .procname = "audit_ptrace",
49693 + .data = &grsec_enable_audit_ptrace,
49694 + .maxlen = sizeof(int),
49695 + .mode = 0600,
49696 + .proc_handler = &proc_dointvec,
49697 + },
49698 +#endif
49699 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49700 + {
49701 + .procname = "harden_ptrace",
49702 + .data = &grsec_enable_harden_ptrace,
49703 + .maxlen = sizeof(int),
49704 + .mode = 0600,
49705 + .proc_handler = &proc_dointvec,
49706 + },
49707 +#endif
49708 + {
49709 + .procname = "grsec_lock",
49710 + .data = &grsec_lock,
49711 + .maxlen = sizeof(int),
49712 + .mode = 0600,
49713 + .proc_handler = &proc_dointvec,
49714 + },
49715 +#endif
49716 +#ifdef CONFIG_GRKERNSEC_ROFS
49717 + {
49718 + .procname = "romount_protect",
49719 + .data = &grsec_enable_rofs,
49720 + .maxlen = sizeof(int),
49721 + .mode = 0600,
49722 + .proc_handler = &proc_dointvec_minmax,
49723 + .extra1 = &one,
49724 + .extra2 = &one,
49725 + },
49726 +#endif
49727 + { }
49728 +};
49729 +#endif
49730 diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
49731 --- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49732 +++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
49733 @@ -0,0 +1,16 @@
49734 +#include <linux/kernel.h>
49735 +#include <linux/sched.h>
49736 +#include <linux/grinternal.h>
49737 +#include <linux/module.h>
49738 +
49739 +void
49740 +gr_log_timechange(void)
49741 +{
49742 +#ifdef CONFIG_GRKERNSEC_TIME
49743 + if (grsec_enable_time)
49744 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49745 +#endif
49746 + return;
49747 +}
49748 +
49749 +EXPORT_SYMBOL(gr_log_timechange);
49750 diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
49751 --- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49752 +++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
49753 @@ -0,0 +1,39 @@
49754 +#include <linux/kernel.h>
49755 +#include <linux/sched.h>
49756 +#include <linux/file.h>
49757 +#include <linux/fs.h>
49758 +#include <linux/grinternal.h>
49759 +
49760 +extern int gr_acl_tpe_check(void);
49761 +
49762 +int
49763 +gr_tpe_allow(const struct file *file)
49764 +{
49765 +#ifdef CONFIG_GRKERNSEC
49766 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49767 + const struct cred *cred = current_cred();
49768 +
49769 + if (cred->uid && ((grsec_enable_tpe &&
49770 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49771 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49772 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49773 +#else
49774 + in_group_p(grsec_tpe_gid)
49775 +#endif
49776 + ) || gr_acl_tpe_check()) &&
49777 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49778 + (inode->i_mode & S_IWOTH))))) {
49779 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49780 + return 0;
49781 + }
49782 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49783 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49784 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49785 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49786 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49787 + return 0;
49788 + }
49789 +#endif
49790 +#endif
49791 + return 1;
49792 +}
49793 diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
49794 --- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49795 +++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
49796 @@ -0,0 +1,61 @@
49797 +#include <linux/err.h>
49798 +#include <linux/kernel.h>
49799 +#include <linux/sched.h>
49800 +#include <linux/mm.h>
49801 +#include <linux/scatterlist.h>
49802 +#include <linux/crypto.h>
49803 +#include <linux/gracl.h>
49804 +
49805 +
49806 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49807 +#error "crypto and sha256 must be built into the kernel"
49808 +#endif
49809 +
49810 +int
49811 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49812 +{
49813 + char *p;
49814 + struct crypto_hash *tfm;
49815 + struct hash_desc desc;
49816 + struct scatterlist sg;
49817 + unsigned char temp_sum[GR_SHA_LEN];
49818 + volatile int retval = 0;
49819 + volatile int dummy = 0;
49820 + unsigned int i;
49821 +
49822 + sg_init_table(&sg, 1);
49823 +
49824 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49825 + if (IS_ERR(tfm)) {
49826 + /* should never happen, since sha256 should be built in */
49827 + return 1;
49828 + }
49829 +
49830 + desc.tfm = tfm;
49831 + desc.flags = 0;
49832 +
49833 + crypto_hash_init(&desc);
49834 +
49835 + p = salt;
49836 + sg_set_buf(&sg, p, GR_SALT_LEN);
49837 + crypto_hash_update(&desc, &sg, sg.length);
49838 +
49839 + p = entry->pw;
49840 + sg_set_buf(&sg, p, strlen(p));
49841 +
49842 + crypto_hash_update(&desc, &sg, sg.length);
49843 +
49844 + crypto_hash_final(&desc, temp_sum);
49845 +
49846 + memset(entry->pw, 0, GR_PW_LEN);
49847 +
49848 + for (i = 0; i < GR_SHA_LEN; i++)
49849 + if (sum[i] != temp_sum[i])
49850 + retval = 1;
49851 + else
49852 + dummy = 1; // waste a cycle
49853 +
49854 + crypto_free_hash(tfm);
49855 +
49856 + return retval;
49857 +}
49858 diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
49859 --- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49860 +++ linux-3.0.4/grsecurity/Kconfig 2011-08-25 17:25:34.000000000 -0400
49861 @@ -0,0 +1,1038 @@
49862 +#
49863 +# grecurity configuration
49864 +#
49865 +
49866 +menu "Grsecurity"
49867 +
49868 +config GRKERNSEC
49869 + bool "Grsecurity"
49870 + select CRYPTO
49871 + select CRYPTO_SHA256
49872 + help
49873 + If you say Y here, you will be able to configure many features
49874 + that will enhance the security of your system. It is highly
49875 + recommended that you say Y here and read through the help
49876 + for each option so that you fully understand the features and
49877 + can evaluate their usefulness for your machine.
49878 +
49879 +choice
49880 + prompt "Security Level"
49881 + depends on GRKERNSEC
49882 + default GRKERNSEC_CUSTOM
49883 +
49884 +config GRKERNSEC_LOW
49885 + bool "Low"
49886 + select GRKERNSEC_LINK
49887 + select GRKERNSEC_FIFO
49888 + select GRKERNSEC_RANDNET
49889 + select GRKERNSEC_DMESG
49890 + select GRKERNSEC_CHROOT
49891 + select GRKERNSEC_CHROOT_CHDIR
49892 +
49893 + help
49894 + If you choose this option, several of the grsecurity options will
49895 + be enabled that will give you greater protection against a number
49896 + of attacks, while assuring that none of your software will have any
49897 + conflicts with the additional security measures. If you run a lot
49898 + of unusual software, or you are having problems with the higher
49899 + security levels, you should say Y here. With this option, the
49900 + following features are enabled:
49901 +
49902 + - Linking restrictions
49903 + - FIFO restrictions
49904 + - Restricted dmesg
49905 + - Enforced chdir("/") on chroot
49906 + - Runtime module disabling
49907 +
49908 +config GRKERNSEC_MEDIUM
49909 + bool "Medium"
49910 + select PAX
49911 + select PAX_EI_PAX
49912 + select PAX_PT_PAX_FLAGS
49913 + select PAX_HAVE_ACL_FLAGS
49914 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49915 + select GRKERNSEC_CHROOT
49916 + select GRKERNSEC_CHROOT_SYSCTL
49917 + select GRKERNSEC_LINK
49918 + select GRKERNSEC_FIFO
49919 + select GRKERNSEC_DMESG
49920 + select GRKERNSEC_RANDNET
49921 + select GRKERNSEC_FORKFAIL
49922 + select GRKERNSEC_TIME
49923 + select GRKERNSEC_SIGNAL
49924 + select GRKERNSEC_CHROOT
49925 + select GRKERNSEC_CHROOT_UNIX
49926 + select GRKERNSEC_CHROOT_MOUNT
49927 + select GRKERNSEC_CHROOT_PIVOT
49928 + select GRKERNSEC_CHROOT_DOUBLE
49929 + select GRKERNSEC_CHROOT_CHDIR
49930 + select GRKERNSEC_CHROOT_MKNOD
49931 + select GRKERNSEC_PROC
49932 + select GRKERNSEC_PROC_USERGROUP
49933 + select PAX_RANDUSTACK
49934 + select PAX_ASLR
49935 + select PAX_RANDMMAP
49936 + select PAX_REFCOUNT if (X86 || SPARC64)
49937 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49938 +
49939 + help
49940 + If you say Y here, several features in addition to those included
49941 + in the low additional security level will be enabled. These
49942 + features provide even more security to your system, though in rare
49943 + cases they may be incompatible with very old or poorly written
49944 + software. If you enable this option, make sure that your auth
49945 + service (identd) is running as gid 1001. With this option,
49946 + the following features (in addition to those provided in the
49947 + low additional security level) will be enabled:
49948 +
49949 + - Failed fork logging
49950 + - Time change logging
49951 + - Signal logging
49952 + - Deny mounts in chroot
49953 + - Deny double chrooting
49954 + - Deny sysctl writes in chroot
49955 + - Deny mknod in chroot
49956 + - Deny access to abstract AF_UNIX sockets out of chroot
49957 + - Deny pivot_root in chroot
49958 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
49959 + - /proc restrictions with special GID set to 10 (usually wheel)
49960 + - Address Space Layout Randomization (ASLR)
49961 + - Prevent exploitation of most refcount overflows
49962 + - Bounds checking of copying between the kernel and userland
49963 +
49964 +config GRKERNSEC_HIGH
49965 + bool "High"
49966 + select GRKERNSEC_LINK
49967 + select GRKERNSEC_FIFO
49968 + select GRKERNSEC_DMESG
49969 + select GRKERNSEC_FORKFAIL
49970 + select GRKERNSEC_TIME
49971 + select GRKERNSEC_SIGNAL
49972 + select GRKERNSEC_CHROOT
49973 + select GRKERNSEC_CHROOT_SHMAT
49974 + select GRKERNSEC_CHROOT_UNIX
49975 + select GRKERNSEC_CHROOT_MOUNT
49976 + select GRKERNSEC_CHROOT_FCHDIR
49977 + select GRKERNSEC_CHROOT_PIVOT
49978 + select GRKERNSEC_CHROOT_DOUBLE
49979 + select GRKERNSEC_CHROOT_CHDIR
49980 + select GRKERNSEC_CHROOT_MKNOD
49981 + select GRKERNSEC_CHROOT_CAPS
49982 + select GRKERNSEC_CHROOT_SYSCTL
49983 + select GRKERNSEC_CHROOT_FINDTASK
49984 + select GRKERNSEC_SYSFS_RESTRICT
49985 + select GRKERNSEC_PROC
49986 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49987 + select GRKERNSEC_HIDESYM
49988 + select GRKERNSEC_BRUTE
49989 + select GRKERNSEC_PROC_USERGROUP
49990 + select GRKERNSEC_KMEM
49991 + select GRKERNSEC_RESLOG
49992 + select GRKERNSEC_RANDNET
49993 + select GRKERNSEC_PROC_ADD
49994 + select GRKERNSEC_CHROOT_CHMOD
49995 + select GRKERNSEC_CHROOT_NICE
49996 + select GRKERNSEC_AUDIT_MOUNT
49997 + select GRKERNSEC_MODHARDEN if (MODULES)
49998 + select GRKERNSEC_HARDEN_PTRACE
49999 + select GRKERNSEC_VM86 if (X86_32)
50000 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50001 + select PAX
50002 + select PAX_RANDUSTACK
50003 + select PAX_ASLR
50004 + select PAX_RANDMMAP
50005 + select PAX_NOEXEC
50006 + select PAX_MPROTECT
50007 + select PAX_EI_PAX
50008 + select PAX_PT_PAX_FLAGS
50009 + select PAX_HAVE_ACL_FLAGS
50010 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50011 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50012 + select PAX_RANDKSTACK if (X86_TSC && X86)
50013 + select PAX_SEGMEXEC if (X86_32)
50014 + select PAX_PAGEEXEC
50015 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50016 + select PAX_EMUTRAMP if (PARISC)
50017 + select PAX_EMUSIGRT if (PARISC)
50018 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50019 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50020 + select PAX_REFCOUNT if (X86 || SPARC64)
50021 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50022 + help
50023 + If you say Y here, many of the features of grsecurity will be
50024 + enabled, which will protect you against many kinds of attacks
50025 + against your system. The heightened security comes at a cost
50026 + of an increased chance of incompatibilities with rare software
50027 + on your machine. Since this security level enables PaX, you should
50028 + view <http://pax.grsecurity.net> and read about the PaX
50029 + project. While you are there, download chpax and run it on
50030 + binaries that cause problems with PaX. Also remember that
50031 + since the /proc restrictions are enabled, you must run your
50032 + identd as gid 1001. This security level enables the following
50033 + features in addition to those listed in the low and medium
50034 + security levels:
50035 +
50036 + - Additional /proc restrictions
50037 + - Chmod restrictions in chroot
50038 + - No signals, ptrace, or viewing of processes outside of chroot
50039 + - Capability restrictions in chroot
50040 + - Deny fchdir out of chroot
50041 + - Priority restrictions in chroot
50042 + - Segmentation-based implementation of PaX
50043 + - Mprotect restrictions
50044 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50045 + - Kernel stack randomization
50046 + - Mount/unmount/remount logging
50047 + - Kernel symbol hiding
50048 + - Prevention of memory exhaustion-based exploits
50049 + - Hardening of module auto-loading
50050 + - Ptrace restrictions
50051 + - Restricted vm86 mode
50052 + - Restricted sysfs/debugfs
50053 + - Active kernel exploit response
50054 +
50055 +config GRKERNSEC_CUSTOM
50056 + bool "Custom"
50057 + help
50058 + If you say Y here, you will be able to configure every grsecurity
50059 + option, which allows you to enable many more features that aren't
50060 + covered in the basic security levels. These additional features
50061 + include TPE, socket restrictions, and the sysctl system for
50062 + grsecurity. It is advised that you read through the help for
50063 + each option to determine its usefulness in your situation.
50064 +
50065 +endchoice
50066 +
50067 +menu "Address Space Protection"
50068 +depends on GRKERNSEC
50069 +
50070 +config GRKERNSEC_KMEM
50071 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50072 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50073 + help
50074 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50075 + be written to via mmap or otherwise to modify the running kernel.
50076 + /dev/port will also not be allowed to be opened. If you have module
50077 + support disabled, enabling this will close up four ways that are
50078 + currently used to insert malicious code into the running kernel.
50079 + Even with all these features enabled, we still highly recommend that
50080 + you use the RBAC system, as it is still possible for an attacker to
50081 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50082 + If you are not using XFree86, you may be able to stop this additional
50083 + case by enabling the 'Disable privileged I/O' option. Though nothing
50084 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50085 + but only to video memory, which is the only writing we allow in this
50086 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50087 + not be allowed to mprotect it with PROT_WRITE later.
50088 + It is highly recommended that you say Y here if you meet all the
50089 + conditions above.
50090 +
50091 +config GRKERNSEC_VM86
50092 + bool "Restrict VM86 mode"
50093 + depends on X86_32
50094 +
50095 + help
50096 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50097 + make use of a special execution mode on 32bit x86 processors called
50098 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50099 + video cards and will still work with this option enabled. The purpose
50100 + of the option is to prevent exploitation of emulation errors in
50101 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50102 + Nearly all users should be able to enable this option.
50103 +
50104 +config GRKERNSEC_IO
50105 + bool "Disable privileged I/O"
50106 + depends on X86
50107 + select RTC_CLASS
50108 + select RTC_INTF_DEV
50109 + select RTC_DRV_CMOS
50110 +
50111 + help
50112 + If you say Y here, all ioperm and iopl calls will return an error.
50113 + Ioperm and iopl can be used to modify the running kernel.
50114 + Unfortunately, some programs need this access to operate properly,
50115 + the most notable of which are XFree86 and hwclock. hwclock can be
50116 + remedied by having RTC support in the kernel, so real-time
50117 + clock support is enabled if this option is enabled, to ensure
50118 + that hwclock operates correctly. XFree86 still will not
50119 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50120 + IF YOU USE XFree86. If you use XFree86 and you still want to
50121 + protect your kernel against modification, use the RBAC system.
50122 +
50123 +config GRKERNSEC_PROC_MEMMAP
50124 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50125 + default y if (PAX_NOEXEC || PAX_ASLR)
50126 + depends on PAX_NOEXEC || PAX_ASLR
50127 + help
50128 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50129 + give no information about the addresses of its mappings if
50130 + PaX features that rely on random addresses are enabled on the task.
50131 + If you use PaX it is greatly recommended that you say Y here as it
50132 + closes up a hole that makes the full ASLR useless for suid
50133 + binaries.
50134 +
50135 +config GRKERNSEC_BRUTE
50136 + bool "Deter exploit bruteforcing"
50137 + help
50138 + If you say Y here, attempts to bruteforce exploits against forking
50139 + daemons such as apache or sshd, as well as against suid/sgid binaries
50140 + will be deterred. When a child of a forking daemon is killed by PaX
50141 + or crashes due to an illegal instruction or other suspicious signal,
50142 + the parent process will be delayed 30 seconds upon every subsequent
50143 + fork until the administrator is able to assess the situation and
50144 + restart the daemon.
50145 + In the suid/sgid case, the attempt is logged, the user has all their
50146 + processes terminated, and they are prevented from executing any further
50147 + processes for 15 minutes.
50148 + It is recommended that you also enable signal logging in the auditing
50149 + section so that logs are generated when a process triggers a suspicious
50150 + signal.
50151 + If the sysctl option is enabled, a sysctl option with name
50152 + "deter_bruteforce" is created.
50153 +
50154 +
50155 +config GRKERNSEC_MODHARDEN
50156 + bool "Harden module auto-loading"
50157 + depends on MODULES
50158 + help
50159 + If you say Y here, module auto-loading in response to use of some
50160 + feature implemented by an unloaded module will be restricted to
50161 + root users. Enabling this option helps defend against attacks
50162 + by unprivileged users who abuse the auto-loading behavior to
50163 + cause a vulnerable module to load that is then exploited.
50164 +
50165 + If this option prevents a legitimate use of auto-loading for a
50166 + non-root user, the administrator can execute modprobe manually
50167 + with the exact name of the module mentioned in the alert log.
50168 + Alternatively, the administrator can add the module to the list
50169 + of modules loaded at boot by modifying init scripts.
50170 +
50171 + Modification of init scripts will most likely be needed on
50172 + Ubuntu servers with encrypted home directory support enabled,
50173 + as the first non-root user logging in will cause the ecb(aes),
50174 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50175 +
50176 +config GRKERNSEC_HIDESYM
50177 + bool "Hide kernel symbols"
50178 + help
50179 + If you say Y here, getting information on loaded modules, and
50180 + displaying all kernel symbols through a syscall will be restricted
50181 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50182 + /proc/kallsyms will be restricted to the root user. The RBAC
50183 + system can hide that entry even from root.
50184 +
50185 + This option also prevents leaking of kernel addresses through
50186 + several /proc entries.
50187 +
50188 + Note that this option is only effective provided the following
50189 + conditions are met:
50190 + 1) The kernel using grsecurity is not precompiled by some distribution
50191 + 2) You have also enabled GRKERNSEC_DMESG
50192 + 3) You are using the RBAC system and hiding other files such as your
50193 + kernel image and System.map. Alternatively, enabling this option
50194 + causes the permissions on /boot, /lib/modules, and the kernel
50195 + source directory to change at compile time to prevent
50196 + reading by non-root users.
50197 + If the above conditions are met, this option will aid in providing a
50198 + useful protection against local kernel exploitation of overflows
50199 + and arbitrary read/write vulnerabilities.
50200 +
50201 +config GRKERNSEC_KERN_LOCKOUT
50202 + bool "Active kernel exploit response"
50203 + depends on X86 || ARM || PPC || SPARC
50204 + help
50205 + If you say Y here, when a PaX alert is triggered due to suspicious
50206 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50207 + or an OOPs occurs due to bad memory accesses, instead of just
50208 + terminating the offending process (and potentially allowing
50209 + a subsequent exploit from the same user), we will take one of two
50210 + actions:
50211 + If the user was root, we will panic the system
50212 + If the user was non-root, we will log the attempt, terminate
50213 + all processes owned by the user, then prevent them from creating
50214 + any new processes until the system is restarted
50215 + This deters repeated kernel exploitation/bruteforcing attempts
50216 + and is useful for later forensics.
50217 +
50218 +endmenu
50219 +menu "Role Based Access Control Options"
50220 +depends on GRKERNSEC
50221 +
50222 +config GRKERNSEC_RBAC_DEBUG
50223 + bool
50224 +
50225 +config GRKERNSEC_NO_RBAC
50226 + bool "Disable RBAC system"
50227 + help
50228 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50229 + preventing the RBAC system from being enabled. You should only say Y
50230 + here if you have no intention of using the RBAC system, so as to prevent
50231 + an attacker with root access from misusing the RBAC system to hide files
50232 + and processes when loadable module support and /dev/[k]mem have been
50233 + locked down.
50234 +
50235 +config GRKERNSEC_ACL_HIDEKERN
50236 + bool "Hide kernel processes"
50237 + help
50238 + If you say Y here, all kernel threads will be hidden to all
50239 + processes but those whose subject has the "view hidden processes"
50240 + flag.
50241 +
50242 +config GRKERNSEC_ACL_MAXTRIES
50243 + int "Maximum tries before password lockout"
50244 + default 3
50245 + help
50246 + This option enforces the maximum number of times a user can attempt
50247 + to authorize themselves with the grsecurity RBAC system before being
50248 + denied the ability to attempt authorization again for a specified time.
50249 + The lower the number, the harder it will be to brute-force a password.
50250 +
50251 +config GRKERNSEC_ACL_TIMEOUT
50252 + int "Time to wait after max password tries, in seconds"
50253 + default 30
50254 + help
50255 + This option specifies the time the user must wait after attempting to
50256 + authorize to the RBAC system with the maximum number of invalid
50257 + passwords. The higher the number, the harder it will be to brute-force
50258 + a password.
50259 +
50260 +endmenu
50261 +menu "Filesystem Protections"
50262 +depends on GRKERNSEC
50263 +
50264 +config GRKERNSEC_PROC
50265 + bool "Proc restrictions"
50266 + help
50267 + If you say Y here, the permissions of the /proc filesystem
50268 + will be altered to enhance system security and privacy. You MUST
50269 + choose either a user only restriction or a user and group restriction.
50270 + Depending upon the option you choose, you can either restrict users to
50271 + see only the processes they themselves run, or choose a group that can
50272 + view all processes and files normally restricted to root if you choose
50273 + the "restrict to user only" option. NOTE: If you're running identd as
50274 + a non-root user, you will have to run it as the group you specify here.
50275 +
50276 +config GRKERNSEC_PROC_USER
50277 + bool "Restrict /proc to user only"
50278 + depends on GRKERNSEC_PROC
50279 + help
50280 + If you say Y here, non-root users will only be able to view their own
50281 + processes, and will be restricted from viewing network-related
50282 + information and kernel symbol and module information.
50283 +
50284 +config GRKERNSEC_PROC_USERGROUP
50285 + bool "Allow special group"
50286 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50287 + help
50288 + If you say Y here, you will be able to select a group that will be
50289 + able to view all processes and network-related information. If you've
50290 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50291 + remain hidden. This option is useful if you want to run identd as
50292 + a non-root user.
50293 +
50294 +config GRKERNSEC_PROC_GID
50295 + int "GID for special group"
50296 + depends on GRKERNSEC_PROC_USERGROUP
50297 + default 1001
50298 +
50299 +config GRKERNSEC_PROC_ADD
50300 + bool "Additional restrictions"
50301 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50302 + help
50303 + If you say Y here, additional restrictions will be placed on
50304 + /proc that keep normal users from viewing device information and
50305 + slabinfo information that could be useful for exploits.
50306 +
50307 +config GRKERNSEC_LINK
50308 + bool "Linking restrictions"
50309 + help
50310 + If you say Y here, /tmp race exploits will be prevented, since users
50311 + will no longer be able to follow symlinks owned by other users in
50312 + world-writable +t directories (e.g. /tmp), unless the owner of the
50313 + symlink is the owner of the directory. Users will also not be
50314 + able to hardlink to files they do not own. If the sysctl option is
50315 + enabled, a sysctl option with name "linking_restrictions" is created.
50316 +
50317 +config GRKERNSEC_FIFO
50318 + bool "FIFO restrictions"
50319 + help
50320 + If you say Y here, users will not be able to write to FIFOs they don't
50321 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50322 + the FIFO is the same owner of the directory it's held in. If the sysctl
50323 + option is enabled, a sysctl option with name "fifo_restrictions" is
50324 + created.
50325 +
50326 +config GRKERNSEC_SYSFS_RESTRICT
50327 + bool "Sysfs/debugfs restriction"
50328 + depends on SYSFS
50329 + help
50330 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50331 + any filesystem normally mounted under it (e.g. debugfs) will only
50332 + be accessible by root. These filesystems generally provide access
50333 + to hardware and debug information that isn't appropriate for unprivileged
50334 + users of the system. Sysfs and debugfs have also become a large source
50335 + of new vulnerabilities, ranging from infoleaks to local compromise.
50336 + There has been very little oversight with an eye toward security involved
50337 + in adding new exporters of information to these filesystems, so their
50338 + use is discouraged.
50339 + This option is equivalent to a chmod 0700 of the mount paths.
50340 +
50341 +config GRKERNSEC_ROFS
50342 + bool "Runtime read-only mount protection"
50343 + help
50344 + If you say Y here, a sysctl option with name "romount_protect" will
50345 + be created. By setting this option to 1 at runtime, filesystems
50346 + will be protected in the following ways:
50347 + * No new writable mounts will be allowed
50348 + * Existing read-only mounts won't be able to be remounted read/write
50349 + * Write operations will be denied on all block devices
50350 + This option acts independently of grsec_lock: once it is set to 1,
50351 + it cannot be turned off. Therefore, please be mindful of the resulting
50352 + behavior if this option is enabled in an init script on a read-only
50353 + filesystem. This feature is mainly intended for secure embedded systems.
50354 +
50355 +config GRKERNSEC_CHROOT
50356 + bool "Chroot jail restrictions"
50357 + help
50358 + If you say Y here, you will be able to choose several options that will
50359 + make breaking out of a chrooted jail much more difficult. If you
50360 + encounter no software incompatibilities with the following options, it
50361 + is recommended that you enable each one.
50362 +
50363 +config GRKERNSEC_CHROOT_MOUNT
50364 + bool "Deny mounts"
50365 + depends on GRKERNSEC_CHROOT
50366 + help
50367 + If you say Y here, processes inside a chroot will not be able to
50368 + mount or remount filesystems. If the sysctl option is enabled, a
50369 + sysctl option with name "chroot_deny_mount" is created.
50370 +
50371 +config GRKERNSEC_CHROOT_DOUBLE
50372 + bool "Deny double-chroots"
50373 + depends on GRKERNSEC_CHROOT
50374 + help
50375 + If you say Y here, processes inside a chroot will not be able to chroot
50376 + again outside the chroot. This is a widely used method of breaking
50377 + out of a chroot jail and should not be allowed. If the sysctl
50378 + option is enabled, a sysctl option with name
50379 + "chroot_deny_chroot" is created.
50380 +
50381 +config GRKERNSEC_CHROOT_PIVOT
50382 + bool "Deny pivot_root in chroot"
50383 + depends on GRKERNSEC_CHROOT
50384 + help
50385 + If you say Y here, processes inside a chroot will not be able to use
50386 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50387 + works similar to chroot in that it changes the root filesystem. This
50388 + function could be misused in a chrooted process to attempt to break out
50389 + of the chroot, and therefore should not be allowed. If the sysctl
50390 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50391 + created.
50392 +
50393 +config GRKERNSEC_CHROOT_CHDIR
50394 + bool "Enforce chdir(\"/\") on all chroots"
50395 + depends on GRKERNSEC_CHROOT
50396 + help
50397 + If you say Y here, the current working directory of all newly-chrooted
50398 + applications will be set to the root directory of the chroot.
50399 + The man page on chroot(2) states:
50400 + Note that this call does not change the current working
50401 + directory, so that `.' can be outside the tree rooted at
50402 + `/'. In particular, the super-user can escape from a
50403 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50404 +
50405 + It is recommended that you say Y here, since it's not known to break
50406 + any software. If the sysctl option is enabled, a sysctl option with
50407 + name "chroot_enforce_chdir" is created.
50408 +
50409 +config GRKERNSEC_CHROOT_CHMOD
50410 + bool "Deny (f)chmod +s"
50411 + depends on GRKERNSEC_CHROOT
50412 + help
50413 + If you say Y here, processes inside a chroot will not be able to chmod
50414 + or fchmod files to make them have suid or sgid bits. This protects
50415 + against another published method of breaking a chroot. If the sysctl
50416 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50417 + created.
50418 +
50419 +config GRKERNSEC_CHROOT_FCHDIR
50420 + bool "Deny fchdir out of chroot"
50421 + depends on GRKERNSEC_CHROOT
50422 + help
50423 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50424 + to a file descriptor of the chrooting process that points to a directory
50425 + outside the filesystem will be stopped. If the sysctl option
50426 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50427 +
50428 +config GRKERNSEC_CHROOT_MKNOD
50429 + bool "Deny mknod"
50430 + depends on GRKERNSEC_CHROOT
50431 + help
50432 + If you say Y here, processes inside a chroot will not be allowed to
50433 + mknod. The problem with using mknod inside a chroot is that it
50434 + would allow an attacker to create a device entry that is the same
50435 + as one on the physical root of your system, which could range from
50436 + anything from the console device to a device for your harddrive (which
50437 + they could then use to wipe the drive or steal data). It is recommended
50438 + that you say Y here, unless you run into software incompatibilities.
50439 + If the sysctl option is enabled, a sysctl option with name
50440 + "chroot_deny_mknod" is created.
50441 +
50442 +config GRKERNSEC_CHROOT_SHMAT
50443 + bool "Deny shmat() out of chroot"
50444 + depends on GRKERNSEC_CHROOT
50445 + help
50446 + If you say Y here, processes inside a chroot will not be able to attach
50447 + to shared memory segments that were created outside of the chroot jail.
50448 + It is recommended that you say Y here. If the sysctl option is enabled,
50449 + a sysctl option with name "chroot_deny_shmat" is created.
50450 +
50451 +config GRKERNSEC_CHROOT_UNIX
50452 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50453 + depends on GRKERNSEC_CHROOT
50454 + help
50455 + If you say Y here, processes inside a chroot will not be able to
50456 + connect to abstract (meaning not belonging to a filesystem) Unix
50457 + domain sockets that were bound outside of a chroot. It is recommended
50458 + that you say Y here. If the sysctl option is enabled, a sysctl option
50459 + with name "chroot_deny_unix" is created.
50460 +
50461 +config GRKERNSEC_CHROOT_FINDTASK
50462 + bool "Protect outside processes"
50463 + depends on GRKERNSEC_CHROOT
50464 + help
50465 + If you say Y here, processes inside a chroot will not be able to
50466 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50467 + getsid, or view any process outside of the chroot. If the sysctl
50468 + option is enabled, a sysctl option with name "chroot_findtask" is
50469 + created.
50470 +
50471 +config GRKERNSEC_CHROOT_NICE
50472 + bool "Restrict priority changes"
50473 + depends on GRKERNSEC_CHROOT
50474 + help
50475 + If you say Y here, processes inside a chroot will not be able to raise
50476 + the priority of processes in the chroot, or alter the priority of
50477 + processes outside the chroot. This provides more security than simply
50478 + removing CAP_SYS_NICE from the process' capability set. If the
50479 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50480 + is created.
50481 +
50482 +config GRKERNSEC_CHROOT_SYSCTL
50483 + bool "Deny sysctl writes"
50484 + depends on GRKERNSEC_CHROOT
50485 + help
50486 + If you say Y here, an attacker in a chroot will not be able to
50487 + write to sysctl entries, either by sysctl(2) or through a /proc
50488 + interface. It is strongly recommended that you say Y here. If the
50489 + sysctl option is enabled, a sysctl option with name
50490 + "chroot_deny_sysctl" is created.
50491 +
50492 +config GRKERNSEC_CHROOT_CAPS
50493 + bool "Capability restrictions"
50494 + depends on GRKERNSEC_CHROOT
50495 + help
50496 + If you say Y here, the capabilities on all root processes within a
50497 + chroot jail will be lowered to stop module insertion, raw i/o,
50498 + system and net admin tasks, rebooting the system, modifying immutable
50499 + files, modifying IPC owned by another, and changing the system time.
50500 + This is left an option because it can break some apps. Disable this
50501 + if your chrooted apps are having problems performing those kinds of
50502 + tasks. If the sysctl option is enabled, a sysctl option with
50503 + name "chroot_caps" is created.
50504 +
50505 +endmenu
50506 +menu "Kernel Auditing"
50507 +depends on GRKERNSEC
50508 +
50509 +config GRKERNSEC_AUDIT_GROUP
50510 + bool "Single group for auditing"
50511 + help
50512 + If you say Y here, the exec, chdir, and (un)mount logging features
50513 + will only operate on a group you specify. This option is recommended
50514 + if you only want to watch certain users instead of having a large
50515 + amount of logs from the entire system. If the sysctl option is enabled,
50516 + a sysctl option with name "audit_group" is created.
50517 +
50518 +config GRKERNSEC_AUDIT_GID
50519 + int "GID for auditing"
50520 + depends on GRKERNSEC_AUDIT_GROUP
50521 + default 1007
50522 +
50523 +config GRKERNSEC_EXECLOG
50524 + bool "Exec logging"
50525 + help
50526 + If you say Y here, all execve() calls will be logged (since the
50527 + other exec*() calls are frontends to execve(), all execution
50528 + will be logged). Useful for shell-servers that like to keep track
50529 + of their users. If the sysctl option is enabled, a sysctl option with
50530 + name "exec_logging" is created.
50531 + WARNING: This option when enabled will produce a LOT of logs, especially
50532 + on an active system.
50533 +
50534 +config GRKERNSEC_RESLOG
50535 + bool "Resource logging"
50536 + help
50537 + If you say Y here, all attempts to overstep resource limits will
50538 + be logged with the resource name, the requested size, and the current
50539 + limit. It is highly recommended that you say Y here. If the sysctl
50540 + option is enabled, a sysctl option with name "resource_logging" is
50541 + created. If the RBAC system is enabled, the sysctl value is ignored.
50542 +
50543 +config GRKERNSEC_CHROOT_EXECLOG
50544 + bool "Log execs within chroot"
50545 + help
50546 + If you say Y here, all executions inside a chroot jail will be logged
50547 + to syslog. This can cause a large amount of logs if certain
50548 + applications (eg. djb's daemontools) are installed on the system, and
50549 + is therefore left as an option. If the sysctl option is enabled, a
50550 + sysctl option with name "chroot_execlog" is created.
50551 +
50552 +config GRKERNSEC_AUDIT_PTRACE
50553 + bool "Ptrace logging"
50554 + help
50555 + If you say Y here, all attempts to attach to a process via ptrace
50556 + will be logged. If the sysctl option is enabled, a sysctl option
50557 + with name "audit_ptrace" is created.
50558 +
50559 +config GRKERNSEC_AUDIT_CHDIR
50560 + bool "Chdir logging"
50561 + help
50562 + If you say Y here, all chdir() calls will be logged. If the sysctl
50563 + option is enabled, a sysctl option with name "audit_chdir" is created.
50564 +
50565 +config GRKERNSEC_AUDIT_MOUNT
50566 + bool "(Un)Mount logging"
50567 + help
50568 + If you say Y here, all mounts and unmounts will be logged. If the
50569 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50570 + created.
50571 +
50572 +config GRKERNSEC_SIGNAL
50573 + bool "Signal logging"
50574 + help
50575 + If you say Y here, certain important signals will be logged, such as
50576 + SIGSEGV, which will as a result inform you of when an error in a program
50577 + occurred, which in some cases could mean a possible exploit attempt.
50578 + If the sysctl option is enabled, a sysctl option with name
50579 + "signal_logging" is created.
50580 +
50581 +config GRKERNSEC_FORKFAIL
50582 + bool "Fork failure logging"
50583 + help
50584 + If you say Y here, all failed fork() attempts will be logged.
50585 + This could suggest a fork bomb, or someone attempting to overstep
50586 + their process limit. If the sysctl option is enabled, a sysctl option
50587 + with name "forkfail_logging" is created.
50588 +
50589 +config GRKERNSEC_TIME
50590 + bool "Time change logging"
50591 + help
50592 + If you say Y here, any changes of the system clock will be logged.
50593 + If the sysctl option is enabled, a sysctl option with name
50594 + "timechange_logging" is created.
50595 +
50596 +config GRKERNSEC_PROC_IPADDR
50597 + bool "/proc/<pid>/ipaddr support"
50598 + help
50599 + If you say Y here, a new entry will be added to each /proc/<pid>
50600 + directory that contains the IP address of the person using the task.
50601 + The IP is carried across local TCP and AF_UNIX stream sockets.
50602 + This information can be useful for IDS/IPSes to perform remote response
50603 + to a local attack. The entry is readable by only the owner of the
50604 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50605 + the RBAC system), and thus does not create privacy concerns.
50606 +
50607 +config GRKERNSEC_RWXMAP_LOG
50608 + bool 'Denied RWX mmap/mprotect logging'
50609 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50610 + help
50611 + If you say Y here, calls to mmap() and mprotect() with explicit
50612 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50613 + denied by the PAX_MPROTECT feature. If the sysctl option is
50614 + enabled, a sysctl option with name "rwxmap_logging" is created.
50615 +
50616 +config GRKERNSEC_AUDIT_TEXTREL
50617 + bool 'ELF text relocations logging (READ HELP)'
50618 + depends on PAX_MPROTECT
50619 + help
50620 + If you say Y here, text relocations will be logged with the filename
50621 + of the offending library or binary. The purpose of the feature is
50622 + to help Linux distribution developers get rid of libraries and
50623 + binaries that need text relocations which hinder the future progress
50624 + of PaX. Only Linux distribution developers should say Y here, and
50625 + never on a production machine, as this option creates an information
50626 + leak that could aid an attacker in defeating the randomization of
50627 + a single memory region. If the sysctl option is enabled, a sysctl
50628 + option with name "audit_textrel" is created.
50629 +
50630 +endmenu
50631 +
50632 +menu "Executable Protections"
50633 +depends on GRKERNSEC
50634 +
50635 +config GRKERNSEC_DMESG
50636 + bool "Dmesg(8) restriction"
50637 + help
50638 + If you say Y here, non-root users will not be able to use dmesg(8)
50639 + to view up to the last 4kb of messages in the kernel's log buffer.
50640 + The kernel's log buffer often contains kernel addresses and other
50641 + identifying information useful to an attacker in fingerprinting a
50642 + system for a targeted exploit.
50643 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50644 + created.
50645 +
50646 +config GRKERNSEC_HARDEN_PTRACE
50647 + bool "Deter ptrace-based process snooping"
50648 + help
50649 + If you say Y here, TTY sniffers and other malicious monitoring
50650 + programs implemented through ptrace will be defeated. If you
50651 + have been using the RBAC system, this option has already been
50652 + enabled for several years for all users, with the ability to make
50653 + fine-grained exceptions.
50654 +
50655 + This option only affects the ability of non-root users to ptrace
50656 + processes that are not a descendent of the ptracing process.
50657 + This means that strace ./binary and gdb ./binary will still work,
50658 + but attaching to arbitrary processes will not. If the sysctl
50659 + option is enabled, a sysctl option with name "harden_ptrace" is
50660 + created.
50661 +
50662 +config GRKERNSEC_TPE
50663 + bool "Trusted Path Execution (TPE)"
50664 + help
50665 + If you say Y here, you will be able to choose a gid to add to the
50666 + supplementary groups of users you want to mark as "untrusted."
50667 + These users will not be able to execute any files that are not in
50668 + root-owned directories writable only by root. If the sysctl option
50669 + is enabled, a sysctl option with name "tpe" is created.
50670 +
50671 +config GRKERNSEC_TPE_ALL
50672 + bool "Partially restrict all non-root users"
50673 + depends on GRKERNSEC_TPE
50674 + help
50675 + If you say Y here, all non-root users will be covered under
50676 + a weaker TPE restriction. This is separate from, and in addition to,
50677 + the main TPE options that you have selected elsewhere. Thus, if a
50678 + "trusted" GID is chosen, this restriction applies to even that GID.
50679 + Under this restriction, all non-root users will only be allowed to
50680 + execute files in directories they own that are not group or
50681 + world-writable, or in directories owned by root and writable only by
50682 + root. If the sysctl option is enabled, a sysctl option with name
50683 + "tpe_restrict_all" is created.
50684 +
50685 +config GRKERNSEC_TPE_INVERT
50686 + bool "Invert GID option"
50687 + depends on GRKERNSEC_TPE
50688 + help
50689 + If you say Y here, the group you specify in the TPE configuration will
50690 + decide what group TPE restrictions will be *disabled* for. This
50691 + option is useful if you want TPE restrictions to be applied to most
50692 + users on the system. If the sysctl option is enabled, a sysctl option
50693 + with name "tpe_invert" is created. Unlike other sysctl options, this
50694 + entry will default to on for backward-compatibility.
50695 +
50696 +config GRKERNSEC_TPE_GID
50697 + int "GID for untrusted users"
50698 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50699 + default 1005
50700 + help
50701 + Setting this GID determines what group TPE restrictions will be
50702 + *enabled* for. If the sysctl option is enabled, a sysctl option
50703 + with name "tpe_gid" is created.
50704 +
50705 +config GRKERNSEC_TPE_GID
50706 + int "GID for trusted users"
50707 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50708 + default 1005
50709 + help
50710 + Setting this GID determines what group TPE restrictions will be
50711 + *disabled* for. If the sysctl option is enabled, a sysctl option
50712 + with name "tpe_gid" is created.
50713 +
50714 +endmenu
50715 +menu "Network Protections"
50716 +depends on GRKERNSEC
50717 +
50718 +config GRKERNSEC_RANDNET
50719 + bool "Larger entropy pools"
50720 + help
50721 + If you say Y here, the entropy pools used for many features of Linux
50722 + and grsecurity will be doubled in size. Since several grsecurity
50723 + features use additional randomness, it is recommended that you say Y
50724 + here. Saying Y here has a similar effect as modifying
50725 + /proc/sys/kernel/random/poolsize.
50726 +
50727 +config GRKERNSEC_BLACKHOLE
50728 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50729 + depends on NET
50730 + help
50731 + If you say Y here, neither TCP resets nor ICMP
50732 + destination-unreachable packets will be sent in response to packets
50733 + sent to ports for which no associated listening process exists.
50734 + This feature supports both IPV4 and IPV6 and exempts the
50735 + loopback interface from blackholing. Enabling this feature
50736 + makes a host more resilient to DoS attacks and reduces network
50737 + visibility against scanners.
50738 +
50739 + The blackhole feature as-implemented is equivalent to the FreeBSD
50740 + blackhole feature, as it prevents RST responses to all packets, not
50741 + just SYNs. Under most application behavior this causes no
50742 + problems, but applications (like haproxy) may not close certain
50743 + connections in a way that cleanly terminates them on the remote
50744 + end, leaving the remote host in LAST_ACK state. Because of this
50745 + side-effect and to prevent intentional LAST_ACK DoSes, this
50746 + feature also adds automatic mitigation against such attacks.
50747 + The mitigation drastically reduces the amount of time a socket
50748 + can spend in LAST_ACK state. If you're using haproxy and not
50749 + all servers it connects to have this option enabled, consider
50750 + disabling this feature on the haproxy host.
50751 +
50752 + If the sysctl option is enabled, two sysctl options with names
50753 + "ip_blackhole" and "lastack_retries" will be created.
50754 + While "ip_blackhole" takes the standard zero/non-zero on/off
50755 + toggle, "lastack_retries" uses the same kinds of values as
50756 + "tcp_retries1" and "tcp_retries2". The default value of 4
50757 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50758 + state.
50759 +
50760 +config GRKERNSEC_SOCKET
50761 + bool "Socket restrictions"
50762 + depends on NET
50763 + help
50764 + If you say Y here, you will be able to choose from several options.
50765 + If you assign a GID on your system and add it to the supplementary
50766 + groups of users you want to restrict socket access to, this patch
50767 + will perform up to three things, based on the option(s) you choose.
50768 +
50769 +config GRKERNSEC_SOCKET_ALL
50770 + bool "Deny any sockets to group"
50771 + depends on GRKERNSEC_SOCKET
50772 + help
50773 + If you say Y here, you will be able to choose a GID of whose users will
50774 + be unable to connect to other hosts from your machine or run server
50775 + applications from your machine. If the sysctl option is enabled, a
50776 + sysctl option with name "socket_all" is created.
50777 +
50778 +config GRKERNSEC_SOCKET_ALL_GID
50779 + int "GID to deny all sockets for"
50780 + depends on GRKERNSEC_SOCKET_ALL
50781 + default 1004
50782 + help
50783 + Here you can choose the GID to disable socket access for. Remember to
50784 + add the users you want socket access disabled for to the GID
50785 + specified here. If the sysctl option is enabled, a sysctl option
50786 + with name "socket_all_gid" is created.
50787 +
50788 +config GRKERNSEC_SOCKET_CLIENT
50789 + bool "Deny client sockets to group"
50790 + depends on GRKERNSEC_SOCKET
50791 + help
50792 + If you say Y here, you will be able to choose a GID of whose users will
50793 + be unable to connect to other hosts from your machine, but will be
50794 + able to run servers. If this option is enabled, all users in the group
50795 + you specify will have to use passive mode when initiating ftp transfers
50796 + from the shell on your machine. If the sysctl option is enabled, a
50797 + sysctl option with name "socket_client" is created.
50798 +
50799 +config GRKERNSEC_SOCKET_CLIENT_GID
50800 + int "GID to deny client sockets for"
50801 + depends on GRKERNSEC_SOCKET_CLIENT
50802 + default 1003
50803 + help
50804 + Here you can choose the GID to disable client socket access for.
50805 + Remember to add the users you want client socket access disabled for to
50806 + the GID specified here. If the sysctl option is enabled, a sysctl
50807 + option with name "socket_client_gid" is created.
50808 +
50809 +config GRKERNSEC_SOCKET_SERVER
50810 + bool "Deny server sockets to group"
50811 + depends on GRKERNSEC_SOCKET
50812 + help
50813 + If you say Y here, you will be able to choose a GID of whose users will
50814 + be unable to run server applications from your machine. If the sysctl
50815 + option is enabled, a sysctl option with name "socket_server" is created.
50816 +
50817 +config GRKERNSEC_SOCKET_SERVER_GID
50818 + int "GID to deny server sockets for"
50819 + depends on GRKERNSEC_SOCKET_SERVER
50820 + default 1002
50821 + help
50822 + Here you can choose the GID to disable server socket access for.
50823 + Remember to add the users you want server socket access disabled for to
50824 + the GID specified here. If the sysctl option is enabled, a sysctl
50825 + option with name "socket_server_gid" is created.
50826 +
50827 +endmenu
50828 +menu "Sysctl support"
50829 +depends on GRKERNSEC && SYSCTL
50830 +
50831 +config GRKERNSEC_SYSCTL
50832 + bool "Sysctl support"
50833 + help
50834 + If you say Y here, you will be able to change the options that
50835 + grsecurity runs with at bootup, without having to recompile your
50836 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50837 + to enable (1) or disable (0) various features. All the sysctl entries
50838 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50839 + All features enabled in the kernel configuration are disabled at boot
50840 + if you do not say Y to the "Turn on features by default" option.
50841 + All options should be set at startup, and the grsec_lock entry should
50842 + be set to a non-zero value after all the options are set.
50843 + *THIS IS EXTREMELY IMPORTANT*
50844 +
50845 +config GRKERNSEC_SYSCTL_DISTRO
50846 + bool "Extra sysctl support for distro makers (READ HELP)"
50847 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50848 + help
50849 + If you say Y here, additional sysctl options will be created
50850 + for features that affect processes running as root. Therefore,
50851 + it is critical when using this option that the grsec_lock entry be
50852 + enabled after boot. Only distros with prebuilt kernel packages
50853 + with this option enabled that can ensure grsec_lock is enabled
50854 + after boot should use this option.
50855 + *Failure to set grsec_lock after boot makes all grsec features
50856 + this option covers useless*
50857 +
50858 + Currently this option creates the following sysctl entries:
50859 + "Disable Privileged I/O": "disable_priv_io"
50860 +
50861 +config GRKERNSEC_SYSCTL_ON
50862 + bool "Turn on features by default"
50863 + depends on GRKERNSEC_SYSCTL
50864 + help
50865 + If you say Y here, instead of having all features enabled in the
50866 + kernel configuration disabled at boot time, the features will be
50867 + enabled at boot time. It is recommended you say Y here unless
50868 + there is some reason you would want all sysctl-tunable features to
50869 + be disabled by default. As mentioned elsewhere, it is important
50870 + to enable the grsec_lock entry once you have finished modifying
50871 + the sysctl entries.
50872 +
50873 +endmenu
50874 +menu "Logging Options"
50875 +depends on GRKERNSEC
50876 +
50877 +config GRKERNSEC_FLOODTIME
50878 + int "Seconds in between log messages (minimum)"
50879 + default 10
50880 + help
50881 + This option allows you to enforce the number of seconds between
50882 + grsecurity log messages. The default should be suitable for most
50883 + people, however, if you choose to change it, choose a value small enough
50884 + to allow informative logs to be produced, but large enough to
50885 + prevent flooding.
50886 +
50887 +config GRKERNSEC_FLOODBURST
50888 + int "Number of messages in a burst (maximum)"
50889 + default 4
50890 + help
50891 + This option allows you to choose the maximum number of messages allowed
50892 + within the flood time interval you chose in a separate option. The
50893 + default should be suitable for most people, however if you find that
50894 + many of your logs are being interpreted as flooding, you may want to
50895 + raise this value.
50896 +
50897 +endmenu
50898 +
50899 +endmenu
50900 diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
50901 --- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
50902 +++ linux-3.0.4/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
50903 @@ -0,0 +1,34 @@
50904 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50905 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50906 +# into an RBAC system
50907 +#
50908 +# All code in this directory and various hooks inserted throughout the kernel
50909 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50910 +# under the GPL v2 or higher
50911 +
50912 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50913 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50914 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50915 +
50916 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50917 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50918 + gracl_learn.o grsec_log.o
50919 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50920 +
50921 +ifdef CONFIG_NET
50922 +obj-y += grsec_sock.o
50923 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50924 +endif
50925 +
50926 +ifndef CONFIG_GRKERNSEC
50927 +obj-y += grsec_disabled.o
50928 +endif
50929 +
50930 +ifdef CONFIG_GRKERNSEC_HIDESYM
50931 +extra-y := grsec_hidesym.o
50932 +$(obj)/grsec_hidesym.o:
50933 + @-chmod -f 500 /boot
50934 + @-chmod -f 500 /lib/modules
50935 + @-chmod -f 700 .
50936 + @echo ' grsec: protected kernel image paths'
50937 +endif
50938 diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
50939 --- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
50940 +++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
50941 @@ -107,7 +107,7 @@ struct acpi_device_ops {
50942 acpi_op_bind bind;
50943 acpi_op_unbind unbind;
50944 acpi_op_notify notify;
50945 -};
50946 +} __no_const;
50947
50948 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
50949
50950 diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
50951 --- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
50952 +++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
50953 @@ -22,6 +22,12 @@
50954
50955 typedef atomic64_t atomic_long_t;
50956
50957 +#ifdef CONFIG_PAX_REFCOUNT
50958 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
50959 +#else
50960 +typedef atomic64_t atomic_long_unchecked_t;
50961 +#endif
50962 +
50963 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
50964
50965 static inline long atomic_long_read(atomic_long_t *l)
50966 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
50967 return (long)atomic64_read(v);
50968 }
50969
50970 +#ifdef CONFIG_PAX_REFCOUNT
50971 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
50972 +{
50973 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50974 +
50975 + return (long)atomic64_read_unchecked(v);
50976 +}
50977 +#endif
50978 +
50979 static inline void atomic_long_set(atomic_long_t *l, long i)
50980 {
50981 atomic64_t *v = (atomic64_t *)l;
50982 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
50983 atomic64_set(v, i);
50984 }
50985
50986 +#ifdef CONFIG_PAX_REFCOUNT
50987 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
50988 +{
50989 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50990 +
50991 + atomic64_set_unchecked(v, i);
50992 +}
50993 +#endif
50994 +
50995 static inline void atomic_long_inc(atomic_long_t *l)
50996 {
50997 atomic64_t *v = (atomic64_t *)l;
50998 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
50999 atomic64_inc(v);
51000 }
51001
51002 +#ifdef CONFIG_PAX_REFCOUNT
51003 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51004 +{
51005 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51006 +
51007 + atomic64_inc_unchecked(v);
51008 +}
51009 +#endif
51010 +
51011 static inline void atomic_long_dec(atomic_long_t *l)
51012 {
51013 atomic64_t *v = (atomic64_t *)l;
51014 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51015 atomic64_dec(v);
51016 }
51017
51018 +#ifdef CONFIG_PAX_REFCOUNT
51019 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51020 +{
51021 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51022 +
51023 + atomic64_dec_unchecked(v);
51024 +}
51025 +#endif
51026 +
51027 static inline void atomic_long_add(long i, atomic_long_t *l)
51028 {
51029 atomic64_t *v = (atomic64_t *)l;
51030 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51031 atomic64_add(i, v);
51032 }
51033
51034 +#ifdef CONFIG_PAX_REFCOUNT
51035 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51036 +{
51037 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51038 +
51039 + atomic64_add_unchecked(i, v);
51040 +}
51041 +#endif
51042 +
51043 static inline void atomic_long_sub(long i, atomic_long_t *l)
51044 {
51045 atomic64_t *v = (atomic64_t *)l;
51046 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51047 atomic64_sub(i, v);
51048 }
51049
51050 +#ifdef CONFIG_PAX_REFCOUNT
51051 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51052 +{
51053 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51054 +
51055 + atomic64_sub_unchecked(i, v);
51056 +}
51057 +#endif
51058 +
51059 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51060 {
51061 atomic64_t *v = (atomic64_t *)l;
51062 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51063 return (long)atomic64_inc_return(v);
51064 }
51065
51066 +#ifdef CONFIG_PAX_REFCOUNT
51067 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51068 +{
51069 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51070 +
51071 + return (long)atomic64_inc_return_unchecked(v);
51072 +}
51073 +#endif
51074 +
51075 static inline long atomic_long_dec_return(atomic_long_t *l)
51076 {
51077 atomic64_t *v = (atomic64_t *)l;
51078 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51079
51080 typedef atomic_t atomic_long_t;
51081
51082 +#ifdef CONFIG_PAX_REFCOUNT
51083 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51084 +#else
51085 +typedef atomic_t atomic_long_unchecked_t;
51086 +#endif
51087 +
51088 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51089 static inline long atomic_long_read(atomic_long_t *l)
51090 {
51091 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51092 return (long)atomic_read(v);
51093 }
51094
51095 +#ifdef CONFIG_PAX_REFCOUNT
51096 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51097 +{
51098 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51099 +
51100 + return (long)atomic_read_unchecked(v);
51101 +}
51102 +#endif
51103 +
51104 static inline void atomic_long_set(atomic_long_t *l, long i)
51105 {
51106 atomic_t *v = (atomic_t *)l;
51107 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51108 atomic_set(v, i);
51109 }
51110
51111 +#ifdef CONFIG_PAX_REFCOUNT
51112 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51113 +{
51114 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51115 +
51116 + atomic_set_unchecked(v, i);
51117 +}
51118 +#endif
51119 +
51120 static inline void atomic_long_inc(atomic_long_t *l)
51121 {
51122 atomic_t *v = (atomic_t *)l;
51123 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51124 atomic_inc(v);
51125 }
51126
51127 +#ifdef CONFIG_PAX_REFCOUNT
51128 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51129 +{
51130 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51131 +
51132 + atomic_inc_unchecked(v);
51133 +}
51134 +#endif
51135 +
51136 static inline void atomic_long_dec(atomic_long_t *l)
51137 {
51138 atomic_t *v = (atomic_t *)l;
51139 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51140 atomic_dec(v);
51141 }
51142
51143 +#ifdef CONFIG_PAX_REFCOUNT
51144 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51145 +{
51146 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51147 +
51148 + atomic_dec_unchecked(v);
51149 +}
51150 +#endif
51151 +
51152 static inline void atomic_long_add(long i, atomic_long_t *l)
51153 {
51154 atomic_t *v = (atomic_t *)l;
51155 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51156 atomic_add(i, v);
51157 }
51158
51159 +#ifdef CONFIG_PAX_REFCOUNT
51160 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51161 +{
51162 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51163 +
51164 + atomic_add_unchecked(i, v);
51165 +}
51166 +#endif
51167 +
51168 static inline void atomic_long_sub(long i, atomic_long_t *l)
51169 {
51170 atomic_t *v = (atomic_t *)l;
51171 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51172 atomic_sub(i, v);
51173 }
51174
51175 +#ifdef CONFIG_PAX_REFCOUNT
51176 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51177 +{
51178 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51179 +
51180 + atomic_sub_unchecked(i, v);
51181 +}
51182 +#endif
51183 +
51184 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51185 {
51186 atomic_t *v = (atomic_t *)l;
51187 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51188 return (long)atomic_inc_return(v);
51189 }
51190
51191 +#ifdef CONFIG_PAX_REFCOUNT
51192 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51193 +{
51194 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51195 +
51196 + return (long)atomic_inc_return_unchecked(v);
51197 +}
51198 +#endif
51199 +
51200 static inline long atomic_long_dec_return(atomic_long_t *l)
51201 {
51202 atomic_t *v = (atomic_t *)l;
51203 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51204
51205 #endif /* BITS_PER_LONG == 64 */
51206
51207 +#ifdef CONFIG_PAX_REFCOUNT
51208 +static inline void pax_refcount_needs_these_functions(void)
51209 +{
51210 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51211 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51212 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51213 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51214 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51215 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51216 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51217 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51218 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51219 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51220 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51221 +
51222 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51223 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51224 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51225 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51226 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51227 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51228 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51229 +}
51230 +#else
51231 +#define atomic_read_unchecked(v) atomic_read(v)
51232 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51233 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51234 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51235 +#define atomic_inc_unchecked(v) atomic_inc(v)
51236 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51237 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51238 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51239 +#define atomic_dec_unchecked(v) atomic_dec(v)
51240 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51241 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51242 +
51243 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51244 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51245 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51246 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51247 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51248 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51249 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51250 +#endif
51251 +
51252 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51253 diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
51254 --- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
51255 +++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
51256 @@ -6,7 +6,7 @@
51257 * cache lines need to provide their own cache.h.
51258 */
51259
51260 -#define L1_CACHE_SHIFT 5
51261 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51262 +#define L1_CACHE_SHIFT 5UL
51263 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51264
51265 #endif /* __ASM_GENERIC_CACHE_H */
51266 diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
51267 --- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
51268 +++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
51269 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51270 typedef signed long s64;
51271 typedef unsigned long u64;
51272
51273 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51274 +
51275 #define S8_C(x) x
51276 #define U8_C(x) x ## U
51277 #define S16_C(x) x
51278 diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
51279 --- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
51280 +++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
51281 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51282 typedef signed long long s64;
51283 typedef unsigned long long u64;
51284
51285 +typedef unsigned long long intoverflow_t;
51286 +
51287 #define S8_C(x) x
51288 #define U8_C(x) x ## U
51289 #define S16_C(x) x
51290 diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
51291 --- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
51292 +++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
51293 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51294 KMAP_D(17) KM_NMI,
51295 KMAP_D(18) KM_NMI_PTE,
51296 KMAP_D(19) KM_KDB,
51297 +KMAP_D(20) KM_CLEARPAGE,
51298 /*
51299 * Remember to update debug_kmap_atomic() when adding new kmap types!
51300 */
51301 -KMAP_D(20) KM_TYPE_NR
51302 +KMAP_D(21) KM_TYPE_NR
51303 };
51304
51305 #undef KMAP_D
51306 diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
51307 --- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
51308 +++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
51309 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
51310 #endif /* __HAVE_ARCH_PMD_WRITE */
51311 #endif
51312
51313 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51314 +static inline unsigned long pax_open_kernel(void) { return 0; }
51315 +#endif
51316 +
51317 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51318 +static inline unsigned long pax_close_kernel(void) { return 0; }
51319 +#endif
51320 +
51321 #endif /* !__ASSEMBLY__ */
51322
51323 #endif /* _ASM_GENERIC_PGTABLE_H */
51324 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
51325 --- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
51326 +++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
51327 @@ -1,14 +1,19 @@
51328 #ifndef _PGTABLE_NOPMD_H
51329 #define _PGTABLE_NOPMD_H
51330
51331 -#ifndef __ASSEMBLY__
51332 -
51333 #include <asm-generic/pgtable-nopud.h>
51334
51335 -struct mm_struct;
51336 -
51337 #define __PAGETABLE_PMD_FOLDED
51338
51339 +#define PMD_SHIFT PUD_SHIFT
51340 +#define PTRS_PER_PMD 1
51341 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51342 +#define PMD_MASK (~(PMD_SIZE-1))
51343 +
51344 +#ifndef __ASSEMBLY__
51345 +
51346 +struct mm_struct;
51347 +
51348 /*
51349 * Having the pmd type consist of a pud gets the size right, and allows
51350 * us to conceptually access the pud entry that this pmd is folded into
51351 @@ -16,11 +21,6 @@ struct mm_struct;
51352 */
51353 typedef struct { pud_t pud; } pmd_t;
51354
51355 -#define PMD_SHIFT PUD_SHIFT
51356 -#define PTRS_PER_PMD 1
51357 -#define PMD_SIZE (1UL << PMD_SHIFT)
51358 -#define PMD_MASK (~(PMD_SIZE-1))
51359 -
51360 /*
51361 * The "pud_xxx()" functions here are trivial for a folded two-level
51362 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51363 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
51364 --- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
51365 +++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
51366 @@ -1,10 +1,15 @@
51367 #ifndef _PGTABLE_NOPUD_H
51368 #define _PGTABLE_NOPUD_H
51369
51370 -#ifndef __ASSEMBLY__
51371 -
51372 #define __PAGETABLE_PUD_FOLDED
51373
51374 +#define PUD_SHIFT PGDIR_SHIFT
51375 +#define PTRS_PER_PUD 1
51376 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51377 +#define PUD_MASK (~(PUD_SIZE-1))
51378 +
51379 +#ifndef __ASSEMBLY__
51380 +
51381 /*
51382 * Having the pud type consist of a pgd gets the size right, and allows
51383 * us to conceptually access the pgd entry that this pud is folded into
51384 @@ -12,11 +17,6 @@
51385 */
51386 typedef struct { pgd_t pgd; } pud_t;
51387
51388 -#define PUD_SHIFT PGDIR_SHIFT
51389 -#define PTRS_PER_PUD 1
51390 -#define PUD_SIZE (1UL << PUD_SHIFT)
51391 -#define PUD_MASK (~(PUD_SIZE-1))
51392 -
51393 /*
51394 * The "pgd_xxx()" functions here are trivial for a folded two-level
51395 * setup: the pud is never bad, and a pud always exists (as it's folded
51396 diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
51397 --- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
51398 +++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
51399 @@ -217,6 +217,7 @@
51400 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51401 VMLINUX_SYMBOL(__start_rodata) = .; \
51402 *(.rodata) *(.rodata.*) \
51403 + *(.data..read_only) \
51404 *(__vermagic) /* Kernel version magic */ \
51405 . = ALIGN(8); \
51406 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51407 @@ -723,17 +724,18 @@
51408 * section in the linker script will go there too. @phdr should have
51409 * a leading colon.
51410 *
51411 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51412 + * Note that this macros defines per_cpu_load as an absolute symbol.
51413 * If there is no need to put the percpu section at a predetermined
51414 * address, use PERCPU_SECTION.
51415 */
51416 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51417 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51418 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51419 + per_cpu_load = .; \
51420 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51421 - LOAD_OFFSET) { \
51422 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51423 PERCPU_INPUT(cacheline) \
51424 } phdr \
51425 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51426 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51427
51428 /**
51429 * PERCPU_SECTION - define output section for percpu area, simple version
51430 diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
51431 --- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
51432 +++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
51433 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51434
51435 /* disable crtc when not in use - more explicit than dpms off */
51436 void (*disable)(struct drm_crtc *crtc);
51437 -};
51438 +} __no_const;
51439
51440 struct drm_encoder_helper_funcs {
51441 void (*dpms)(struct drm_encoder *encoder, int mode);
51442 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51443 struct drm_connector *connector);
51444 /* disable encoder when not in use - more explicit than dpms off */
51445 void (*disable)(struct drm_encoder *encoder);
51446 -};
51447 +} __no_const;
51448
51449 struct drm_connector_helper_funcs {
51450 int (*get_modes)(struct drm_connector *connector);
51451 diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
51452 --- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
51453 +++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
51454 @@ -73,6 +73,7 @@
51455 #include <linux/workqueue.h>
51456 #include <linux/poll.h>
51457 #include <asm/pgalloc.h>
51458 +#include <asm/local.h>
51459 #include "drm.h"
51460
51461 #include <linux/idr.h>
51462 @@ -1033,7 +1034,7 @@ struct drm_device {
51463
51464 /** \name Usage Counters */
51465 /*@{ */
51466 - int open_count; /**< Outstanding files open */
51467 + local_t open_count; /**< Outstanding files open */
51468 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51469 atomic_t vma_count; /**< Outstanding vma areas open */
51470 int buf_use; /**< Buffers in use -- cannot alloc */
51471 @@ -1044,7 +1045,7 @@ struct drm_device {
51472 /*@{ */
51473 unsigned long counters;
51474 enum drm_stat_type types[15];
51475 - atomic_t counts[15];
51476 + atomic_unchecked_t counts[15];
51477 /*@} */
51478
51479 struct list_head filelist;
51480 diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
51481 --- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
51482 +++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
51483 @@ -47,7 +47,7 @@
51484
51485 struct ttm_mem_shrink {
51486 int (*do_shrink) (struct ttm_mem_shrink *);
51487 -};
51488 +} __no_const;
51489
51490 /**
51491 * struct ttm_mem_global - Global memory accounting structure.
51492 diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
51493 --- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
51494 +++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
51495 @@ -39,6 +39,14 @@ enum machine_type {
51496 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51497 };
51498
51499 +/* Constants for the N_FLAGS field */
51500 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51501 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51502 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51503 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51504 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51505 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51506 +
51507 #if !defined (N_MAGIC)
51508 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51509 #endif
51510 diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
51511 --- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
51512 +++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
51513 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51514 #endif
51515
51516 struct k_atm_aal_stats {
51517 -#define __HANDLE_ITEM(i) atomic_t i
51518 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51519 __AAL_STAT_ITEMS
51520 #undef __HANDLE_ITEM
51521 };
51522 diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
51523 --- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
51524 +++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
51525 @@ -88,6 +88,7 @@ struct linux_binfmt {
51526 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51527 int (*load_shlib)(struct file *);
51528 int (*core_dump)(struct coredump_params *cprm);
51529 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51530 unsigned long min_coredump; /* minimal dump size */
51531 };
51532
51533 diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
51534 --- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
51535 +++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
51536 @@ -1308,7 +1308,7 @@ struct block_device_operations {
51537 /* this callback is with swap_lock and sometimes page table lock held */
51538 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51539 struct module *owner;
51540 -};
51541 +} __do_const;
51542
51543 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51544 unsigned long);
51545 diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
51546 --- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
51547 +++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
51548 @@ -161,7 +161,7 @@ struct blk_trace {
51549 struct dentry *dir;
51550 struct dentry *dropped_file;
51551 struct dentry *msg_file;
51552 - atomic_t dropped;
51553 + atomic_unchecked_t dropped;
51554 };
51555
51556 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51557 diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
51558 --- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
51559 +++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
51560 @@ -42,51 +42,51 @@
51561
51562 static inline __le64 __cpu_to_le64p(const __u64 *p)
51563 {
51564 - return (__force __le64)*p;
51565 + return (__force const __le64)*p;
51566 }
51567 static inline __u64 __le64_to_cpup(const __le64 *p)
51568 {
51569 - return (__force __u64)*p;
51570 + return (__force const __u64)*p;
51571 }
51572 static inline __le32 __cpu_to_le32p(const __u32 *p)
51573 {
51574 - return (__force __le32)*p;
51575 + return (__force const __le32)*p;
51576 }
51577 static inline __u32 __le32_to_cpup(const __le32 *p)
51578 {
51579 - return (__force __u32)*p;
51580 + return (__force const __u32)*p;
51581 }
51582 static inline __le16 __cpu_to_le16p(const __u16 *p)
51583 {
51584 - return (__force __le16)*p;
51585 + return (__force const __le16)*p;
51586 }
51587 static inline __u16 __le16_to_cpup(const __le16 *p)
51588 {
51589 - return (__force __u16)*p;
51590 + return (__force const __u16)*p;
51591 }
51592 static inline __be64 __cpu_to_be64p(const __u64 *p)
51593 {
51594 - return (__force __be64)__swab64p(p);
51595 + return (__force const __be64)__swab64p(p);
51596 }
51597 static inline __u64 __be64_to_cpup(const __be64 *p)
51598 {
51599 - return __swab64p((__u64 *)p);
51600 + return __swab64p((const __u64 *)p);
51601 }
51602 static inline __be32 __cpu_to_be32p(const __u32 *p)
51603 {
51604 - return (__force __be32)__swab32p(p);
51605 + return (__force const __be32)__swab32p(p);
51606 }
51607 static inline __u32 __be32_to_cpup(const __be32 *p)
51608 {
51609 - return __swab32p((__u32 *)p);
51610 + return __swab32p((const __u32 *)p);
51611 }
51612 static inline __be16 __cpu_to_be16p(const __u16 *p)
51613 {
51614 - return (__force __be16)__swab16p(p);
51615 + return (__force const __be16)__swab16p(p);
51616 }
51617 static inline __u16 __be16_to_cpup(const __be16 *p)
51618 {
51619 - return __swab16p((__u16 *)p);
51620 + return __swab16p((const __u16 *)p);
51621 }
51622 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51623 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51624 diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
51625 --- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
51626 +++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
51627 @@ -16,6 +16,10 @@
51628 #define __read_mostly
51629 #endif
51630
51631 +#ifndef __read_only
51632 +#define __read_only __read_mostly
51633 +#endif
51634 +
51635 #ifndef ____cacheline_aligned
51636 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51637 #endif
51638 diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
51639 --- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
51640 +++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
51641 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51642 extern bool ns_capable(struct user_namespace *ns, int cap);
51643 extern bool task_ns_capable(struct task_struct *t, int cap);
51644 extern bool nsown_capable(int cap);
51645 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51646 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51647 +extern bool capable_nolog(int cap);
51648
51649 /* audit system wants to get cap info from files as well */
51650 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51651 diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
51652 --- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
51653 +++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
51654 @@ -31,7 +31,7 @@ struct cleancache_ops {
51655 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
51656 void (*flush_inode)(int, struct cleancache_filekey);
51657 void (*flush_fs)(int);
51658 -};
51659 +} __no_const;
51660
51661 extern struct cleancache_ops
51662 cleancache_register_ops(struct cleancache_ops *ops);
51663 diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
51664 --- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
51665 +++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
51666 @@ -31,6 +31,12 @@
51667
51668
51669 #if __GNUC_MINOR__ >= 5
51670 +
51671 +#ifdef CONSTIFY_PLUGIN
51672 +#define __no_const __attribute__((no_const))
51673 +#define __do_const __attribute__((do_const))
51674 +#endif
51675 +
51676 /*
51677 * Mark a position in code as unreachable. This can be used to
51678 * suppress control flow warnings after asm blocks that transfer
51679 @@ -46,6 +52,11 @@
51680 #define __noclone __attribute__((__noclone__))
51681
51682 #endif
51683 +
51684 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51685 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51686 +#define __bos0(ptr) __bos((ptr), 0)
51687 +#define __bos1(ptr) __bos((ptr), 1)
51688 #endif
51689
51690 #if __GNUC_MINOR__ > 0
51691 diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
51692 --- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
51693 +++ linux-3.0.4/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
51694 @@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
51695 # define __attribute_const__ /* unimplemented */
51696 #endif
51697
51698 +#ifndef __no_const
51699 +# define __no_const
51700 +#endif
51701 +
51702 +#ifndef __do_const
51703 +# define __do_const
51704 +#endif
51705 +
51706 /*
51707 * Tell gcc if a function is cold. The compiler will assume any path
51708 * directly leading to the call is unlikely.
51709 @@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
51710 #define __cold
51711 #endif
51712
51713 +#ifndef __alloc_size
51714 +#define __alloc_size(...)
51715 +#endif
51716 +
51717 +#ifndef __bos
51718 +#define __bos(ptr, arg)
51719 +#endif
51720 +
51721 +#ifndef __bos0
51722 +#define __bos0(ptr)
51723 +#endif
51724 +
51725 +#ifndef __bos1
51726 +#define __bos1(ptr)
51727 +#endif
51728 +
51729 /* Simple shorthand for a section definition */
51730 #ifndef __section
51731 # define __section(S) __attribute__ ((__section__(#S)))
51732 @@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
51733 * use is to mediate communication between process-level code and irq/NMI
51734 * handlers, all running on the same CPU.
51735 */
51736 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51737 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51738 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51739
51740 #endif /* __LINUX_COMPILER_H */
51741 diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
51742 --- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
51743 +++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
51744 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51745 * nodemask.
51746 */
51747 smp_mb();
51748 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51749 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51750 }
51751
51752 static inline void set_mems_allowed(nodemask_t nodemask)
51753 diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
51754 --- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
51755 +++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
51756 @@ -361,7 +361,7 @@ struct cipher_tfm {
51757 const u8 *key, unsigned int keylen);
51758 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51759 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51760 -};
51761 +} __no_const;
51762
51763 struct hash_tfm {
51764 int (*init)(struct hash_desc *desc);
51765 @@ -382,13 +382,13 @@ struct compress_tfm {
51766 int (*cot_decompress)(struct crypto_tfm *tfm,
51767 const u8 *src, unsigned int slen,
51768 u8 *dst, unsigned int *dlen);
51769 -};
51770 +} __no_const;
51771
51772 struct rng_tfm {
51773 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51774 unsigned int dlen);
51775 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51776 -};
51777 +} __no_const;
51778
51779 #define crt_ablkcipher crt_u.ablkcipher
51780 #define crt_aead crt_u.aead
51781 diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
51782 --- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
51783 +++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
51784 @@ -77,7 +77,7 @@ static void free(void *where)
51785 * warnings when not needed (indeed large_malloc / large_free are not
51786 * needed by inflate */
51787
51788 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51789 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51790 #define free(a) kfree(a)
51791
51792 #define large_malloc(a) vmalloc(a)
51793 diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
51794 --- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
51795 +++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
51796 @@ -50,7 +50,7 @@ struct dma_map_ops {
51797 int (*dma_supported)(struct device *dev, u64 mask);
51798 int (*set_dma_mask)(struct device *dev, u64 mask);
51799 int is_phys;
51800 -};
51801 +} __do_const;
51802
51803 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51804
51805 diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
51806 --- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
51807 +++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
51808 @@ -410,7 +410,7 @@ struct efivar_operations {
51809 efi_get_variable_t *get_variable;
51810 efi_get_next_variable_t *get_next_variable;
51811 efi_set_variable_t *set_variable;
51812 -};
51813 +} __no_const;
51814
51815 struct efivars {
51816 /*
51817 diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
51818 --- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
51819 +++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
51820 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51821 #define PT_GNU_EH_FRAME 0x6474e550
51822
51823 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51824 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51825 +
51826 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51827 +
51828 +/* Constants for the e_flags field */
51829 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51830 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51831 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51832 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51833 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51834 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51835
51836 /*
51837 * Extended Numbering
51838 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51839 #define DT_DEBUG 21
51840 #define DT_TEXTREL 22
51841 #define DT_JMPREL 23
51842 +#define DT_FLAGS 30
51843 + #define DF_TEXTREL 0x00000004
51844 #define DT_ENCODING 32
51845 #define OLD_DT_LOOS 0x60000000
51846 #define DT_LOOS 0x6000000d
51847 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51848 #define PF_W 0x2
51849 #define PF_X 0x1
51850
51851 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51852 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51853 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51854 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51855 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51856 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51857 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51858 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51859 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51860 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51861 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51862 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51863 +
51864 typedef struct elf32_phdr{
51865 Elf32_Word p_type;
51866 Elf32_Off p_offset;
51867 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51868 #define EI_OSABI 7
51869 #define EI_PAD 8
51870
51871 +#define EI_PAX 14
51872 +
51873 #define ELFMAG0 0x7f /* EI_MAG */
51874 #define ELFMAG1 'E'
51875 #define ELFMAG2 'L'
51876 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
51877 #define elf_note elf32_note
51878 #define elf_addr_t Elf32_Off
51879 #define Elf_Half Elf32_Half
51880 +#define elf_dyn Elf32_Dyn
51881
51882 #else
51883
51884 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
51885 #define elf_note elf64_note
51886 #define elf_addr_t Elf64_Off
51887 #define Elf_Half Elf64_Half
51888 +#define elf_dyn Elf64_Dyn
51889
51890 #endif
51891
51892 diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
51893 --- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
51894 +++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
51895 @@ -428,7 +428,7 @@ struct fw_iso_context {
51896 union {
51897 fw_iso_callback_t sc;
51898 fw_iso_mc_callback_t mc;
51899 - } callback;
51900 + } __no_const callback;
51901 void *callback_data;
51902 };
51903
51904 diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
51905 --- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
51906 +++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
51907 @@ -102,7 +102,7 @@ struct fscache_operation {
51908 fscache_operation_release_t release;
51909 };
51910
51911 -extern atomic_t fscache_op_debug_id;
51912 +extern atomic_unchecked_t fscache_op_debug_id;
51913 extern void fscache_op_work_func(struct work_struct *work);
51914
51915 extern void fscache_enqueue_operation(struct fscache_operation *);
51916 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
51917 {
51918 INIT_WORK(&op->work, fscache_op_work_func);
51919 atomic_set(&op->usage, 1);
51920 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
51921 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51922 op->processor = processor;
51923 op->release = release;
51924 INIT_LIST_HEAD(&op->pend_link);
51925 diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
51926 --- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
51927 +++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
51928 @@ -109,6 +109,11 @@ struct inodes_stat_t {
51929 /* File was opened by fanotify and shouldn't generate fanotify events */
51930 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
51931
51932 +/* Hack for grsec so as not to require read permission simply to execute
51933 + * a binary
51934 + */
51935 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
51936 +
51937 /*
51938 * The below are the various read and write types that we support. Some of
51939 * them include behavioral modifiers that send information down to the
51940 @@ -1571,7 +1576,8 @@ struct file_operations {
51941 int (*setlease)(struct file *, long, struct file_lock **);
51942 long (*fallocate)(struct file *file, int mode, loff_t offset,
51943 loff_t len);
51944 -};
51945 +} __do_const;
51946 +typedef struct file_operations __no_const file_operations_no_const;
51947
51948 #define IPERM_FLAG_RCU 0x0001
51949
51950 diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
51951 --- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
51952 +++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
51953 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
51954 */
51955 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
51956 {
51957 - return kstrdup(name, GFP_KERNEL);
51958 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
51959 }
51960
51961 /*
51962 diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
51963 --- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
51964 +++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
51965 @@ -6,7 +6,7 @@
51966 #include <linux/seqlock.h>
51967
51968 struct fs_struct {
51969 - int users;
51970 + atomic_t users;
51971 spinlock_t lock;
51972 seqcount_t seq;
51973 int umask;
51974 diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
51975 --- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
51976 +++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
51977 @@ -96,7 +96,7 @@ struct trace_event_functions {
51978 trace_print_func raw;
51979 trace_print_func hex;
51980 trace_print_func binary;
51981 -};
51982 +} __no_const;
51983
51984 struct trace_event {
51985 struct hlist_node node;
51986 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
51987 extern int trace_add_event_call(struct ftrace_event_call *call);
51988 extern void trace_remove_event_call(struct ftrace_event_call *call);
51989
51990 -#define is_signed_type(type) (((type)(-1)) < 0)
51991 +#define is_signed_type(type) (((type)(-1)) < (type)1)
51992
51993 int trace_set_clr_event(const char *system, const char *event, int set);
51994
51995 diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
51996 --- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
51997 +++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
51998 @@ -184,7 +184,7 @@ struct gendisk {
51999 struct kobject *slave_dir;
52000
52001 struct timer_rand_state *random;
52002 - atomic_t sync_io; /* RAID */
52003 + atomic_unchecked_t sync_io; /* RAID */
52004 struct disk_events *ev;
52005 #ifdef CONFIG_BLK_DEV_INTEGRITY
52006 struct blk_integrity *integrity;
52007 diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
52008 --- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52009 +++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
52010 @@ -0,0 +1,317 @@
52011 +#ifndef GR_ACL_H
52012 +#define GR_ACL_H
52013 +
52014 +#include <linux/grdefs.h>
52015 +#include <linux/resource.h>
52016 +#include <linux/capability.h>
52017 +#include <linux/dcache.h>
52018 +#include <asm/resource.h>
52019 +
52020 +/* Major status information */
52021 +
52022 +#define GR_VERSION "grsecurity 2.2.2"
52023 +#define GRSECURITY_VERSION 0x2202
52024 +
52025 +enum {
52026 + GR_SHUTDOWN = 0,
52027 + GR_ENABLE = 1,
52028 + GR_SPROLE = 2,
52029 + GR_RELOAD = 3,
52030 + GR_SEGVMOD = 4,
52031 + GR_STATUS = 5,
52032 + GR_UNSPROLE = 6,
52033 + GR_PASSSET = 7,
52034 + GR_SPROLEPAM = 8,
52035 +};
52036 +
52037 +/* Password setup definitions
52038 + * kernel/grhash.c */
52039 +enum {
52040 + GR_PW_LEN = 128,
52041 + GR_SALT_LEN = 16,
52042 + GR_SHA_LEN = 32,
52043 +};
52044 +
52045 +enum {
52046 + GR_SPROLE_LEN = 64,
52047 +};
52048 +
52049 +enum {
52050 + GR_NO_GLOB = 0,
52051 + GR_REG_GLOB,
52052 + GR_CREATE_GLOB
52053 +};
52054 +
52055 +#define GR_NLIMITS 32
52056 +
52057 +/* Begin Data Structures */
52058 +
52059 +struct sprole_pw {
52060 + unsigned char *rolename;
52061 + unsigned char salt[GR_SALT_LEN];
52062 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52063 +};
52064 +
52065 +struct name_entry {
52066 + __u32 key;
52067 + ino_t inode;
52068 + dev_t device;
52069 + char *name;
52070 + __u16 len;
52071 + __u8 deleted;
52072 + struct name_entry *prev;
52073 + struct name_entry *next;
52074 +};
52075 +
52076 +struct inodev_entry {
52077 + struct name_entry *nentry;
52078 + struct inodev_entry *prev;
52079 + struct inodev_entry *next;
52080 +};
52081 +
52082 +struct acl_role_db {
52083 + struct acl_role_label **r_hash;
52084 + __u32 r_size;
52085 +};
52086 +
52087 +struct inodev_db {
52088 + struct inodev_entry **i_hash;
52089 + __u32 i_size;
52090 +};
52091 +
52092 +struct name_db {
52093 + struct name_entry **n_hash;
52094 + __u32 n_size;
52095 +};
52096 +
52097 +struct crash_uid {
52098 + uid_t uid;
52099 + unsigned long expires;
52100 +};
52101 +
52102 +struct gr_hash_struct {
52103 + void **table;
52104 + void **nametable;
52105 + void *first;
52106 + __u32 table_size;
52107 + __u32 used_size;
52108 + int type;
52109 +};
52110 +
52111 +/* Userspace Grsecurity ACL data structures */
52112 +
52113 +struct acl_subject_label {
52114 + char *filename;
52115 + ino_t inode;
52116 + dev_t device;
52117 + __u32 mode;
52118 + kernel_cap_t cap_mask;
52119 + kernel_cap_t cap_lower;
52120 + kernel_cap_t cap_invert_audit;
52121 +
52122 + struct rlimit res[GR_NLIMITS];
52123 + __u32 resmask;
52124 +
52125 + __u8 user_trans_type;
52126 + __u8 group_trans_type;
52127 + uid_t *user_transitions;
52128 + gid_t *group_transitions;
52129 + __u16 user_trans_num;
52130 + __u16 group_trans_num;
52131 +
52132 + __u32 sock_families[2];
52133 + __u32 ip_proto[8];
52134 + __u32 ip_type;
52135 + struct acl_ip_label **ips;
52136 + __u32 ip_num;
52137 + __u32 inaddr_any_override;
52138 +
52139 + __u32 crashes;
52140 + unsigned long expires;
52141 +
52142 + struct acl_subject_label *parent_subject;
52143 + struct gr_hash_struct *hash;
52144 + struct acl_subject_label *prev;
52145 + struct acl_subject_label *next;
52146 +
52147 + struct acl_object_label **obj_hash;
52148 + __u32 obj_hash_size;
52149 + __u16 pax_flags;
52150 +};
52151 +
52152 +struct role_allowed_ip {
52153 + __u32 addr;
52154 + __u32 netmask;
52155 +
52156 + struct role_allowed_ip *prev;
52157 + struct role_allowed_ip *next;
52158 +};
52159 +
52160 +struct role_transition {
52161 + char *rolename;
52162 +
52163 + struct role_transition *prev;
52164 + struct role_transition *next;
52165 +};
52166 +
52167 +struct acl_role_label {
52168 + char *rolename;
52169 + uid_t uidgid;
52170 + __u16 roletype;
52171 +
52172 + __u16 auth_attempts;
52173 + unsigned long expires;
52174 +
52175 + struct acl_subject_label *root_label;
52176 + struct gr_hash_struct *hash;
52177 +
52178 + struct acl_role_label *prev;
52179 + struct acl_role_label *next;
52180 +
52181 + struct role_transition *transitions;
52182 + struct role_allowed_ip *allowed_ips;
52183 + uid_t *domain_children;
52184 + __u16 domain_child_num;
52185 +
52186 + struct acl_subject_label **subj_hash;
52187 + __u32 subj_hash_size;
52188 +};
52189 +
52190 +struct user_acl_role_db {
52191 + struct acl_role_label **r_table;
52192 + __u32 num_pointers; /* Number of allocations to track */
52193 + __u32 num_roles; /* Number of roles */
52194 + __u32 num_domain_children; /* Number of domain children */
52195 + __u32 num_subjects; /* Number of subjects */
52196 + __u32 num_objects; /* Number of objects */
52197 +};
52198 +
52199 +struct acl_object_label {
52200 + char *filename;
52201 + ino_t inode;
52202 + dev_t device;
52203 + __u32 mode;
52204 +
52205 + struct acl_subject_label *nested;
52206 + struct acl_object_label *globbed;
52207 +
52208 + /* next two structures not used */
52209 +
52210 + struct acl_object_label *prev;
52211 + struct acl_object_label *next;
52212 +};
52213 +
52214 +struct acl_ip_label {
52215 + char *iface;
52216 + __u32 addr;
52217 + __u32 netmask;
52218 + __u16 low, high;
52219 + __u8 mode;
52220 + __u32 type;
52221 + __u32 proto[8];
52222 +
52223 + /* next two structures not used */
52224 +
52225 + struct acl_ip_label *prev;
52226 + struct acl_ip_label *next;
52227 +};
52228 +
52229 +struct gr_arg {
52230 + struct user_acl_role_db role_db;
52231 + unsigned char pw[GR_PW_LEN];
52232 + unsigned char salt[GR_SALT_LEN];
52233 + unsigned char sum[GR_SHA_LEN];
52234 + unsigned char sp_role[GR_SPROLE_LEN];
52235 + struct sprole_pw *sprole_pws;
52236 + dev_t segv_device;
52237 + ino_t segv_inode;
52238 + uid_t segv_uid;
52239 + __u16 num_sprole_pws;
52240 + __u16 mode;
52241 +};
52242 +
52243 +struct gr_arg_wrapper {
52244 + struct gr_arg *arg;
52245 + __u32 version;
52246 + __u32 size;
52247 +};
52248 +
52249 +struct subject_map {
52250 + struct acl_subject_label *user;
52251 + struct acl_subject_label *kernel;
52252 + struct subject_map *prev;
52253 + struct subject_map *next;
52254 +};
52255 +
52256 +struct acl_subj_map_db {
52257 + struct subject_map **s_hash;
52258 + __u32 s_size;
52259 +};
52260 +
52261 +/* End Data Structures Section */
52262 +
52263 +/* Hash functions generated by empirical testing by Brad Spengler
52264 + Makes good use of the low bits of the inode. Generally 0-1 times
52265 + in loop for successful match. 0-3 for unsuccessful match.
52266 + Shift/add algorithm with modulus of table size and an XOR*/
52267 +
52268 +static __inline__ unsigned int
52269 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52270 +{
52271 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52272 +}
52273 +
52274 + static __inline__ unsigned int
52275 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52276 +{
52277 + return ((const unsigned long)userp % sz);
52278 +}
52279 +
52280 +static __inline__ unsigned int
52281 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52282 +{
52283 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52284 +}
52285 +
52286 +static __inline__ unsigned int
52287 +nhash(const char *name, const __u16 len, const unsigned int sz)
52288 +{
52289 + return full_name_hash((const unsigned char *)name, len) % sz;
52290 +}
52291 +
52292 +#define FOR_EACH_ROLE_START(role) \
52293 + role = role_list; \
52294 + while (role) {
52295 +
52296 +#define FOR_EACH_ROLE_END(role) \
52297 + role = role->prev; \
52298 + }
52299 +
52300 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52301 + subj = NULL; \
52302 + iter = 0; \
52303 + while (iter < role->subj_hash_size) { \
52304 + if (subj == NULL) \
52305 + subj = role->subj_hash[iter]; \
52306 + if (subj == NULL) { \
52307 + iter++; \
52308 + continue; \
52309 + }
52310 +
52311 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52312 + subj = subj->next; \
52313 + if (subj == NULL) \
52314 + iter++; \
52315 + }
52316 +
52317 +
52318 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52319 + subj = role->hash->first; \
52320 + while (subj != NULL) {
52321 +
52322 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52323 + subj = subj->next; \
52324 + }
52325 +
52326 +#endif
52327 +
52328 diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
52329 --- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52330 +++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
52331 @@ -0,0 +1,9 @@
52332 +#ifndef __GRALLOC_H
52333 +#define __GRALLOC_H
52334 +
52335 +void acl_free_all(void);
52336 +int acl_alloc_stack_init(unsigned long size);
52337 +void *acl_alloc(unsigned long len);
52338 +void *acl_alloc_num(unsigned long num, unsigned long len);
52339 +
52340 +#endif
52341 diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
52342 --- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52343 +++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
52344 @@ -0,0 +1,140 @@
52345 +#ifndef GRDEFS_H
52346 +#define GRDEFS_H
52347 +
52348 +/* Begin grsecurity status declarations */
52349 +
52350 +enum {
52351 + GR_READY = 0x01,
52352 + GR_STATUS_INIT = 0x00 // disabled state
52353 +};
52354 +
52355 +/* Begin ACL declarations */
52356 +
52357 +/* Role flags */
52358 +
52359 +enum {
52360 + GR_ROLE_USER = 0x0001,
52361 + GR_ROLE_GROUP = 0x0002,
52362 + GR_ROLE_DEFAULT = 0x0004,
52363 + GR_ROLE_SPECIAL = 0x0008,
52364 + GR_ROLE_AUTH = 0x0010,
52365 + GR_ROLE_NOPW = 0x0020,
52366 + GR_ROLE_GOD = 0x0040,
52367 + GR_ROLE_LEARN = 0x0080,
52368 + GR_ROLE_TPE = 0x0100,
52369 + GR_ROLE_DOMAIN = 0x0200,
52370 + GR_ROLE_PAM = 0x0400,
52371 + GR_ROLE_PERSIST = 0x0800
52372 +};
52373 +
52374 +/* ACL Subject and Object mode flags */
52375 +enum {
52376 + GR_DELETED = 0x80000000
52377 +};
52378 +
52379 +/* ACL Object-only mode flags */
52380 +enum {
52381 + GR_READ = 0x00000001,
52382 + GR_APPEND = 0x00000002,
52383 + GR_WRITE = 0x00000004,
52384 + GR_EXEC = 0x00000008,
52385 + GR_FIND = 0x00000010,
52386 + GR_INHERIT = 0x00000020,
52387 + GR_SETID = 0x00000040,
52388 + GR_CREATE = 0x00000080,
52389 + GR_DELETE = 0x00000100,
52390 + GR_LINK = 0x00000200,
52391 + GR_AUDIT_READ = 0x00000400,
52392 + GR_AUDIT_APPEND = 0x00000800,
52393 + GR_AUDIT_WRITE = 0x00001000,
52394 + GR_AUDIT_EXEC = 0x00002000,
52395 + GR_AUDIT_FIND = 0x00004000,
52396 + GR_AUDIT_INHERIT= 0x00008000,
52397 + GR_AUDIT_SETID = 0x00010000,
52398 + GR_AUDIT_CREATE = 0x00020000,
52399 + GR_AUDIT_DELETE = 0x00040000,
52400 + GR_AUDIT_LINK = 0x00080000,
52401 + GR_PTRACERD = 0x00100000,
52402 + GR_NOPTRACE = 0x00200000,
52403 + GR_SUPPRESS = 0x00400000,
52404 + GR_NOLEARN = 0x00800000,
52405 + GR_INIT_TRANSFER= 0x01000000
52406 +};
52407 +
52408 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52409 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52410 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52411 +
52412 +/* ACL subject-only mode flags */
52413 +enum {
52414 + GR_KILL = 0x00000001,
52415 + GR_VIEW = 0x00000002,
52416 + GR_PROTECTED = 0x00000004,
52417 + GR_LEARN = 0x00000008,
52418 + GR_OVERRIDE = 0x00000010,
52419 + /* just a placeholder, this mode is only used in userspace */
52420 + GR_DUMMY = 0x00000020,
52421 + GR_PROTSHM = 0x00000040,
52422 + GR_KILLPROC = 0x00000080,
52423 + GR_KILLIPPROC = 0x00000100,
52424 + /* just a placeholder, this mode is only used in userspace */
52425 + GR_NOTROJAN = 0x00000200,
52426 + GR_PROTPROCFD = 0x00000400,
52427 + GR_PROCACCT = 0x00000800,
52428 + GR_RELAXPTRACE = 0x00001000,
52429 + GR_NESTED = 0x00002000,
52430 + GR_INHERITLEARN = 0x00004000,
52431 + GR_PROCFIND = 0x00008000,
52432 + GR_POVERRIDE = 0x00010000,
52433 + GR_KERNELAUTH = 0x00020000,
52434 + GR_ATSECURE = 0x00040000,
52435 + GR_SHMEXEC = 0x00080000
52436 +};
52437 +
52438 +enum {
52439 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52440 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52441 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52442 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52443 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52444 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52445 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52446 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52447 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52448 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52449 +};
52450 +
52451 +enum {
52452 + GR_ID_USER = 0x01,
52453 + GR_ID_GROUP = 0x02,
52454 +};
52455 +
52456 +enum {
52457 + GR_ID_ALLOW = 0x01,
52458 + GR_ID_DENY = 0x02,
52459 +};
52460 +
52461 +#define GR_CRASH_RES 31
52462 +#define GR_UIDTABLE_MAX 500
52463 +
52464 +/* begin resource learning section */
52465 +enum {
52466 + GR_RLIM_CPU_BUMP = 60,
52467 + GR_RLIM_FSIZE_BUMP = 50000,
52468 + GR_RLIM_DATA_BUMP = 10000,
52469 + GR_RLIM_STACK_BUMP = 1000,
52470 + GR_RLIM_CORE_BUMP = 10000,
52471 + GR_RLIM_RSS_BUMP = 500000,
52472 + GR_RLIM_NPROC_BUMP = 1,
52473 + GR_RLIM_NOFILE_BUMP = 5,
52474 + GR_RLIM_MEMLOCK_BUMP = 50000,
52475 + GR_RLIM_AS_BUMP = 500000,
52476 + GR_RLIM_LOCKS_BUMP = 2,
52477 + GR_RLIM_SIGPENDING_BUMP = 5,
52478 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52479 + GR_RLIM_NICE_BUMP = 1,
52480 + GR_RLIM_RTPRIO_BUMP = 1,
52481 + GR_RLIM_RTTIME_BUMP = 1000000
52482 +};
52483 +
52484 +#endif
52485 diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
52486 --- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52487 +++ linux-3.0.4/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
52488 @@ -0,0 +1,219 @@
52489 +#ifndef __GRINTERNAL_H
52490 +#define __GRINTERNAL_H
52491 +
52492 +#ifdef CONFIG_GRKERNSEC
52493 +
52494 +#include <linux/fs.h>
52495 +#include <linux/mnt_namespace.h>
52496 +#include <linux/nsproxy.h>
52497 +#include <linux/gracl.h>
52498 +#include <linux/grdefs.h>
52499 +#include <linux/grmsg.h>
52500 +
52501 +void gr_add_learn_entry(const char *fmt, ...)
52502 + __attribute__ ((format (printf, 1, 2)));
52503 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52504 + const struct vfsmount *mnt);
52505 +__u32 gr_check_create(const struct dentry *new_dentry,
52506 + const struct dentry *parent,
52507 + const struct vfsmount *mnt, const __u32 mode);
52508 +int gr_check_protected_task(const struct task_struct *task);
52509 +__u32 to_gr_audit(const __u32 reqmode);
52510 +int gr_set_acls(const int type);
52511 +int gr_apply_subject_to_task(struct task_struct *task);
52512 +int gr_acl_is_enabled(void);
52513 +char gr_roletype_to_char(void);
52514 +
52515 +void gr_handle_alertkill(struct task_struct *task);
52516 +char *gr_to_filename(const struct dentry *dentry,
52517 + const struct vfsmount *mnt);
52518 +char *gr_to_filename1(const struct dentry *dentry,
52519 + const struct vfsmount *mnt);
52520 +char *gr_to_filename2(const struct dentry *dentry,
52521 + const struct vfsmount *mnt);
52522 +char *gr_to_filename3(const struct dentry *dentry,
52523 + const struct vfsmount *mnt);
52524 +
52525 +extern int grsec_enable_harden_ptrace;
52526 +extern int grsec_enable_link;
52527 +extern int grsec_enable_fifo;
52528 +extern int grsec_enable_execve;
52529 +extern int grsec_enable_shm;
52530 +extern int grsec_enable_execlog;
52531 +extern int grsec_enable_signal;
52532 +extern int grsec_enable_audit_ptrace;
52533 +extern int grsec_enable_forkfail;
52534 +extern int grsec_enable_time;
52535 +extern int grsec_enable_rofs;
52536 +extern int grsec_enable_chroot_shmat;
52537 +extern int grsec_enable_chroot_mount;
52538 +extern int grsec_enable_chroot_double;
52539 +extern int grsec_enable_chroot_pivot;
52540 +extern int grsec_enable_chroot_chdir;
52541 +extern int grsec_enable_chroot_chmod;
52542 +extern int grsec_enable_chroot_mknod;
52543 +extern int grsec_enable_chroot_fchdir;
52544 +extern int grsec_enable_chroot_nice;
52545 +extern int grsec_enable_chroot_execlog;
52546 +extern int grsec_enable_chroot_caps;
52547 +extern int grsec_enable_chroot_sysctl;
52548 +extern int grsec_enable_chroot_unix;
52549 +extern int grsec_enable_tpe;
52550 +extern int grsec_tpe_gid;
52551 +extern int grsec_enable_tpe_all;
52552 +extern int grsec_enable_tpe_invert;
52553 +extern int grsec_enable_socket_all;
52554 +extern int grsec_socket_all_gid;
52555 +extern int grsec_enable_socket_client;
52556 +extern int grsec_socket_client_gid;
52557 +extern int grsec_enable_socket_server;
52558 +extern int grsec_socket_server_gid;
52559 +extern int grsec_audit_gid;
52560 +extern int grsec_enable_group;
52561 +extern int grsec_enable_audit_textrel;
52562 +extern int grsec_enable_log_rwxmaps;
52563 +extern int grsec_enable_mount;
52564 +extern int grsec_enable_chdir;
52565 +extern int grsec_resource_logging;
52566 +extern int grsec_enable_blackhole;
52567 +extern int grsec_lastack_retries;
52568 +extern int grsec_enable_brute;
52569 +extern int grsec_lock;
52570 +
52571 +extern spinlock_t grsec_alert_lock;
52572 +extern unsigned long grsec_alert_wtime;
52573 +extern unsigned long grsec_alert_fyet;
52574 +
52575 +extern spinlock_t grsec_audit_lock;
52576 +
52577 +extern rwlock_t grsec_exec_file_lock;
52578 +
52579 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52580 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52581 + (tsk)->exec_file->f_vfsmnt) : "/")
52582 +
52583 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52584 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52585 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52586 +
52587 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52588 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52589 + (tsk)->exec_file->f_vfsmnt) : "/")
52590 +
52591 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52592 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52593 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52594 +
52595 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52596 +
52597 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52598 +
52599 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52600 + (task)->pid, (cred)->uid, \
52601 + (cred)->euid, (cred)->gid, (cred)->egid, \
52602 + gr_parent_task_fullpath(task), \
52603 + (task)->real_parent->comm, (task)->real_parent->pid, \
52604 + (pcred)->uid, (pcred)->euid, \
52605 + (pcred)->gid, (pcred)->egid
52606 +
52607 +#define GR_CHROOT_CAPS {{ \
52608 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52609 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52610 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52611 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52612 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52613 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52614 +
52615 +#define security_learn(normal_msg,args...) \
52616 +({ \
52617 + read_lock(&grsec_exec_file_lock); \
52618 + gr_add_learn_entry(normal_msg "\n", ## args); \
52619 + read_unlock(&grsec_exec_file_lock); \
52620 +})
52621 +
52622 +enum {
52623 + GR_DO_AUDIT,
52624 + GR_DONT_AUDIT,
52625 + /* used for non-audit messages that we shouldn't kill the task on */
52626 + GR_DONT_AUDIT_GOOD
52627 +};
52628 +
52629 +enum {
52630 + GR_TTYSNIFF,
52631 + GR_RBAC,
52632 + GR_RBAC_STR,
52633 + GR_STR_RBAC,
52634 + GR_RBAC_MODE2,
52635 + GR_RBAC_MODE3,
52636 + GR_FILENAME,
52637 + GR_SYSCTL_HIDDEN,
52638 + GR_NOARGS,
52639 + GR_ONE_INT,
52640 + GR_ONE_INT_TWO_STR,
52641 + GR_ONE_STR,
52642 + GR_STR_INT,
52643 + GR_TWO_STR_INT,
52644 + GR_TWO_INT,
52645 + GR_TWO_U64,
52646 + GR_THREE_INT,
52647 + GR_FIVE_INT_TWO_STR,
52648 + GR_TWO_STR,
52649 + GR_THREE_STR,
52650 + GR_FOUR_STR,
52651 + GR_STR_FILENAME,
52652 + GR_FILENAME_STR,
52653 + GR_FILENAME_TWO_INT,
52654 + GR_FILENAME_TWO_INT_STR,
52655 + GR_TEXTREL,
52656 + GR_PTRACE,
52657 + GR_RESOURCE,
52658 + GR_CAP,
52659 + GR_SIG,
52660 + GR_SIG2,
52661 + GR_CRASH1,
52662 + GR_CRASH2,
52663 + GR_PSACCT,
52664 + GR_RWXMAP
52665 +};
52666 +
52667 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52668 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52669 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52670 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52671 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52672 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52673 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52674 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52675 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52676 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52677 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52678 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52679 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52680 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52681 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52682 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52683 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52684 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52685 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52686 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52687 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52688 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52689 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52690 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52691 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52692 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52693 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52694 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52695 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52696 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52697 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52698 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52699 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52700 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52701 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52702 +
52703 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52704 +
52705 +#endif
52706 +
52707 +#endif
52708 diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
52709 --- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52710 +++ linux-3.0.4/include/linux/grmsg.h 2011-08-25 17:27:26.000000000 -0400
52711 @@ -0,0 +1,107 @@
52712 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52713 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52714 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52715 +#define GR_STOPMOD_MSG "denied modification of module state by "
52716 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52717 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52718 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52719 +#define GR_IOPL_MSG "denied use of iopl() by "
52720 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52721 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52722 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52723 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52724 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52725 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52726 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52727 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52728 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52729 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52730 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52731 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52732 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52733 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52734 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52735 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52736 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52737 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52738 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52739 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52740 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52741 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52742 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52743 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52744 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52745 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52746 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52747 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52748 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52749 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52750 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52751 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52752 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52753 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52754 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52755 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52756 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52757 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52758 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52759 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52760 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52761 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52762 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52763 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52764 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52765 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52766 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52767 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52768 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52769 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52770 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52771 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52772 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52773 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52774 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52775 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52776 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52777 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52778 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52779 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52780 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52781 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52782 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52783 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52784 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52785 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52786 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52787 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52788 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52789 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52790 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52791 +#define GR_TIME_MSG "time set by "
52792 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52793 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52794 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52795 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52796 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52797 +#define GR_BIND_MSG "denied bind() by "
52798 +#define GR_CONNECT_MSG "denied connect() by "
52799 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52800 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52801 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52802 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52803 +#define GR_CAP_ACL_MSG "use of %s denied for "
52804 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52805 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52806 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52807 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52808 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52809 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52810 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52811 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52812 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52813 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52814 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52815 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52816 +#define GR_VM86_MSG "denied use of vm86 by "
52817 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52818 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52819 diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
52820 --- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52821 +++ linux-3.0.4/include/linux/grsecurity.h 2011-08-25 17:27:36.000000000 -0400
52822 @@ -0,0 +1,227 @@
52823 +#ifndef GR_SECURITY_H
52824 +#define GR_SECURITY_H
52825 +#include <linux/fs.h>
52826 +#include <linux/fs_struct.h>
52827 +#include <linux/binfmts.h>
52828 +#include <linux/gracl.h>
52829 +
52830 +/* notify of brain-dead configs */
52831 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52832 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52833 +#endif
52834 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52835 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52836 +#endif
52837 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52838 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52839 +#endif
52840 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52841 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52842 +#endif
52843 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52844 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52845 +#endif
52846 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52847 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
52848 +#endif
52849 +
52850 +#include <linux/compat.h>
52851 +
52852 +struct user_arg_ptr {
52853 +#ifdef CONFIG_COMPAT
52854 + bool is_compat;
52855 +#endif
52856 + union {
52857 + const char __user *const __user *native;
52858 +#ifdef CONFIG_COMPAT
52859 + compat_uptr_t __user *compat;
52860 +#endif
52861 + } ptr;
52862 +};
52863 +
52864 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52865 +void gr_handle_brute_check(void);
52866 +void gr_handle_kernel_exploit(void);
52867 +int gr_process_user_ban(void);
52868 +
52869 +char gr_roletype_to_char(void);
52870 +
52871 +int gr_acl_enable_at_secure(void);
52872 +
52873 +int gr_check_user_change(int real, int effective, int fs);
52874 +int gr_check_group_change(int real, int effective, int fs);
52875 +
52876 +void gr_del_task_from_ip_table(struct task_struct *p);
52877 +
52878 +int gr_pid_is_chrooted(struct task_struct *p);
52879 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52880 +int gr_handle_chroot_nice(void);
52881 +int gr_handle_chroot_sysctl(const int op);
52882 +int gr_handle_chroot_setpriority(struct task_struct *p,
52883 + const int niceval);
52884 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52885 +int gr_handle_chroot_chroot(const struct dentry *dentry,
52886 + const struct vfsmount *mnt);
52887 +int gr_handle_chroot_caps(struct path *path);
52888 +void gr_handle_chroot_chdir(struct path *path);
52889 +int gr_handle_chroot_chmod(const struct dentry *dentry,
52890 + const struct vfsmount *mnt, const int mode);
52891 +int gr_handle_chroot_mknod(const struct dentry *dentry,
52892 + const struct vfsmount *mnt, const int mode);
52893 +int gr_handle_chroot_mount(const struct dentry *dentry,
52894 + const struct vfsmount *mnt,
52895 + const char *dev_name);
52896 +int gr_handle_chroot_pivot(void);
52897 +int gr_handle_chroot_unix(const pid_t pid);
52898 +
52899 +int gr_handle_rawio(const struct inode *inode);
52900 +
52901 +void gr_handle_ioperm(void);
52902 +void gr_handle_iopl(void);
52903 +
52904 +int gr_tpe_allow(const struct file *file);
52905 +
52906 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
52907 +void gr_clear_chroot_entries(struct task_struct *task);
52908 +
52909 +void gr_log_forkfail(const int retval);
52910 +void gr_log_timechange(void);
52911 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
52912 +void gr_log_chdir(const struct dentry *dentry,
52913 + const struct vfsmount *mnt);
52914 +void gr_log_chroot_exec(const struct dentry *dentry,
52915 + const struct vfsmount *mnt);
52916 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
52917 +void gr_log_remount(const char *devname, const int retval);
52918 +void gr_log_unmount(const char *devname, const int retval);
52919 +void gr_log_mount(const char *from, const char *to, const int retval);
52920 +void gr_log_textrel(struct vm_area_struct *vma);
52921 +void gr_log_rwxmmap(struct file *file);
52922 +void gr_log_rwxmprotect(struct file *file);
52923 +
52924 +int gr_handle_follow_link(const struct inode *parent,
52925 + const struct inode *inode,
52926 + const struct dentry *dentry,
52927 + const struct vfsmount *mnt);
52928 +int gr_handle_fifo(const struct dentry *dentry,
52929 + const struct vfsmount *mnt,
52930 + const struct dentry *dir, const int flag,
52931 + const int acc_mode);
52932 +int gr_handle_hardlink(const struct dentry *dentry,
52933 + const struct vfsmount *mnt,
52934 + struct inode *inode,
52935 + const int mode, const char *to);
52936 +
52937 +int gr_is_capable(const int cap);
52938 +int gr_is_capable_nolog(const int cap);
52939 +void gr_learn_resource(const struct task_struct *task, const int limit,
52940 + const unsigned long wanted, const int gt);
52941 +void gr_copy_label(struct task_struct *tsk);
52942 +void gr_handle_crash(struct task_struct *task, const int sig);
52943 +int gr_handle_signal(const struct task_struct *p, const int sig);
52944 +int gr_check_crash_uid(const uid_t uid);
52945 +int gr_check_protected_task(const struct task_struct *task);
52946 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
52947 +int gr_acl_handle_mmap(const struct file *file,
52948 + const unsigned long prot);
52949 +int gr_acl_handle_mprotect(const struct file *file,
52950 + const unsigned long prot);
52951 +int gr_check_hidden_task(const struct task_struct *tsk);
52952 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
52953 + const struct vfsmount *mnt);
52954 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
52955 + const struct vfsmount *mnt);
52956 +__u32 gr_acl_handle_access(const struct dentry *dentry,
52957 + const struct vfsmount *mnt, const int fmode);
52958 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
52959 + const struct vfsmount *mnt, mode_t mode);
52960 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
52961 + const struct vfsmount *mnt, mode_t mode);
52962 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
52963 + const struct vfsmount *mnt);
52964 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
52965 + const struct vfsmount *mnt);
52966 +int gr_handle_ptrace(struct task_struct *task, const long request);
52967 +int gr_handle_proc_ptrace(struct task_struct *task);
52968 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
52969 + const struct vfsmount *mnt);
52970 +int gr_check_crash_exec(const struct file *filp);
52971 +int gr_acl_is_enabled(void);
52972 +void gr_set_kernel_label(struct task_struct *task);
52973 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
52974 + const gid_t gid);
52975 +int gr_set_proc_label(const struct dentry *dentry,
52976 + const struct vfsmount *mnt,
52977 + const int unsafe_share);
52978 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
52979 + const struct vfsmount *mnt);
52980 +__u32 gr_acl_handle_open(const struct dentry *dentry,
52981 + const struct vfsmount *mnt, const int fmode);
52982 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
52983 + const struct dentry *p_dentry,
52984 + const struct vfsmount *p_mnt, const int fmode,
52985 + const int imode);
52986 +void gr_handle_create(const struct dentry *dentry,
52987 + const struct vfsmount *mnt);
52988 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
52989 + const struct dentry *parent_dentry,
52990 + const struct vfsmount *parent_mnt,
52991 + const int mode);
52992 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
52993 + const struct dentry *parent_dentry,
52994 + const struct vfsmount *parent_mnt);
52995 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
52996 + const struct vfsmount *mnt);
52997 +void gr_handle_delete(const ino_t ino, const dev_t dev);
52998 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
52999 + const struct vfsmount *mnt);
53000 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53001 + const struct dentry *parent_dentry,
53002 + const struct vfsmount *parent_mnt,
53003 + const char *from);
53004 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53005 + const struct dentry *parent_dentry,
53006 + const struct vfsmount *parent_mnt,
53007 + const struct dentry *old_dentry,
53008 + const struct vfsmount *old_mnt, const char *to);
53009 +int gr_acl_handle_rename(struct dentry *new_dentry,
53010 + struct dentry *parent_dentry,
53011 + const struct vfsmount *parent_mnt,
53012 + struct dentry *old_dentry,
53013 + struct inode *old_parent_inode,
53014 + struct vfsmount *old_mnt, const char *newname);
53015 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53016 + struct dentry *old_dentry,
53017 + struct dentry *new_dentry,
53018 + struct vfsmount *mnt, const __u8 replace);
53019 +__u32 gr_check_link(const struct dentry *new_dentry,
53020 + const struct dentry *parent_dentry,
53021 + const struct vfsmount *parent_mnt,
53022 + const struct dentry *old_dentry,
53023 + const struct vfsmount *old_mnt);
53024 +int gr_acl_handle_filldir(const struct file *file, const char *name,
53025 + const unsigned int namelen, const ino_t ino);
53026 +
53027 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
53028 + const struct vfsmount *mnt);
53029 +void gr_acl_handle_exit(void);
53030 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
53031 +int gr_acl_handle_procpidmem(const struct task_struct *task);
53032 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53033 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53034 +void gr_audit_ptrace(struct task_struct *task);
53035 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53036 +
53037 +#ifdef CONFIG_GRKERNSEC
53038 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53039 +void gr_handle_vm86(void);
53040 +void gr_handle_mem_readwrite(u64 from, u64 to);
53041 +
53042 +extern int grsec_enable_dmesg;
53043 +extern int grsec_disable_privio;
53044 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53045 +extern int grsec_enable_chroot_findtask;
53046 +#endif
53047 +#endif
53048 +
53049 +#endif
53050 diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
53051 --- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53052 +++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
53053 @@ -0,0 +1,19 @@
53054 +#ifndef __GRSOCK_H
53055 +#define __GRSOCK_H
53056 +
53057 +extern void gr_attach_curr_ip(const struct sock *sk);
53058 +extern int gr_handle_sock_all(const int family, const int type,
53059 + const int protocol);
53060 +extern int gr_handle_sock_server(const struct sockaddr *sck);
53061 +extern int gr_handle_sock_server_other(const struct sock *sck);
53062 +extern int gr_handle_sock_client(const struct sockaddr *sck);
53063 +extern int gr_search_connect(struct socket * sock,
53064 + struct sockaddr_in * addr);
53065 +extern int gr_search_bind(struct socket * sock,
53066 + struct sockaddr_in * addr);
53067 +extern int gr_search_listen(struct socket * sock);
53068 +extern int gr_search_accept(struct socket * sock);
53069 +extern int gr_search_socket(const int domain, const int type,
53070 + const int protocol);
53071 +
53072 +#endif
53073 diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
53074 --- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
53075 +++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
53076 @@ -675,7 +675,7 @@ struct hid_ll_driver {
53077 unsigned int code, int value);
53078
53079 int (*parse)(struct hid_device *hdev);
53080 -};
53081 +} __no_const;
53082
53083 #define PM_HINT_FULLON 1<<5
53084 #define PM_HINT_NORMAL 1<<1
53085 diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
53086 --- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
53087 +++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
53088 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53089 kunmap_atomic(kaddr, KM_USER0);
53090 }
53091
53092 +static inline void sanitize_highpage(struct page *page)
53093 +{
53094 + void *kaddr;
53095 + unsigned long flags;
53096 +
53097 + local_irq_save(flags);
53098 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53099 + clear_page(kaddr);
53100 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53101 + local_irq_restore(flags);
53102 +}
53103 +
53104 static inline void zero_user_segments(struct page *page,
53105 unsigned start1, unsigned end1,
53106 unsigned start2, unsigned end2)
53107 diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
53108 --- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
53109 +++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
53110 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53111 /* To determine what the adapter supports */
53112 u32 (*functionality) (struct i2c_adapter *);
53113 };
53114 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53115
53116 /*
53117 * i2c_adapter is the structure used to identify a physical i2c bus along
53118 diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
53119 --- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
53120 +++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
53121 @@ -564,7 +564,7 @@ struct i2o_controller {
53122 struct i2o_device *exec; /* Executive */
53123 #if BITS_PER_LONG == 64
53124 spinlock_t context_list_lock; /* lock for context_list */
53125 - atomic_t context_list_counter; /* needed for unique contexts */
53126 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53127 struct list_head context_list; /* list of context id's
53128 and pointers */
53129 #endif
53130 diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
53131 --- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
53132 +++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
53133 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53134
53135 /* Each module must use one module_init(). */
53136 #define module_init(initfn) \
53137 - static inline initcall_t __inittest(void) \
53138 + static inline __used initcall_t __inittest(void) \
53139 { return initfn; } \
53140 int init_module(void) __attribute__((alias(#initfn)));
53141
53142 /* This is only required if you want to be unloadable. */
53143 #define module_exit(exitfn) \
53144 - static inline exitcall_t __exittest(void) \
53145 + static inline __used exitcall_t __exittest(void) \
53146 { return exitfn; } \
53147 void cleanup_module(void) __attribute__((alias(#exitfn)));
53148
53149 diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
53150 --- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
53151 +++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
53152 @@ -126,6 +126,12 @@ extern struct cred init_cred;
53153 # define INIT_PERF_EVENTS(tsk)
53154 #endif
53155
53156 +#ifdef CONFIG_X86
53157 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53158 +#else
53159 +#define INIT_TASK_THREAD_INFO
53160 +#endif
53161 +
53162 /*
53163 * INIT_TASK is used to set up the first task table, touch at
53164 * your own risk!. Base=0, limit=0x1fffff (=2MB)
53165 @@ -164,6 +170,7 @@ extern struct cred init_cred;
53166 RCU_INIT_POINTER(.cred, &init_cred), \
53167 .comm = "swapper", \
53168 .thread = INIT_THREAD, \
53169 + INIT_TASK_THREAD_INFO \
53170 .fs = &init_fs, \
53171 .files = &init_files, \
53172 .signal = &init_signals, \
53173 diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
53174 --- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
53175 +++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
53176 @@ -296,7 +296,7 @@ struct iommu_flush {
53177 u8 fm, u64 type);
53178 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53179 unsigned int size_order, u64 type);
53180 -};
53181 +} __no_const;
53182
53183 enum {
53184 SR_DMAR_FECTL_REG,
53185 diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
53186 --- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
53187 +++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
53188 @@ -422,7 +422,7 @@ enum
53189 /* map softirq index to softirq name. update 'softirq_to_name' in
53190 * kernel/softirq.c when adding a new softirq.
53191 */
53192 -extern char *softirq_to_name[NR_SOFTIRQS];
53193 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53194
53195 /* softirq mask and active fields moved to irq_cpustat_t in
53196 * asm/hardirq.h to get better cache usage. KAO
53197 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53198
53199 struct softirq_action
53200 {
53201 - void (*action)(struct softirq_action *);
53202 + void (*action)(void);
53203 };
53204
53205 asmlinkage void do_softirq(void);
53206 asmlinkage void __do_softirq(void);
53207 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53208 +extern void open_softirq(int nr, void (*action)(void));
53209 extern void softirq_init(void);
53210 static inline void __raise_softirq_irqoff(unsigned int nr)
53211 {
53212 diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
53213 --- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
53214 +++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
53215 @@ -15,7 +15,8 @@
53216
53217 struct module;
53218
53219 -#ifdef CONFIG_KALLSYMS
53220 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53221 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53222 /* Lookup the address for a symbol. Returns 0 if not found. */
53223 unsigned long kallsyms_lookup_name(const char *name);
53224
53225 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53226 /* Stupid that this does nothing, but I didn't create this mess. */
53227 #define __print_symbol(fmt, addr)
53228 #endif /*CONFIG_KALLSYMS*/
53229 +#else /* when included by kallsyms.c, vsnprintf.c, or
53230 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53231 +extern void __print_symbol(const char *fmt, unsigned long address);
53232 +extern int sprint_backtrace(char *buffer, unsigned long address);
53233 +extern int sprint_symbol(char *buffer, unsigned long address);
53234 +const char *kallsyms_lookup(unsigned long addr,
53235 + unsigned long *symbolsize,
53236 + unsigned long *offset,
53237 + char **modname, char *namebuf);
53238 +#endif
53239
53240 /* This macro allows us to keep printk typechecking */
53241 static void __check_printsym_format(const char *fmt, ...)
53242 diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
53243 --- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
53244 +++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
53245 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53246 extern int kgdb_io_module_registered;
53247
53248 extern atomic_t kgdb_setting_breakpoint;
53249 -extern atomic_t kgdb_cpu_doing_single_step;
53250 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53251
53252 extern struct task_struct *kgdb_usethread;
53253 extern struct task_struct *kgdb_contthread;
53254 @@ -251,7 +251,7 @@ struct kgdb_arch {
53255 void (*disable_hw_break)(struct pt_regs *regs);
53256 void (*remove_all_hw_break)(void);
53257 void (*correct_hw_break)(void);
53258 -};
53259 +} __do_const;
53260
53261 /**
53262 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
53263 @@ -276,7 +276,7 @@ struct kgdb_io {
53264 void (*pre_exception) (void);
53265 void (*post_exception) (void);
53266 int is_console;
53267 -};
53268 +} __do_const;
53269
53270 extern struct kgdb_arch arch_kgdb_ops;
53271
53272 diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
53273 --- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
53274 +++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
53275 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
53276 * usually useless though. */
53277 extern int __request_module(bool wait, const char *name, ...) \
53278 __attribute__((format(printf, 2, 3)));
53279 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53280 + __attribute__((format(printf, 3, 4)));
53281 #define request_module(mod...) __request_module(true, mod)
53282 #define request_module_nowait(mod...) __request_module(false, mod)
53283 #define try_then_request_module(x, mod...) \
53284 diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
53285 --- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
53286 +++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
53287 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53288 void vcpu_load(struct kvm_vcpu *vcpu);
53289 void vcpu_put(struct kvm_vcpu *vcpu);
53290
53291 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53292 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53293 struct module *module);
53294 void kvm_exit(void);
53295
53296 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53297 struct kvm_guest_debug *dbg);
53298 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53299
53300 -int kvm_arch_init(void *opaque);
53301 +int kvm_arch_init(const void *opaque);
53302 void kvm_arch_exit(void);
53303
53304 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53305 diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
53306 --- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
53307 +++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
53308 @@ -899,7 +899,7 @@ struct ata_port_operations {
53309 * fields must be pointers.
53310 */
53311 const struct ata_port_operations *inherits;
53312 -};
53313 +} __do_const;
53314
53315 struct ata_port_info {
53316 unsigned long flags;
53317 diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
53318 --- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
53319 +++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
53320 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53321 int region);
53322 void * (*mca_transform_memory)(struct mca_device *,
53323 void *memory);
53324 -};
53325 +} __no_const;
53326
53327 struct mca_bus {
53328 u64 default_dma_mask;
53329 diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
53330 --- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
53331 +++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
53332 @@ -144,7 +144,7 @@ struct memory_accessor {
53333 size_t count);
53334 ssize_t (*write)(struct memory_accessor *, const char *buf,
53335 off_t offset, size_t count);
53336 -};
53337 +} __no_const;
53338
53339 /*
53340 * Kernel text modification mutex, used for code patching. Users of this lock
53341 diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
53342 --- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
53343 +++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
53344 @@ -234,6 +234,7 @@ struct abx500_ops {
53345 int (*event_registers_startup_state_get) (struct device *, u8 *);
53346 int (*startup_irq_enabled) (struct device *, unsigned int);
53347 };
53348 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53349
53350 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53351 void abx500_remove_ops(struct device *dev);
53352 diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
53353 --- linux-3.0.4/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
53354 +++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
53355 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53356
53357 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53358 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53359 +
53360 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53361 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53362 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53363 +#else
53364 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53365 +#endif
53366 +
53367 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53368 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53369
53370 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
53371 int set_page_dirty_lock(struct page *page);
53372 int clear_page_dirty_for_io(struct page *page);
53373
53374 -/* Is the vma a continuation of the stack vma above it? */
53375 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53376 -{
53377 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53378 -}
53379 -
53380 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53381 - unsigned long addr)
53382 -{
53383 - return (vma->vm_flags & VM_GROWSDOWN) &&
53384 - (vma->vm_start == addr) &&
53385 - !vma_growsdown(vma->vm_prev, addr);
53386 -}
53387 -
53388 -/* Is the vma a continuation of the stack vma below it? */
53389 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53390 -{
53391 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53392 -}
53393 -
53394 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53395 - unsigned long addr)
53396 -{
53397 - return (vma->vm_flags & VM_GROWSUP) &&
53398 - (vma->vm_end == addr) &&
53399 - !vma_growsup(vma->vm_next, addr);
53400 -}
53401 -
53402 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53403 unsigned long old_addr, struct vm_area_struct *new_vma,
53404 unsigned long new_addr, unsigned long len);
53405 @@ -1169,6 +1148,15 @@ struct shrinker {
53406 extern void register_shrinker(struct shrinker *);
53407 extern void unregister_shrinker(struct shrinker *);
53408
53409 +#ifdef CONFIG_MMU
53410 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
53411 +#else
53412 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
53413 +{
53414 + return __pgprot(0);
53415 +}
53416 +#endif
53417 +
53418 int vma_wants_writenotify(struct vm_area_struct *vma);
53419
53420 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53421 @@ -1452,6 +1440,7 @@ out:
53422 }
53423
53424 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53425 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53426
53427 extern unsigned long do_brk(unsigned long, unsigned long);
53428
53429 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
53430 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53431 struct vm_area_struct **pprev);
53432
53433 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53434 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53435 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53436 +
53437 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53438 NULL if none. Assume start_addr < end_addr. */
53439 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53440 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
53441 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53442 }
53443
53444 -#ifdef CONFIG_MMU
53445 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53446 -#else
53447 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53448 -{
53449 - return __pgprot(0);
53450 -}
53451 -#endif
53452 -
53453 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53454 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53455 unsigned long pfn, unsigned long size, pgprot_t);
53456 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
53457 extern int sysctl_memory_failure_early_kill;
53458 extern int sysctl_memory_failure_recovery;
53459 extern void shake_page(struct page *p, int access);
53460 -extern atomic_long_t mce_bad_pages;
53461 +extern atomic_long_unchecked_t mce_bad_pages;
53462 extern int soft_offline_page(struct page *page, int flags);
53463
53464 extern void dump_page(struct page *page);
53465 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
53466 unsigned int pages_per_huge_page);
53467 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53468
53469 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53470 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53471 +#else
53472 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53473 +#endif
53474 +
53475 #endif /* __KERNEL__ */
53476 #endif /* _LINUX_MM_H */
53477 diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
53478 --- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
53479 +++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
53480 @@ -184,6 +184,8 @@ struct vm_area_struct {
53481 #ifdef CONFIG_NUMA
53482 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53483 #endif
53484 +
53485 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53486 };
53487
53488 struct core_thread {
53489 @@ -316,6 +318,24 @@ struct mm_struct {
53490 #ifdef CONFIG_CPUMASK_OFFSTACK
53491 struct cpumask cpumask_allocation;
53492 #endif
53493 +
53494 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53495 + unsigned long pax_flags;
53496 +#endif
53497 +
53498 +#ifdef CONFIG_PAX_DLRESOLVE
53499 + unsigned long call_dl_resolve;
53500 +#endif
53501 +
53502 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53503 + unsigned long call_syscall;
53504 +#endif
53505 +
53506 +#ifdef CONFIG_PAX_ASLR
53507 + unsigned long delta_mmap; /* randomized offset */
53508 + unsigned long delta_stack; /* randomized offset */
53509 +#endif
53510 +
53511 };
53512
53513 static inline void mm_init_cpumask(struct mm_struct *mm)
53514 diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
53515 --- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
53516 +++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
53517 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53518 */
53519 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53520 ({ \
53521 - pte_t __pte; \
53522 + pte_t ___pte; \
53523 struct vm_area_struct *___vma = __vma; \
53524 unsigned long ___address = __address; \
53525 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53526 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53527 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53528 - __pte; \
53529 + ___pte; \
53530 })
53531
53532 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53533 diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
53534 --- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
53535 +++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
53536 @@ -350,7 +350,7 @@ struct zone {
53537 unsigned long flags; /* zone flags, see below */
53538
53539 /* Zone statistics */
53540 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53541 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53542
53543 /*
53544 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53545 diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
53546 --- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
53547 +++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
53548 @@ -12,7 +12,7 @@
53549 typedef unsigned long kernel_ulong_t;
53550 #endif
53551
53552 -#define PCI_ANY_ID (~0)
53553 +#define PCI_ANY_ID ((__u16)~0)
53554
53555 struct pci_device_id {
53556 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53557 @@ -131,7 +131,7 @@ struct usb_device_id {
53558 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53559 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53560
53561 -#define HID_ANY_ID (~0)
53562 +#define HID_ANY_ID (~0U)
53563
53564 struct hid_device_id {
53565 __u16 bus;
53566 diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
53567 --- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
53568 +++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
53569 @@ -16,6 +16,7 @@
53570 #include <linux/kobject.h>
53571 #include <linux/moduleparam.h>
53572 #include <linux/tracepoint.h>
53573 +#include <linux/fs.h>
53574
53575 #include <linux/percpu.h>
53576 #include <asm/module.h>
53577 @@ -325,19 +326,16 @@ struct module
53578 int (*init)(void);
53579
53580 /* If this is non-NULL, vfree after init() returns */
53581 - void *module_init;
53582 + void *module_init_rx, *module_init_rw;
53583
53584 /* Here is the actual code + data, vfree'd on unload. */
53585 - void *module_core;
53586 + void *module_core_rx, *module_core_rw;
53587
53588 /* Here are the sizes of the init and core sections */
53589 - unsigned int init_size, core_size;
53590 + unsigned int init_size_rw, core_size_rw;
53591
53592 /* The size of the executable code in each section. */
53593 - unsigned int init_text_size, core_text_size;
53594 -
53595 - /* Size of RO sections of the module (text+rodata) */
53596 - unsigned int init_ro_size, core_ro_size;
53597 + unsigned int init_size_rx, core_size_rx;
53598
53599 /* Arch-specific module values */
53600 struct mod_arch_specific arch;
53601 @@ -393,6 +391,10 @@ struct module
53602 #ifdef CONFIG_EVENT_TRACING
53603 struct ftrace_event_call **trace_events;
53604 unsigned int num_trace_events;
53605 + struct file_operations trace_id;
53606 + struct file_operations trace_enable;
53607 + struct file_operations trace_format;
53608 + struct file_operations trace_filter;
53609 #endif
53610 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53611 unsigned int num_ftrace_callsites;
53612 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
53613 bool is_module_percpu_address(unsigned long addr);
53614 bool is_module_text_address(unsigned long addr);
53615
53616 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53617 +{
53618 +
53619 +#ifdef CONFIG_PAX_KERNEXEC
53620 + if (ktla_ktva(addr) >= (unsigned long)start &&
53621 + ktla_ktva(addr) < (unsigned long)start + size)
53622 + return 1;
53623 +#endif
53624 +
53625 + return ((void *)addr >= start && (void *)addr < start + size);
53626 +}
53627 +
53628 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53629 +{
53630 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53631 +}
53632 +
53633 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53634 +{
53635 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53636 +}
53637 +
53638 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53639 +{
53640 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53641 +}
53642 +
53643 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53644 +{
53645 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53646 +}
53647 +
53648 static inline int within_module_core(unsigned long addr, struct module *mod)
53649 {
53650 - return (unsigned long)mod->module_core <= addr &&
53651 - addr < (unsigned long)mod->module_core + mod->core_size;
53652 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53653 }
53654
53655 static inline int within_module_init(unsigned long addr, struct module *mod)
53656 {
53657 - return (unsigned long)mod->module_init <= addr &&
53658 - addr < (unsigned long)mod->module_init + mod->init_size;
53659 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53660 }
53661
53662 /* Search for module by name: must hold module_mutex. */
53663 diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
53664 --- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
53665 +++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
53666 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53667 sections. Returns NULL on failure. */
53668 void *module_alloc(unsigned long size);
53669
53670 +#ifdef CONFIG_PAX_KERNEXEC
53671 +void *module_alloc_exec(unsigned long size);
53672 +#else
53673 +#define module_alloc_exec(x) module_alloc(x)
53674 +#endif
53675 +
53676 /* Free memory returned from module_alloc. */
53677 void module_free(struct module *mod, void *module_region);
53678
53679 +#ifdef CONFIG_PAX_KERNEXEC
53680 +void module_free_exec(struct module *mod, void *module_region);
53681 +#else
53682 +#define module_free_exec(x, y) module_free((x), (y))
53683 +#endif
53684 +
53685 /* Apply the given relocation to the (simplified) ELF. Return -error
53686 or 0. */
53687 int apply_relocate(Elf_Shdr *sechdrs,
53688 diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
53689 --- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
53690 +++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
53691 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53692 * @len is usually just sizeof(string).
53693 */
53694 #define module_param_string(name, string, len, perm) \
53695 - static const struct kparam_string __param_string_##name \
53696 + static const struct kparam_string __param_string_##name __used \
53697 = { len, string }; \
53698 __module_param_call(MODULE_PARAM_PREFIX, name, \
53699 &param_ops_string, \
53700 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53701 * module_param_named() for why this might be necessary.
53702 */
53703 #define module_param_array_named(name, array, type, nump, perm) \
53704 - static const struct kparam_array __param_arr_##name \
53705 + static const struct kparam_array __param_arr_##name __used \
53706 = { .max = ARRAY_SIZE(array), .num = nump, \
53707 .ops = &param_ops_##type, \
53708 .elemsize = sizeof(array[0]), .elem = array }; \
53709 diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
53710 --- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
53711 +++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
53712 @@ -24,7 +24,7 @@ struct nameidata {
53713 unsigned seq;
53714 int last_type;
53715 unsigned depth;
53716 - char *saved_names[MAX_NESTED_LINKS + 1];
53717 + const char *saved_names[MAX_NESTED_LINKS + 1];
53718
53719 /* Intent data */
53720 union {
53721 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53722 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53723 extern void unlock_rename(struct dentry *, struct dentry *);
53724
53725 -static inline void nd_set_link(struct nameidata *nd, char *path)
53726 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53727 {
53728 nd->saved_names[nd->depth] = path;
53729 }
53730
53731 -static inline char *nd_get_link(struct nameidata *nd)
53732 +static inline const char *nd_get_link(const struct nameidata *nd)
53733 {
53734 return nd->saved_names[nd->depth];
53735 }
53736 diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
53737 --- linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
53738 +++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
53739 @@ -979,6 +979,7 @@ struct net_device_ops {
53740 int (*ndo_set_features)(struct net_device *dev,
53741 u32 features);
53742 };
53743 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53744
53745 /*
53746 * The DEVICE structure.
53747 diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
53748 --- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53749 +++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
53750 @@ -0,0 +1,9 @@
53751 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53752 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53753 +
53754 +struct xt_gradm_mtinfo {
53755 + __u16 flags;
53756 + __u16 invflags;
53757 +};
53758 +
53759 +#endif
53760 diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
53761 --- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
53762 +++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
53763 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53764 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53765 char const * name, ulong * val);
53766
53767 -/** Create a file for read-only access to an atomic_t. */
53768 +/** Create a file for read-only access to an atomic_unchecked_t. */
53769 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53770 - char const * name, atomic_t * val);
53771 + char const * name, atomic_unchecked_t * val);
53772
53773 /** create a directory */
53774 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53775 diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
53776 --- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
53777 +++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
53778 @@ -129,7 +129,7 @@ struct parallel_data {
53779 struct padata_instance *pinst;
53780 struct padata_parallel_queue __percpu *pqueue;
53781 struct padata_serial_queue __percpu *squeue;
53782 - atomic_t seq_nr;
53783 + atomic_unchecked_t seq_nr;
53784 atomic_t reorder_objects;
53785 atomic_t refcnt;
53786 unsigned int max_seq_nr;
53787 diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
53788 --- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
53789 +++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
53790 @@ -761,8 +761,8 @@ struct perf_event {
53791
53792 enum perf_event_active_state state;
53793 unsigned int attach_state;
53794 - local64_t count;
53795 - atomic64_t child_count;
53796 + local64_t count; /* PaX: fix it one day */
53797 + atomic64_unchecked_t child_count;
53798
53799 /*
53800 * These are the total time in nanoseconds that the event
53801 @@ -813,8 +813,8 @@ struct perf_event {
53802 * These accumulate total time (in nanoseconds) that children
53803 * events have been enabled and running, respectively.
53804 */
53805 - atomic64_t child_total_time_enabled;
53806 - atomic64_t child_total_time_running;
53807 + atomic64_unchecked_t child_total_time_enabled;
53808 + atomic64_unchecked_t child_total_time_running;
53809
53810 /*
53811 * Protect attach/detach and child_list:
53812 diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
53813 --- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
53814 +++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
53815 @@ -46,9 +46,9 @@ struct pipe_buffer {
53816 struct pipe_inode_info {
53817 wait_queue_head_t wait;
53818 unsigned int nrbufs, curbuf, buffers;
53819 - unsigned int readers;
53820 - unsigned int writers;
53821 - unsigned int waiting_writers;
53822 + atomic_t readers;
53823 + atomic_t writers;
53824 + atomic_t waiting_writers;
53825 unsigned int r_counter;
53826 unsigned int w_counter;
53827 struct page *tmp_page;
53828 diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
53829 --- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
53830 +++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
53831 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53832
53833 static inline void pm_runtime_mark_last_busy(struct device *dev)
53834 {
53835 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
53836 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53837 }
53838
53839 #else /* !CONFIG_PM_RUNTIME */
53840 diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
53841 --- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
53842 +++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
53843 @@ -19,8 +19,8 @@
53844 * under normal circumstances, used to verify that nobody uses
53845 * non-initialized list entries.
53846 */
53847 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53848 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53849 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53850 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53851
53852 /********** include/linux/timer.h **********/
53853 /*
53854 diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
53855 --- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
53856 +++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
53857 @@ -115,7 +115,7 @@ struct preempt_ops {
53858 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53859 void (*sched_out)(struct preempt_notifier *notifier,
53860 struct task_struct *next);
53861 -};
53862 +} __no_const;
53863
53864 /**
53865 * preempt_notifier - key for installing preemption notifiers
53866 diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
53867 --- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
53868 +++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
53869 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53870 return proc_create_data(name, mode, parent, proc_fops, NULL);
53871 }
53872
53873 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53874 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53875 +{
53876 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53877 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53878 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53879 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53880 +#else
53881 + return proc_create_data(name, mode, parent, proc_fops, NULL);
53882 +#endif
53883 +}
53884 +
53885 +
53886 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
53887 mode_t mode, struct proc_dir_entry *base,
53888 read_proc_t *read_proc, void * data)
53889 @@ -258,7 +271,7 @@ union proc_op {
53890 int (*proc_show)(struct seq_file *m,
53891 struct pid_namespace *ns, struct pid *pid,
53892 struct task_struct *task);
53893 -};
53894 +} __no_const;
53895
53896 struct ctl_table_header;
53897 struct ctl_table;
53898 diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
53899 --- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
53900 +++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
53901 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
53902 extern void exit_ptrace(struct task_struct *tracer);
53903 #define PTRACE_MODE_READ 1
53904 #define PTRACE_MODE_ATTACH 2
53905 -/* Returns 0 on success, -errno on denial. */
53906 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
53907 /* Returns true on success, false on denial. */
53908 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
53909 +/* Returns true on success, false on denial. */
53910 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
53911
53912 static inline int ptrace_reparented(struct task_struct *child)
53913 {
53914 diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
53915 --- linux-3.0.4/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
53916 +++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
53917 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
53918
53919 u32 prandom32(struct rnd_state *);
53920
53921 +static inline unsigned long pax_get_random_long(void)
53922 +{
53923 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
53924 +}
53925 +
53926 /*
53927 * Handle minimum values for seeds
53928 */
53929 static inline u32 __seed(u32 x, u32 m)
53930 {
53931 - return (x < m) ? x + m : x;
53932 + return (x <= m) ? x + m + 1 : x;
53933 }
53934
53935 /**
53936 diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
53937 --- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
53938 +++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
53939 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
53940 * Architecture-specific implementations of sys_reboot commands.
53941 */
53942
53943 -extern void machine_restart(char *cmd);
53944 -extern void machine_halt(void);
53945 -extern void machine_power_off(void);
53946 +extern void machine_restart(char *cmd) __noreturn;
53947 +extern void machine_halt(void) __noreturn;
53948 +extern void machine_power_off(void) __noreturn;
53949
53950 extern void machine_shutdown(void);
53951 struct pt_regs;
53952 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
53953 */
53954
53955 extern void kernel_restart_prepare(char *cmd);
53956 -extern void kernel_restart(char *cmd);
53957 -extern void kernel_halt(void);
53958 -extern void kernel_power_off(void);
53959 +extern void kernel_restart(char *cmd) __noreturn;
53960 +extern void kernel_halt(void) __noreturn;
53961 +extern void kernel_power_off(void) __noreturn;
53962
53963 extern int C_A_D; /* for sysctl */
53964 void ctrl_alt_del(void);
53965 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
53966 * Emergency restart, callable from an interrupt handler.
53967 */
53968
53969 -extern void emergency_restart(void);
53970 +extern void emergency_restart(void) __noreturn;
53971 #include <asm/emergency-restart.h>
53972
53973 #endif
53974 diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
53975 --- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
53976 +++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
53977 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
53978 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
53979
53980 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
53981 -#define get_generation(s) atomic_read (&fs_generation(s))
53982 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
53983 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
53984 #define __fs_changed(gen,s) (gen != get_generation (s))
53985 #define fs_changed(gen,s) \
53986 diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
53987 --- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
53988 +++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
53989 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
53990 /* Comment? -Hans */
53991 wait_queue_head_t s_wait;
53992 /* To be obsoleted soon by per buffer seals.. -Hans */
53993 - atomic_t s_generation_counter; // increased by one every time the
53994 + atomic_unchecked_t s_generation_counter; // increased by one every time the
53995 // tree gets re-balanced
53996 unsigned long s_properties; /* File system properties. Currently holds
53997 on-disk FS format */
53998 diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
53999 --- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
54000 +++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
54001 @@ -159,7 +159,7 @@ struct rchan_callbacks
54002 * The callback should return 0 if successful, negative if not.
54003 */
54004 int (*remove_buf_file)(struct dentry *dentry);
54005 -};
54006 +} __no_const;
54007
54008 /*
54009 * CONFIG_RELAY kernel API, kernel/relay.c
54010 diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
54011 --- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
54012 +++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
54013 @@ -147,6 +147,7 @@ struct rfkill_ops {
54014 void (*query)(struct rfkill *rfkill, void *data);
54015 int (*set_block)(void *data, bool blocked);
54016 };
54017 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54018
54019 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54020 /**
54021 diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
54022 --- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
54023 +++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
54024 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54025 void anon_vma_init(void); /* create anon_vma_cachep */
54026 int anon_vma_prepare(struct vm_area_struct *);
54027 void unlink_anon_vmas(struct vm_area_struct *);
54028 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54029 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54030 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54031 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54032 void __anon_vma_link(struct vm_area_struct *);
54033
54034 static inline void anon_vma_merge(struct vm_area_struct *vma,
54035 diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
54036 --- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
54037 +++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
54038 @@ -100,6 +100,7 @@ struct bio_list;
54039 struct fs_struct;
54040 struct perf_event_context;
54041 struct blk_plug;
54042 +struct linux_binprm;
54043
54044 /*
54045 * List of flags we want to share for kernel threads,
54046 @@ -380,10 +381,13 @@ struct user_namespace;
54047 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54048
54049 extern int sysctl_max_map_count;
54050 +extern unsigned long sysctl_heap_stack_gap;
54051
54052 #include <linux/aio.h>
54053
54054 #ifdef CONFIG_MMU
54055 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54056 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54057 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54058 extern unsigned long
54059 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54060 @@ -629,6 +633,17 @@ struct signal_struct {
54061 #ifdef CONFIG_TASKSTATS
54062 struct taskstats *stats;
54063 #endif
54064 +
54065 +#ifdef CONFIG_GRKERNSEC
54066 + u32 curr_ip;
54067 + u32 saved_ip;
54068 + u32 gr_saddr;
54069 + u32 gr_daddr;
54070 + u16 gr_sport;
54071 + u16 gr_dport;
54072 + u8 used_accept:1;
54073 +#endif
54074 +
54075 #ifdef CONFIG_AUDIT
54076 unsigned audit_tty;
54077 struct tty_audit_buf *tty_audit_buf;
54078 @@ -710,6 +725,11 @@ struct user_struct {
54079 struct key *session_keyring; /* UID's default session keyring */
54080 #endif
54081
54082 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54083 + unsigned int banned;
54084 + unsigned long ban_expires;
54085 +#endif
54086 +
54087 /* Hash table maintenance information */
54088 struct hlist_node uidhash_node;
54089 uid_t uid;
54090 @@ -1340,8 +1360,8 @@ struct task_struct {
54091 struct list_head thread_group;
54092
54093 struct completion *vfork_done; /* for vfork() */
54094 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54095 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54096 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54097 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54098
54099 cputime_t utime, stime, utimescaled, stimescaled;
54100 cputime_t gtime;
54101 @@ -1357,13 +1377,6 @@ struct task_struct {
54102 struct task_cputime cputime_expires;
54103 struct list_head cpu_timers[3];
54104
54105 -/* process credentials */
54106 - const struct cred __rcu *real_cred; /* objective and real subjective task
54107 - * credentials (COW) */
54108 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54109 - * credentials (COW) */
54110 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54111 -
54112 char comm[TASK_COMM_LEN]; /* executable name excluding path
54113 - access with [gs]et_task_comm (which lock
54114 it with task_lock())
54115 @@ -1380,8 +1393,16 @@ struct task_struct {
54116 #endif
54117 /* CPU-specific state of this task */
54118 struct thread_struct thread;
54119 +/* thread_info moved to task_struct */
54120 +#ifdef CONFIG_X86
54121 + struct thread_info tinfo;
54122 +#endif
54123 /* filesystem information */
54124 struct fs_struct *fs;
54125 +
54126 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54127 + * credentials (COW) */
54128 +
54129 /* open file information */
54130 struct files_struct *files;
54131 /* namespaces */
54132 @@ -1428,6 +1449,11 @@ struct task_struct {
54133 struct rt_mutex_waiter *pi_blocked_on;
54134 #endif
54135
54136 +/* process credentials */
54137 + const struct cred __rcu *real_cred; /* objective and real subjective task
54138 + * credentials (COW) */
54139 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54140 +
54141 #ifdef CONFIG_DEBUG_MUTEXES
54142 /* mutex deadlock detection */
54143 struct mutex_waiter *blocked_on;
54144 @@ -1538,6 +1564,21 @@ struct task_struct {
54145 unsigned long default_timer_slack_ns;
54146
54147 struct list_head *scm_work_list;
54148 +
54149 +#ifdef CONFIG_GRKERNSEC
54150 + /* grsecurity */
54151 + struct dentry *gr_chroot_dentry;
54152 + struct acl_subject_label *acl;
54153 + struct acl_role_label *role;
54154 + struct file *exec_file;
54155 + u16 acl_role_id;
54156 + /* is this the task that authenticated to the special role */
54157 + u8 acl_sp_role;
54158 + u8 is_writable;
54159 + u8 brute;
54160 + u8 gr_is_chrooted;
54161 +#endif
54162 +
54163 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54164 /* Index of current stored address in ret_stack */
54165 int curr_ret_stack;
54166 @@ -1572,6 +1613,57 @@ struct task_struct {
54167 #endif
54168 };
54169
54170 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54171 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54172 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54173 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54174 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54175 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54176 +
54177 +#ifdef CONFIG_PAX_SOFTMODE
54178 +extern int pax_softmode;
54179 +#endif
54180 +
54181 +extern int pax_check_flags(unsigned long *);
54182 +
54183 +/* if tsk != current then task_lock must be held on it */
54184 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54185 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54186 +{
54187 + if (likely(tsk->mm))
54188 + return tsk->mm->pax_flags;
54189 + else
54190 + return 0UL;
54191 +}
54192 +
54193 +/* if tsk != current then task_lock must be held on it */
54194 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54195 +{
54196 + if (likely(tsk->mm)) {
54197 + tsk->mm->pax_flags = flags;
54198 + return 0;
54199 + }
54200 + return -EINVAL;
54201 +}
54202 +#endif
54203 +
54204 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54205 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54206 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54207 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54208 +#endif
54209 +
54210 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54211 +extern void pax_report_insns(void *pc, void *sp);
54212 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54213 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54214 +
54215 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54216 +extern void pax_track_stack(void);
54217 +#else
54218 +static inline void pax_track_stack(void) {}
54219 +#endif
54220 +
54221 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54222 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54223
54224 @@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
54225 #define PF_DUMPCORE 0x00000200 /* dumped core */
54226 #define PF_SIGNALED 0x00000400 /* killed by a signal */
54227 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
54228 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
54229 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
54230 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
54231 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
54232 @@ -2056,7 +2149,9 @@ void yield(void);
54233 extern struct exec_domain default_exec_domain;
54234
54235 union thread_union {
54236 +#ifndef CONFIG_X86
54237 struct thread_info thread_info;
54238 +#endif
54239 unsigned long stack[THREAD_SIZE/sizeof(long)];
54240 };
54241
54242 @@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
54243 */
54244
54245 extern struct task_struct *find_task_by_vpid(pid_t nr);
54246 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54247 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54248 struct pid_namespace *ns);
54249
54250 @@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
54251 extern void exit_itimers(struct signal_struct *);
54252 extern void flush_itimer_signals(void);
54253
54254 -extern NORET_TYPE void do_group_exit(int);
54255 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54256
54257 extern void daemonize(const char *, ...);
54258 extern int allow_signal(int);
54259 @@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
54260
54261 #endif
54262
54263 -static inline int object_is_on_stack(void *obj)
54264 +static inline int object_starts_on_stack(void *obj)
54265 {
54266 - void *stack = task_stack_page(current);
54267 + const void *stack = task_stack_page(current);
54268
54269 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54270 }
54271
54272 +#ifdef CONFIG_PAX_USERCOPY
54273 +extern int object_is_on_stack(const void *obj, unsigned long len);
54274 +#endif
54275 +
54276 extern void thread_info_cache_init(void);
54277
54278 #ifdef CONFIG_DEBUG_STACK_USAGE
54279 diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
54280 --- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
54281 +++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
54282 @@ -43,7 +43,8 @@ struct screen_info {
54283 __u16 pages; /* 0x32 */
54284 __u16 vesa_attributes; /* 0x34 */
54285 __u32 capabilities; /* 0x36 */
54286 - __u8 _reserved[6]; /* 0x3a */
54287 + __u16 vesapm_size; /* 0x3a */
54288 + __u8 _reserved[4]; /* 0x3c */
54289 } __attribute__((packed));
54290
54291 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54292 diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
54293 --- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
54294 +++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
54295 @@ -36,6 +36,7 @@
54296 #include <linux/key.h>
54297 #include <linux/xfrm.h>
54298 #include <linux/slab.h>
54299 +#include <linux/grsecurity.h>
54300 #include <net/flow.h>
54301
54302 /* Maximum number of letters for an LSM name string */
54303 diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
54304 --- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
54305 +++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
54306 @@ -32,6 +32,7 @@ struct seq_operations {
54307 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54308 int (*show) (struct seq_file *m, void *v);
54309 };
54310 +typedef struct seq_operations __no_const seq_operations_no_const;
54311
54312 #define SEQ_SKIP 1
54313
54314 diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
54315 --- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
54316 +++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
54317 @@ -10,7 +10,7 @@
54318
54319 #define SHMEM_NR_DIRECT 16
54320
54321 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
54322 +#define SHMEM_SYMLINK_INLINE_LEN 64
54323
54324 struct shmem_inode_info {
54325 spinlock_t lock;
54326 diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
54327 --- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
54328 +++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
54329 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54330 pid_t shm_cprid;
54331 pid_t shm_lprid;
54332 struct user_struct *mlock_user;
54333 +#ifdef CONFIG_GRKERNSEC
54334 + time_t shm_createtime;
54335 + pid_t shm_lapid;
54336 +#endif
54337 };
54338
54339 /* shm_mode upper byte flags */
54340 diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
54341 --- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
54342 +++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
54343 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54344 */
54345 static inline int skb_queue_empty(const struct sk_buff_head *list)
54346 {
54347 - return list->next == (struct sk_buff *)list;
54348 + return list->next == (const struct sk_buff *)list;
54349 }
54350
54351 /**
54352 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54353 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54354 const struct sk_buff *skb)
54355 {
54356 - return skb->next == (struct sk_buff *)list;
54357 + return skb->next == (const struct sk_buff *)list;
54358 }
54359
54360 /**
54361 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54362 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54363 const struct sk_buff *skb)
54364 {
54365 - return skb->prev == (struct sk_buff *)list;
54366 + return skb->prev == (const struct sk_buff *)list;
54367 }
54368
54369 /**
54370 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
54371 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54372 */
54373 #ifndef NET_SKB_PAD
54374 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54375 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54376 #endif
54377
54378 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54379 diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
54380 --- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
54381 +++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
54382 @@ -96,10 +96,10 @@ struct kmem_cache {
54383 unsigned long node_allocs;
54384 unsigned long node_frees;
54385 unsigned long node_overflow;
54386 - atomic_t allochit;
54387 - atomic_t allocmiss;
54388 - atomic_t freehit;
54389 - atomic_t freemiss;
54390 + atomic_unchecked_t allochit;
54391 + atomic_unchecked_t allocmiss;
54392 + atomic_unchecked_t freehit;
54393 + atomic_unchecked_t freemiss;
54394
54395 /*
54396 * If debugging is enabled, then the allocator can add additional
54397 diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
54398 --- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
54399 +++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
54400 @@ -11,12 +11,20 @@
54401
54402 #include <linux/gfp.h>
54403 #include <linux/types.h>
54404 +#include <linux/err.h>
54405
54406 /*
54407 * Flags to pass to kmem_cache_create().
54408 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54409 */
54410 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54411 +
54412 +#ifdef CONFIG_PAX_USERCOPY
54413 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54414 +#else
54415 +#define SLAB_USERCOPY 0x00000000UL
54416 +#endif
54417 +
54418 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54419 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54420 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54421 @@ -87,10 +95,13 @@
54422 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54423 * Both make kfree a no-op.
54424 */
54425 -#define ZERO_SIZE_PTR ((void *)16)
54426 +#define ZERO_SIZE_PTR \
54427 +({ \
54428 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54429 + (void *)(-MAX_ERRNO-1L); \
54430 +})
54431
54432 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54433 - (unsigned long)ZERO_SIZE_PTR)
54434 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54435
54436 /*
54437 * struct kmem_cache related prototypes
54438 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54439 void kfree(const void *);
54440 void kzfree(const void *);
54441 size_t ksize(const void *);
54442 +void check_object_size(const void *ptr, unsigned long n, bool to);
54443
54444 /*
54445 * Allocator specific definitions. These are mainly used to establish optimized
54446 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54447
54448 void __init kmem_cache_init_late(void);
54449
54450 +#define kmalloc(x, y) \
54451 +({ \
54452 + void *___retval; \
54453 + intoverflow_t ___x = (intoverflow_t)x; \
54454 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54455 + ___retval = NULL; \
54456 + else \
54457 + ___retval = kmalloc((size_t)___x, (y)); \
54458 + ___retval; \
54459 +})
54460 +
54461 +#define kmalloc_node(x, y, z) \
54462 +({ \
54463 + void *___retval; \
54464 + intoverflow_t ___x = (intoverflow_t)x; \
54465 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54466 + ___retval = NULL; \
54467 + else \
54468 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54469 + ___retval; \
54470 +})
54471 +
54472 +#define kzalloc(x, y) \
54473 +({ \
54474 + void *___retval; \
54475 + intoverflow_t ___x = (intoverflow_t)x; \
54476 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54477 + ___retval = NULL; \
54478 + else \
54479 + ___retval = kzalloc((size_t)___x, (y)); \
54480 + ___retval; \
54481 +})
54482 +
54483 +#define __krealloc(x, y, z) \
54484 +({ \
54485 + void *___retval; \
54486 + intoverflow_t ___y = (intoverflow_t)y; \
54487 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54488 + ___retval = NULL; \
54489 + else \
54490 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54491 + ___retval; \
54492 +})
54493 +
54494 +#define krealloc(x, y, z) \
54495 +({ \
54496 + void *___retval; \
54497 + intoverflow_t ___y = (intoverflow_t)y; \
54498 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54499 + ___retval = NULL; \
54500 + else \
54501 + ___retval = krealloc((x), (size_t)___y, (z)); \
54502 + ___retval; \
54503 +})
54504 +
54505 #endif /* _LINUX_SLAB_H */
54506 diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
54507 --- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
54508 +++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
54509 @@ -82,7 +82,7 @@ struct kmem_cache {
54510 struct kmem_cache_order_objects max;
54511 struct kmem_cache_order_objects min;
54512 gfp_t allocflags; /* gfp flags to use on each alloc */
54513 - int refcount; /* Refcount for slab cache destroy */
54514 + atomic_t refcount; /* Refcount for slab cache destroy */
54515 void (*ctor)(void *);
54516 int inuse; /* Offset to metadata */
54517 int align; /* Alignment */
54518 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54519 }
54520
54521 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54522 -void *__kmalloc(size_t size, gfp_t flags);
54523 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54524
54525 static __always_inline void *
54526 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54527 diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
54528 --- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
54529 +++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
54530 @@ -61,7 +61,7 @@ struct sonet_stats {
54531 #include <asm/atomic.h>
54532
54533 struct k_sonet_stats {
54534 -#define __HANDLE_ITEM(i) atomic_t i
54535 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54536 __SONET_ITEMS
54537 #undef __HANDLE_ITEM
54538 };
54539 diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
54540 --- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
54541 +++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
54542 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54543 {
54544 switch (sap->sa_family) {
54545 case AF_INET:
54546 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54547 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54548 case AF_INET6:
54549 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54550 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54551 }
54552 return 0;
54553 }
54554 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54555 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54556 const struct sockaddr *src)
54557 {
54558 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54559 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54560 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54561
54562 dsin->sin_family = ssin->sin_family;
54563 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54564 if (sa->sa_family != AF_INET6)
54565 return 0;
54566
54567 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54568 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54569 }
54570
54571 #endif /* __KERNEL__ */
54572 diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
54573 --- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
54574 +++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
54575 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54576 extern unsigned int svcrdma_max_requests;
54577 extern unsigned int svcrdma_max_req_size;
54578
54579 -extern atomic_t rdma_stat_recv;
54580 -extern atomic_t rdma_stat_read;
54581 -extern atomic_t rdma_stat_write;
54582 -extern atomic_t rdma_stat_sq_starve;
54583 -extern atomic_t rdma_stat_rq_starve;
54584 -extern atomic_t rdma_stat_rq_poll;
54585 -extern atomic_t rdma_stat_rq_prod;
54586 -extern atomic_t rdma_stat_sq_poll;
54587 -extern atomic_t rdma_stat_sq_prod;
54588 +extern atomic_unchecked_t rdma_stat_recv;
54589 +extern atomic_unchecked_t rdma_stat_read;
54590 +extern atomic_unchecked_t rdma_stat_write;
54591 +extern atomic_unchecked_t rdma_stat_sq_starve;
54592 +extern atomic_unchecked_t rdma_stat_rq_starve;
54593 +extern atomic_unchecked_t rdma_stat_rq_poll;
54594 +extern atomic_unchecked_t rdma_stat_rq_prod;
54595 +extern atomic_unchecked_t rdma_stat_sq_poll;
54596 +extern atomic_unchecked_t rdma_stat_sq_prod;
54597
54598 #define RPCRDMA_VERSION 1
54599
54600 diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
54601 --- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
54602 +++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
54603 @@ -155,7 +155,11 @@ enum
54604 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54605 };
54606
54607 -
54608 +#ifdef CONFIG_PAX_SOFTMODE
54609 +enum {
54610 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54611 +};
54612 +#endif
54613
54614 /* CTL_VM names: */
54615 enum
54616 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54617
54618 extern int proc_dostring(struct ctl_table *, int,
54619 void __user *, size_t *, loff_t *);
54620 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54621 + void __user *, size_t *, loff_t *);
54622 extern int proc_dointvec(struct ctl_table *, int,
54623 void __user *, size_t *, loff_t *);
54624 extern int proc_dointvec_minmax(struct ctl_table *, int,
54625 diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
54626 --- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
54627 +++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
54628 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54629
54630 struct module *owner;
54631
54632 - int refcount;
54633 + atomic_t refcount;
54634 };
54635
54636 struct tty_ldisc {
54637 diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
54638 --- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
54639 +++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
54640 @@ -213,10 +213,26 @@ typedef struct {
54641 int counter;
54642 } atomic_t;
54643
54644 +#ifdef CONFIG_PAX_REFCOUNT
54645 +typedef struct {
54646 + int counter;
54647 +} atomic_unchecked_t;
54648 +#else
54649 +typedef atomic_t atomic_unchecked_t;
54650 +#endif
54651 +
54652 #ifdef CONFIG_64BIT
54653 typedef struct {
54654 long counter;
54655 } atomic64_t;
54656 +
54657 +#ifdef CONFIG_PAX_REFCOUNT
54658 +typedef struct {
54659 + long counter;
54660 +} atomic64_unchecked_t;
54661 +#else
54662 +typedef atomic64_t atomic64_unchecked_t;
54663 +#endif
54664 #endif
54665
54666 struct list_head {
54667 diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
54668 --- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
54669 +++ linux-3.0.4/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
54670 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54671 long ret; \
54672 mm_segment_t old_fs = get_fs(); \
54673 \
54674 - set_fs(KERNEL_DS); \
54675 pagefault_disable(); \
54676 + set_fs(KERNEL_DS); \
54677 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54678 - pagefault_enable(); \
54679 set_fs(old_fs); \
54680 + pagefault_enable(); \
54681 ret; \
54682 })
54683
54684 diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
54685 --- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
54686 +++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
54687 @@ -6,32 +6,32 @@
54688
54689 static inline u16 get_unaligned_le16(const void *p)
54690 {
54691 - return le16_to_cpup((__le16 *)p);
54692 + return le16_to_cpup((const __le16 *)p);
54693 }
54694
54695 static inline u32 get_unaligned_le32(const void *p)
54696 {
54697 - return le32_to_cpup((__le32 *)p);
54698 + return le32_to_cpup((const __le32 *)p);
54699 }
54700
54701 static inline u64 get_unaligned_le64(const void *p)
54702 {
54703 - return le64_to_cpup((__le64 *)p);
54704 + return le64_to_cpup((const __le64 *)p);
54705 }
54706
54707 static inline u16 get_unaligned_be16(const void *p)
54708 {
54709 - return be16_to_cpup((__be16 *)p);
54710 + return be16_to_cpup((const __be16 *)p);
54711 }
54712
54713 static inline u32 get_unaligned_be32(const void *p)
54714 {
54715 - return be32_to_cpup((__be32 *)p);
54716 + return be32_to_cpup((const __be32 *)p);
54717 }
54718
54719 static inline u64 get_unaligned_be64(const void *p)
54720 {
54721 - return be64_to_cpup((__be64 *)p);
54722 + return be64_to_cpup((const __be64 *)p);
54723 }
54724
54725 static inline void put_unaligned_le16(u16 val, void *p)
54726 diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
54727 --- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
54728 +++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
54729 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54730 #define VM_MAP 0x00000004 /* vmap()ed pages */
54731 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54732 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54733 +
54734 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54735 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54736 +#endif
54737 +
54738 /* bits [20..32] reserved for arch specific ioremap internals */
54739
54740 /*
54741 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54742 # endif
54743 #endif
54744
54745 +#define vmalloc(x) \
54746 +({ \
54747 + void *___retval; \
54748 + intoverflow_t ___x = (intoverflow_t)x; \
54749 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54750 + ___retval = NULL; \
54751 + else \
54752 + ___retval = vmalloc((unsigned long)___x); \
54753 + ___retval; \
54754 +})
54755 +
54756 +#define vzalloc(x) \
54757 +({ \
54758 + void *___retval; \
54759 + intoverflow_t ___x = (intoverflow_t)x; \
54760 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54761 + ___retval = NULL; \
54762 + else \
54763 + ___retval = vzalloc((unsigned long)___x); \
54764 + ___retval; \
54765 +})
54766 +
54767 +#define __vmalloc(x, y, z) \
54768 +({ \
54769 + void *___retval; \
54770 + intoverflow_t ___x = (intoverflow_t)x; \
54771 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54772 + ___retval = NULL; \
54773 + else \
54774 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54775 + ___retval; \
54776 +})
54777 +
54778 +#define vmalloc_user(x) \
54779 +({ \
54780 + void *___retval; \
54781 + intoverflow_t ___x = (intoverflow_t)x; \
54782 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54783 + ___retval = NULL; \
54784 + else \
54785 + ___retval = vmalloc_user((unsigned long)___x); \
54786 + ___retval; \
54787 +})
54788 +
54789 +#define vmalloc_exec(x) \
54790 +({ \
54791 + void *___retval; \
54792 + intoverflow_t ___x = (intoverflow_t)x; \
54793 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54794 + ___retval = NULL; \
54795 + else \
54796 + ___retval = vmalloc_exec((unsigned long)___x); \
54797 + ___retval; \
54798 +})
54799 +
54800 +#define vmalloc_node(x, y) \
54801 +({ \
54802 + void *___retval; \
54803 + intoverflow_t ___x = (intoverflow_t)x; \
54804 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54805 + ___retval = NULL; \
54806 + else \
54807 + ___retval = vmalloc_node((unsigned long)___x, (y));\
54808 + ___retval; \
54809 +})
54810 +
54811 +#define vzalloc_node(x, y) \
54812 +({ \
54813 + void *___retval; \
54814 + intoverflow_t ___x = (intoverflow_t)x; \
54815 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54816 + ___retval = NULL; \
54817 + else \
54818 + ___retval = vzalloc_node((unsigned long)___x, (y));\
54819 + ___retval; \
54820 +})
54821 +
54822 +#define vmalloc_32(x) \
54823 +({ \
54824 + void *___retval; \
54825 + intoverflow_t ___x = (intoverflow_t)x; \
54826 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54827 + ___retval = NULL; \
54828 + else \
54829 + ___retval = vmalloc_32((unsigned long)___x); \
54830 + ___retval; \
54831 +})
54832 +
54833 +#define vmalloc_32_user(x) \
54834 +({ \
54835 +void *___retval; \
54836 + intoverflow_t ___x = (intoverflow_t)x; \
54837 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54838 + ___retval = NULL; \
54839 + else \
54840 + ___retval = vmalloc_32_user((unsigned long)___x);\
54841 + ___retval; \
54842 +})
54843 +
54844 #endif /* _LINUX_VMALLOC_H */
54845 diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
54846 --- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
54847 +++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
54848 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
54849 /*
54850 * Zone based page accounting with per cpu differentials.
54851 */
54852 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54853 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54854
54855 static inline void zone_page_state_add(long x, struct zone *zone,
54856 enum zone_stat_item item)
54857 {
54858 - atomic_long_add(x, &zone->vm_stat[item]);
54859 - atomic_long_add(x, &vm_stat[item]);
54860 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54861 + atomic_long_add_unchecked(x, &vm_stat[item]);
54862 }
54863
54864 static inline unsigned long global_page_state(enum zone_stat_item item)
54865 {
54866 - long x = atomic_long_read(&vm_stat[item]);
54867 + long x = atomic_long_read_unchecked(&vm_stat[item]);
54868 #ifdef CONFIG_SMP
54869 if (x < 0)
54870 x = 0;
54871 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
54872 static inline unsigned long zone_page_state(struct zone *zone,
54873 enum zone_stat_item item)
54874 {
54875 - long x = atomic_long_read(&zone->vm_stat[item]);
54876 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54877 #ifdef CONFIG_SMP
54878 if (x < 0)
54879 x = 0;
54880 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
54881 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
54882 enum zone_stat_item item)
54883 {
54884 - long x = atomic_long_read(&zone->vm_stat[item]);
54885 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54886
54887 #ifdef CONFIG_SMP
54888 int cpu;
54889 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
54890
54891 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
54892 {
54893 - atomic_long_inc(&zone->vm_stat[item]);
54894 - atomic_long_inc(&vm_stat[item]);
54895 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
54896 + atomic_long_inc_unchecked(&vm_stat[item]);
54897 }
54898
54899 static inline void __inc_zone_page_state(struct page *page,
54900 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
54901
54902 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
54903 {
54904 - atomic_long_dec(&zone->vm_stat[item]);
54905 - atomic_long_dec(&vm_stat[item]);
54906 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
54907 + atomic_long_dec_unchecked(&vm_stat[item]);
54908 }
54909
54910 static inline void __dec_zone_page_state(struct page *page,
54911 diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
54912 --- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
54913 +++ linux-3.0.4/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
54914 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
54915 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
54916
54917 /* the extension can override this */
54918 - struct v4l2_ioctl_ops ops;
54919 + v4l2_ioctl_ops_no_const ops;
54920 /* pointer to the saa7146 core ops */
54921 const struct v4l2_ioctl_ops *core_ops;
54922
54923 diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
54924 --- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
54925 +++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
54926 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
54927 long (*vidioc_default) (struct file *file, void *fh,
54928 bool valid_prio, int cmd, void *arg);
54929 };
54930 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
54931
54932
54933 /* v4l debugging and diagnostics */
54934 diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
54935 --- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
54936 +++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
54937 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
54938 void (*radioset_rsp)(void);
54939 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54940 struct cflayer *client_layer);
54941 -};
54942 +} __no_const;
54943
54944 /* Link Setup Parameters for CAIF-Links. */
54945 struct cfctrl_link_param {
54946 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
54947 struct cfctrl {
54948 struct cfsrvl serv;
54949 struct cfctrl_rsp res;
54950 - atomic_t req_seq_no;
54951 - atomic_t rsp_seq_no;
54952 + atomic_unchecked_t req_seq_no;
54953 + atomic_unchecked_t rsp_seq_no;
54954 struct list_head list;
54955 /* Protects from simultaneous access to first_req list */
54956 spinlock_t info_list_lock;
54957 diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
54958 --- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
54959 +++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
54960 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
54961 u8 dir, flow_resolve_t resolver, void *ctx);
54962
54963 extern void flow_cache_flush(void);
54964 -extern atomic_t flow_cache_genid;
54965 +extern atomic_unchecked_t flow_cache_genid;
54966
54967 #endif
54968 diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
54969 --- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
54970 +++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
54971 @@ -43,8 +43,8 @@ struct inet_peer {
54972 */
54973 union {
54974 struct {
54975 - atomic_t rid; /* Frag reception counter */
54976 - atomic_t ip_id_count; /* IP ID for the next packet */
54977 + atomic_unchecked_t rid; /* Frag reception counter */
54978 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
54979 __u32 tcp_ts;
54980 __u32 tcp_ts_stamp;
54981 u32 metrics[RTAX_MAX];
54982 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
54983 {
54984 more++;
54985 inet_peer_refcheck(p);
54986 - return atomic_add_return(more, &p->ip_id_count) - more;
54987 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
54988 }
54989
54990 #endif /* _NET_INETPEER_H */
54991 diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
54992 --- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
54993 +++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
54994 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
54995
54996 #define FIB_RES_SADDR(net, res) \
54997 ((FIB_RES_NH(res).nh_saddr_genid == \
54998 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
54999 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55000 FIB_RES_NH(res).nh_saddr : \
55001 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55002 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55003 diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
55004 --- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
55005 +++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
55006 @@ -509,7 +509,7 @@ struct ip_vs_conn {
55007 struct ip_vs_conn *control; /* Master control connection */
55008 atomic_t n_control; /* Number of controlled ones */
55009 struct ip_vs_dest *dest; /* real server */
55010 - atomic_t in_pkts; /* incoming packet counter */
55011 + atomic_unchecked_t in_pkts; /* incoming packet counter */
55012
55013 /* packet transmitter for different forwarding methods. If it
55014 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55015 @@ -647,7 +647,7 @@ struct ip_vs_dest {
55016 __be16 port; /* port number of the server */
55017 union nf_inet_addr addr; /* IP address of the server */
55018 volatile unsigned flags; /* dest status flags */
55019 - atomic_t conn_flags; /* flags to copy to conn */
55020 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
55021 atomic_t weight; /* server weight */
55022
55023 atomic_t refcnt; /* reference counter */
55024 diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
55025 --- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
55026 +++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
55027 @@ -51,7 +51,7 @@ typedef struct {
55028 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55029 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55030 struct ircomm_info *);
55031 -} call_t;
55032 +} __no_const call_t;
55033
55034 struct ircomm_cb {
55035 irda_queue_t queue;
55036 diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
55037 --- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
55038 +++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
55039 @@ -35,6 +35,7 @@
55040 #include <linux/termios.h>
55041 #include <linux/timer.h>
55042 #include <linux/tty.h> /* struct tty_struct */
55043 +#include <asm/local.h>
55044
55045 #include <net/irda/irias_object.h>
55046 #include <net/irda/ircomm_core.h>
55047 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55048 unsigned short close_delay;
55049 unsigned short closing_wait; /* time to wait before closing */
55050
55051 - int open_count;
55052 - int blocked_open; /* # of blocked opens */
55053 + local_t open_count;
55054 + local_t blocked_open; /* # of blocked opens */
55055
55056 /* Protect concurent access to :
55057 * o self->open_count
55058 diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
55059 --- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
55060 +++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
55061 @@ -87,7 +87,7 @@ struct iucv_sock {
55062 struct iucv_sock_list {
55063 struct hlist_head head;
55064 rwlock_t lock;
55065 - atomic_t autobind_name;
55066 + atomic_unchecked_t autobind_name;
55067 };
55068
55069 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55070 diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
55071 --- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
55072 +++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
55073 @@ -95,7 +95,7 @@ struct lapb_cb {
55074 struct sk_buff_head write_queue;
55075 struct sk_buff_head ack_queue;
55076 unsigned char window;
55077 - struct lapb_register_struct callbacks;
55078 + struct lapb_register_struct *callbacks;
55079
55080 /* FRMR control information */
55081 struct lapb_frame frmr_data;
55082 diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
55083 --- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
55084 +++ linux-3.0.4/include/net/neighbour.h 2011-08-26 19:49:56.000000000 -0400
55085 @@ -117,14 +117,14 @@ struct neighbour {
55086 };
55087
55088 struct neigh_ops {
55089 - int family;
55090 + const int family;
55091 void (*solicit)(struct neighbour *, struct sk_buff*);
55092 void (*error_report)(struct neighbour *, struct sk_buff*);
55093 int (*output)(struct sk_buff*);
55094 int (*connected_output)(struct sk_buff*);
55095 int (*hh_output)(struct sk_buff*);
55096 int (*queue_xmit)(struct sk_buff*);
55097 -};
55098 +} __do_const;
55099
55100 struct pneigh_entry {
55101 struct pneigh_entry *next;
55102 diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
55103 --- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
55104 +++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
55105 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55106 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55107 {
55108 if (mark)
55109 - skb_trim(skb, (unsigned char *) mark - skb->data);
55110 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55111 }
55112
55113 /**
55114 diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
55115 --- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
55116 +++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
55117 @@ -56,8 +56,8 @@ struct netns_ipv4 {
55118
55119 unsigned int sysctl_ping_group_range[2];
55120
55121 - atomic_t rt_genid;
55122 - atomic_t dev_addr_genid;
55123 + atomic_unchecked_t rt_genid;
55124 + atomic_unchecked_t dev_addr_genid;
55125
55126 #ifdef CONFIG_IP_MROUTE
55127 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55128 diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
55129 --- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
55130 +++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
55131 @@ -315,9 +315,9 @@ do { \
55132
55133 #else /* SCTP_DEBUG */
55134
55135 -#define SCTP_DEBUG_PRINTK(whatever...)
55136 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55137 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55138 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55139 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55140 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55141 #define SCTP_ENABLE_DEBUG
55142 #define SCTP_DISABLE_DEBUG
55143 #define SCTP_ASSERT(expr, str, func)
55144 diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
55145 --- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
55146 +++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
55147 @@ -277,7 +277,7 @@ struct sock {
55148 #ifdef CONFIG_RPS
55149 __u32 sk_rxhash;
55150 #endif
55151 - atomic_t sk_drops;
55152 + atomic_unchecked_t sk_drops;
55153 int sk_rcvbuf;
55154
55155 struct sk_filter __rcu *sk_filter;
55156 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
55157 }
55158
55159 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
55160 - char __user *from, char *to,
55161 + char __user *from, unsigned char *to,
55162 int copy, int offset)
55163 {
55164 if (skb->ip_summed == CHECKSUM_NONE) {
55165 diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
55166 --- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
55167 +++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
55168 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55169 struct tcp_seq_afinfo {
55170 char *name;
55171 sa_family_t family;
55172 - struct file_operations seq_fops;
55173 - struct seq_operations seq_ops;
55174 + file_operations_no_const seq_fops;
55175 + seq_operations_no_const seq_ops;
55176 };
55177
55178 struct tcp_iter_state {
55179 diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
55180 --- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
55181 +++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
55182 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55183 char *name;
55184 sa_family_t family;
55185 struct udp_table *udp_table;
55186 - struct file_operations seq_fops;
55187 - struct seq_operations seq_ops;
55188 + file_operations_no_const seq_fops;
55189 + seq_operations_no_const seq_ops;
55190 };
55191
55192 struct udp_iter_state {
55193 diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
55194 --- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
55195 +++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
55196 @@ -505,7 +505,7 @@ struct xfrm_policy {
55197 struct timer_list timer;
55198
55199 struct flow_cache_object flo;
55200 - atomic_t genid;
55201 + atomic_unchecked_t genid;
55202 u32 priority;
55203 u32 index;
55204 struct xfrm_mark mark;
55205 diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
55206 --- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
55207 +++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
55208 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
55209 int backlog);
55210
55211 int (*destroy_listen)(struct iw_cm_id *cm_id);
55212 -};
55213 +} __no_const;
55214
55215 /**
55216 * iw_create_cm_id - Create an IW CM identifier.
55217 diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
55218 --- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
55219 +++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
55220 @@ -750,6 +750,7 @@ struct libfc_function_template {
55221 */
55222 void (*disc_stop_final) (struct fc_lport *);
55223 };
55224 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55225
55226 /**
55227 * struct fc_disc - Discovery context
55228 @@ -853,7 +854,7 @@ struct fc_lport {
55229 struct fc_vport *vport;
55230
55231 /* Operational Information */
55232 - struct libfc_function_template tt;
55233 + libfc_function_template_no_const tt;
55234 u8 link_up;
55235 u8 qfull;
55236 enum fc_lport_state state;
55237 diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
55238 --- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
55239 +++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
55240 @@ -161,9 +161,9 @@ struct scsi_device {
55241 unsigned int max_device_blocked; /* what device_blocked counts down from */
55242 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55243
55244 - atomic_t iorequest_cnt;
55245 - atomic_t iodone_cnt;
55246 - atomic_t ioerr_cnt;
55247 + atomic_unchecked_t iorequest_cnt;
55248 + atomic_unchecked_t iodone_cnt;
55249 + atomic_unchecked_t ioerr_cnt;
55250
55251 struct device sdev_gendev,
55252 sdev_dev;
55253 diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
55254 --- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
55255 +++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
55256 @@ -711,7 +711,7 @@ struct fc_function_template {
55257 unsigned long show_host_system_hostname:1;
55258
55259 unsigned long disable_target_scan:1;
55260 -};
55261 +} __do_const;
55262
55263
55264 /**
55265 diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
55266 --- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
55267 +++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
55268 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55269 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55270 unsigned char val);
55271 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55272 -};
55273 +} __no_const;
55274
55275 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55276
55277 diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
55278 --- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
55279 +++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
55280 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55281 struct snd_hwdep_dsp_status *status);
55282 int (*dsp_load)(struct snd_hwdep *hw,
55283 struct snd_hwdep_dsp_image *image);
55284 -};
55285 +} __no_const;
55286
55287 struct snd_hwdep {
55288 struct snd_card *card;
55289 diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
55290 --- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
55291 +++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
55292 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55293 struct snd_info_buffer *buffer);
55294 void (*write)(struct snd_info_entry *entry,
55295 struct snd_info_buffer *buffer);
55296 -};
55297 +} __no_const;
55298
55299 struct snd_info_entry_ops {
55300 int (*open)(struct snd_info_entry *entry,
55301 diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
55302 --- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
55303 +++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
55304 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55305 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55306 int (*ack)(struct snd_pcm_substream *substream);
55307 };
55308 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55309
55310 /*
55311 *
55312 diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
55313 --- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
55314 +++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
55315 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
55316 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55317 int (*csp_stop) (struct snd_sb_csp * p);
55318 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55319 -};
55320 +} __no_const;
55321
55322 /*
55323 * CSP private data
55324 diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
55325 --- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
55326 +++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
55327 @@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
55328
55329 /* platform stream ops */
55330 struct snd_pcm_ops *ops;
55331 -};
55332 +} __do_const;
55333
55334 struct snd_soc_platform {
55335 const char *name;
55336 diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
55337 --- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
55338 +++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
55339 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55340 spinlock_t reg_lock;
55341 spinlock_t voice_lock;
55342 wait_queue_head_t interrupt_sleep;
55343 - atomic_t interrupt_sleep_count;
55344 + atomic_unchecked_t interrupt_sleep_count;
55345 struct snd_info_entry *proc_entry;
55346 const struct firmware *dsp_microcode;
55347 const struct firmware *controller_microcode;
55348 diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
55349 --- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
55350 +++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
55351 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55352 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55353 int (*t10_pr_register)(struct se_cmd *);
55354 int (*t10_pr_clear)(struct se_cmd *);
55355 -};
55356 +} __no_const;
55357
55358 struct t10_reservation_template {
55359 /* Reservation effects all target ports */
55360 @@ -432,8 +432,8 @@ struct se_transport_task {
55361 atomic_t t_task_cdbs_left;
55362 atomic_t t_task_cdbs_ex_left;
55363 atomic_t t_task_cdbs_timeout_left;
55364 - atomic_t t_task_cdbs_sent;
55365 - atomic_t t_transport_aborted;
55366 + atomic_unchecked_t t_task_cdbs_sent;
55367 + atomic_unchecked_t t_transport_aborted;
55368 atomic_t t_transport_active;
55369 atomic_t t_transport_complete;
55370 atomic_t t_transport_queue_active;
55371 @@ -774,7 +774,7 @@ struct se_device {
55372 atomic_t active_cmds;
55373 atomic_t simple_cmds;
55374 atomic_t depth_left;
55375 - atomic_t dev_ordered_id;
55376 + atomic_unchecked_t dev_ordered_id;
55377 atomic_t dev_tur_active;
55378 atomic_t execute_tasks;
55379 atomic_t dev_status_thr_count;
55380 diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
55381 --- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
55382 +++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
55383 @@ -36,7 +36,7 @@ struct softirq_action;
55384 */
55385 TRACE_EVENT(irq_handler_entry,
55386
55387 - TP_PROTO(int irq, struct irqaction *action),
55388 + TP_PROTO(int irq, const struct irqaction *action),
55389
55390 TP_ARGS(irq, action),
55391
55392 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55393 */
55394 TRACE_EVENT(irq_handler_exit,
55395
55396 - TP_PROTO(int irq, struct irqaction *action, int ret),
55397 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55398
55399 TP_ARGS(irq, action, ret),
55400
55401 diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
55402 --- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
55403 +++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
55404 @@ -51,10 +51,10 @@ struct dlfb_data {
55405 int base8;
55406 u32 pseudo_palette[256];
55407 /* blit-only rendering path metrics, exposed through sysfs */
55408 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55409 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55410 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55411 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55412 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55413 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55414 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55415 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55416 };
55417
55418 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55419 diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
55420 --- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
55421 +++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
55422 @@ -177,6 +177,7 @@ struct uvesafb_par {
55423 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55424 u8 pmi_setpal; /* PMI for palette changes */
55425 u16 *pmi_base; /* protected mode interface location */
55426 + u8 *pmi_code; /* protected mode code location */
55427 void *pmi_start;
55428 void *pmi_pal;
55429 u8 *vbe_state_orig; /*
55430 diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
55431 --- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
55432 +++ linux-3.0.4/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
55433 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55434
55435 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55436 {
55437 - int err = sys_mount(name, "/root", fs, flags, data);
55438 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55439 if (err)
55440 return err;
55441
55442 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55443 va_start(args, fmt);
55444 vsprintf(buf, fmt, args);
55445 va_end(args);
55446 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55447 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55448 if (fd >= 0) {
55449 sys_ioctl(fd, FDEJECT, 0);
55450 sys_close(fd);
55451 }
55452 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55453 - fd = sys_open("/dev/console", O_RDWR, 0);
55454 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55455 if (fd >= 0) {
55456 sys_ioctl(fd, TCGETS, (long)&termios);
55457 termios.c_lflag &= ~ICANON;
55458 sys_ioctl(fd, TCSETSF, (long)&termios);
55459 - sys_read(fd, &c, 1);
55460 + sys_read(fd, (char __user *)&c, 1);
55461 termios.c_lflag |= ICANON;
55462 sys_ioctl(fd, TCSETSF, (long)&termios);
55463 sys_close(fd);
55464 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55465 mount_root();
55466 out:
55467 devtmpfs_mount("dev");
55468 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55469 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55470 sys_chroot((const char __user __force *)".");
55471 }
55472 diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
55473 --- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
55474 +++ linux-3.0.4/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
55475 @@ -15,15 +15,15 @@ extern int root_mountflags;
55476
55477 static inline int create_dev(char *name, dev_t dev)
55478 {
55479 - sys_unlink(name);
55480 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55481 + sys_unlink((__force char __user *)name);
55482 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55483 }
55484
55485 #if BITS_PER_LONG == 32
55486 static inline u32 bstat(char *name)
55487 {
55488 struct stat64 stat;
55489 - if (sys_stat64(name, &stat) != 0)
55490 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55491 return 0;
55492 if (!S_ISBLK(stat.st_mode))
55493 return 0;
55494 diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
55495 --- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
55496 +++ linux-3.0.4/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
55497 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55498 create_dev("/dev/root.old", Root_RAM0);
55499 /* mount initrd on rootfs' /root */
55500 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55501 - sys_mkdir("/old", 0700);
55502 - root_fd = sys_open("/", 0, 0);
55503 - old_fd = sys_open("/old", 0, 0);
55504 + sys_mkdir((__force const char __user *)"/old", 0700);
55505 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55506 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55507 /* move initrd over / and chdir/chroot in initrd root */
55508 - sys_chdir("/root");
55509 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55510 - sys_chroot(".");
55511 + sys_chdir((__force const char __user *)"/root");
55512 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55513 + sys_chroot((__force const char __user *)".");
55514
55515 /*
55516 * In case that a resume from disk is carried out by linuxrc or one of
55517 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55518
55519 /* move initrd to rootfs' /old */
55520 sys_fchdir(old_fd);
55521 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55522 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55523 /* switch root and cwd back to / of rootfs */
55524 sys_fchdir(root_fd);
55525 - sys_chroot(".");
55526 + sys_chroot((__force const char __user *)".");
55527 sys_close(old_fd);
55528 sys_close(root_fd);
55529
55530 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55531 - sys_chdir("/old");
55532 + sys_chdir((__force const char __user *)"/old");
55533 return;
55534 }
55535
55536 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55537 mount_root();
55538
55539 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55540 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55541 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55542 if (!error)
55543 printk("okay\n");
55544 else {
55545 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55546 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55547 if (error == -ENOENT)
55548 printk("/initrd does not exist. Ignored.\n");
55549 else
55550 printk("failed\n");
55551 printk(KERN_NOTICE "Unmounting old root\n");
55552 - sys_umount("/old", MNT_DETACH);
55553 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55554 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55555 if (fd < 0) {
55556 error = fd;
55557 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55558 * mounted in the normal path.
55559 */
55560 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55561 - sys_unlink("/initrd.image");
55562 + sys_unlink((__force const char __user *)"/initrd.image");
55563 handle_initrd();
55564 return 1;
55565 }
55566 }
55567 - sys_unlink("/initrd.image");
55568 + sys_unlink((__force const char __user *)"/initrd.image");
55569 return 0;
55570 }
55571 diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
55572 --- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
55573 +++ linux-3.0.4/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
55574 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55575 partitioned ? "_d" : "", minor,
55576 md_setup_args[ent].device_names);
55577
55578 - fd = sys_open(name, 0, 0);
55579 + fd = sys_open((__force char __user *)name, 0, 0);
55580 if (fd < 0) {
55581 printk(KERN_ERR "md: open failed - cannot start "
55582 "array %s\n", name);
55583 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55584 * array without it
55585 */
55586 sys_close(fd);
55587 - fd = sys_open(name, 0, 0);
55588 + fd = sys_open((__force char __user *)name, 0, 0);
55589 sys_ioctl(fd, BLKRRPART, 0);
55590 }
55591 sys_close(fd);
55592 diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
55593 --- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
55594 +++ linux-3.0.4/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
55595 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55596 }
55597 }
55598
55599 -static long __init do_utime(char __user *filename, time_t mtime)
55600 +static long __init do_utime(__force char __user *filename, time_t mtime)
55601 {
55602 struct timespec t[2];
55603
55604 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55605 struct dir_entry *de, *tmp;
55606 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55607 list_del(&de->list);
55608 - do_utime(de->name, de->mtime);
55609 + do_utime((__force char __user *)de->name, de->mtime);
55610 kfree(de->name);
55611 kfree(de);
55612 }
55613 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55614 if (nlink >= 2) {
55615 char *old = find_link(major, minor, ino, mode, collected);
55616 if (old)
55617 - return (sys_link(old, collected) < 0) ? -1 : 1;
55618 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55619 }
55620 return 0;
55621 }
55622 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55623 {
55624 struct stat st;
55625
55626 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55627 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55628 if (S_ISDIR(st.st_mode))
55629 - sys_rmdir(path);
55630 + sys_rmdir((__force char __user *)path);
55631 else
55632 - sys_unlink(path);
55633 + sys_unlink((__force char __user *)path);
55634 }
55635 }
55636
55637 @@ -305,7 +305,7 @@ static int __init do_name(void)
55638 int openflags = O_WRONLY|O_CREAT;
55639 if (ml != 1)
55640 openflags |= O_TRUNC;
55641 - wfd = sys_open(collected, openflags, mode);
55642 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55643
55644 if (wfd >= 0) {
55645 sys_fchown(wfd, uid, gid);
55646 @@ -317,17 +317,17 @@ static int __init do_name(void)
55647 }
55648 }
55649 } else if (S_ISDIR(mode)) {
55650 - sys_mkdir(collected, mode);
55651 - sys_chown(collected, uid, gid);
55652 - sys_chmod(collected, mode);
55653 + sys_mkdir((__force char __user *)collected, mode);
55654 + sys_chown((__force char __user *)collected, uid, gid);
55655 + sys_chmod((__force char __user *)collected, mode);
55656 dir_add(collected, mtime);
55657 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55658 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55659 if (maybe_link() == 0) {
55660 - sys_mknod(collected, mode, rdev);
55661 - sys_chown(collected, uid, gid);
55662 - sys_chmod(collected, mode);
55663 - do_utime(collected, mtime);
55664 + sys_mknod((__force char __user *)collected, mode, rdev);
55665 + sys_chown((__force char __user *)collected, uid, gid);
55666 + sys_chmod((__force char __user *)collected, mode);
55667 + do_utime((__force char __user *)collected, mtime);
55668 }
55669 }
55670 return 0;
55671 @@ -336,15 +336,15 @@ static int __init do_name(void)
55672 static int __init do_copy(void)
55673 {
55674 if (count >= body_len) {
55675 - sys_write(wfd, victim, body_len);
55676 + sys_write(wfd, (__force char __user *)victim, body_len);
55677 sys_close(wfd);
55678 - do_utime(vcollected, mtime);
55679 + do_utime((__force char __user *)vcollected, mtime);
55680 kfree(vcollected);
55681 eat(body_len);
55682 state = SkipIt;
55683 return 0;
55684 } else {
55685 - sys_write(wfd, victim, count);
55686 + sys_write(wfd, (__force char __user *)victim, count);
55687 body_len -= count;
55688 eat(count);
55689 return 1;
55690 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55691 {
55692 collected[N_ALIGN(name_len) + body_len] = '\0';
55693 clean_path(collected, 0);
55694 - sys_symlink(collected + N_ALIGN(name_len), collected);
55695 - sys_lchown(collected, uid, gid);
55696 - do_utime(collected, mtime);
55697 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55698 + sys_lchown((__force char __user *)collected, uid, gid);
55699 + do_utime((__force char __user *)collected, mtime);
55700 state = SkipIt;
55701 next_state = Reset;
55702 return 0;
55703 diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
55704 --- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
55705 +++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
55706 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
55707
55708 config COMPAT_BRK
55709 bool "Disable heap randomization"
55710 - default y
55711 + default n
55712 help
55713 Randomizing heap placement makes heap exploits harder, but it
55714 also breaks ancient binaries (including anything libc5 based).
55715 diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
55716 --- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
55717 +++ linux-3.0.4/init/main.c 2011-08-23 21:48:14.000000000 -0400
55718 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55719 extern void tc_init(void);
55720 #endif
55721
55722 +extern void grsecurity_init(void);
55723 +
55724 /*
55725 * Debug helper: via this flag we know that we are in 'early bootup code'
55726 * where only the boot processor is running with IRQ disabled. This means
55727 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55728
55729 __setup("reset_devices", set_reset_devices);
55730
55731 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55732 +extern char pax_enter_kernel_user[];
55733 +extern char pax_exit_kernel_user[];
55734 +extern pgdval_t clone_pgd_mask;
55735 +#endif
55736 +
55737 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55738 +static int __init setup_pax_nouderef(char *str)
55739 +{
55740 +#ifdef CONFIG_X86_32
55741 + unsigned int cpu;
55742 + struct desc_struct *gdt;
55743 +
55744 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
55745 + gdt = get_cpu_gdt_table(cpu);
55746 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55747 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55748 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55749 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55750 + }
55751 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55752 +#else
55753 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55754 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55755 + clone_pgd_mask = ~(pgdval_t)0UL;
55756 +#endif
55757 +
55758 + return 0;
55759 +}
55760 +early_param("pax_nouderef", setup_pax_nouderef);
55761 +#endif
55762 +
55763 +#ifdef CONFIG_PAX_SOFTMODE
55764 +int pax_softmode;
55765 +
55766 +static int __init setup_pax_softmode(char *str)
55767 +{
55768 + get_option(&str, &pax_softmode);
55769 + return 1;
55770 +}
55771 +__setup("pax_softmode=", setup_pax_softmode);
55772 +#endif
55773 +
55774 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55775 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55776 static const char *panic_later, *panic_param;
55777 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
55778 {
55779 int count = preempt_count();
55780 int ret;
55781 + const char *msg1 = "", *msg2 = "";
55782
55783 if (initcall_debug)
55784 ret = do_one_initcall_debug(fn);
55785 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
55786 sprintf(msgbuf, "error code %d ", ret);
55787
55788 if (preempt_count() != count) {
55789 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55790 + msg1 = " preemption imbalance";
55791 preempt_count() = count;
55792 }
55793 if (irqs_disabled()) {
55794 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55795 + msg2 = " disabled interrupts";
55796 local_irq_enable();
55797 }
55798 - if (msgbuf[0]) {
55799 - printk("initcall %pF returned with %s\n", fn, msgbuf);
55800 + if (msgbuf[0] || *msg1 || *msg2) {
55801 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
55802 }
55803
55804 return ret;
55805 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
55806 do_basic_setup();
55807
55808 /* Open the /dev/console on the rootfs, this should never fail */
55809 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
55810 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
55811 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
55812
55813 (void) sys_dup(0);
55814 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
55815 if (!ramdisk_execute_command)
55816 ramdisk_execute_command = "/init";
55817
55818 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
55819 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
55820 ramdisk_execute_command = NULL;
55821 prepare_namespace();
55822 }
55823
55824 + grsecurity_init();
55825 +
55826 /*
55827 * Ok, we have completed the initial bootup, and
55828 * we're essentially up and running. Get rid of the
55829 diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
55830 --- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
55831 +++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
55832 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
55833 mq_bytes = (mq_msg_tblsz +
55834 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
55835
55836 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
55837 spin_lock(&mq_lock);
55838 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
55839 u->mq_bytes + mq_bytes >
55840 diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
55841 --- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
55842 +++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
55843 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
55844 return security_msg_queue_associate(msq, msgflg);
55845 }
55846
55847 +static struct ipc_ops msg_ops = {
55848 + .getnew = newque,
55849 + .associate = msg_security,
55850 + .more_checks = NULL
55851 +};
55852 +
55853 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
55854 {
55855 struct ipc_namespace *ns;
55856 - struct ipc_ops msg_ops;
55857 struct ipc_params msg_params;
55858
55859 ns = current->nsproxy->ipc_ns;
55860
55861 - msg_ops.getnew = newque;
55862 - msg_ops.associate = msg_security;
55863 - msg_ops.more_checks = NULL;
55864 -
55865 msg_params.key = key;
55866 msg_params.flg = msgflg;
55867
55868 diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
55869 --- linux-3.0.4/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
55870 +++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
55871 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
55872 return 0;
55873 }
55874
55875 +static struct ipc_ops sem_ops = {
55876 + .getnew = newary,
55877 + .associate = sem_security,
55878 + .more_checks = sem_more_checks
55879 +};
55880 +
55881 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
55882 {
55883 struct ipc_namespace *ns;
55884 - struct ipc_ops sem_ops;
55885 struct ipc_params sem_params;
55886
55887 ns = current->nsproxy->ipc_ns;
55888 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
55889 if (nsems < 0 || nsems > ns->sc_semmsl)
55890 return -EINVAL;
55891
55892 - sem_ops.getnew = newary;
55893 - sem_ops.associate = sem_security;
55894 - sem_ops.more_checks = sem_more_checks;
55895 -
55896 sem_params.key = key;
55897 sem_params.flg = semflg;
55898 sem_params.u.nsems = nsems;
55899 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
55900 int nsems;
55901 struct list_head tasks;
55902
55903 + pax_track_stack();
55904 +
55905 sma = sem_lock_check(ns, semid);
55906 if (IS_ERR(sma))
55907 return PTR_ERR(sma);
55908 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
55909 struct ipc_namespace *ns;
55910 struct list_head tasks;
55911
55912 + pax_track_stack();
55913 +
55914 ns = current->nsproxy->ipc_ns;
55915
55916 if (nsops < 1 || semid < 0)
55917 diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
55918 --- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
55919 +++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
55920 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
55921 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
55922 #endif
55923
55924 +#ifdef CONFIG_GRKERNSEC
55925 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55926 + const time_t shm_createtime, const uid_t cuid,
55927 + const int shmid);
55928 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55929 + const time_t shm_createtime);
55930 +#endif
55931 +
55932 void shm_init_ns(struct ipc_namespace *ns)
55933 {
55934 ns->shm_ctlmax = SHMMAX;
55935 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
55936 shp->shm_lprid = 0;
55937 shp->shm_atim = shp->shm_dtim = 0;
55938 shp->shm_ctim = get_seconds();
55939 +#ifdef CONFIG_GRKERNSEC
55940 + {
55941 + struct timespec timeval;
55942 + do_posix_clock_monotonic_gettime(&timeval);
55943 +
55944 + shp->shm_createtime = timeval.tv_sec;
55945 + }
55946 +#endif
55947 shp->shm_segsz = size;
55948 shp->shm_nattch = 0;
55949 shp->shm_file = file;
55950 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
55951 return 0;
55952 }
55953
55954 +static struct ipc_ops shm_ops = {
55955 + .getnew = newseg,
55956 + .associate = shm_security,
55957 + .more_checks = shm_more_checks
55958 +};
55959 +
55960 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
55961 {
55962 struct ipc_namespace *ns;
55963 - struct ipc_ops shm_ops;
55964 struct ipc_params shm_params;
55965
55966 ns = current->nsproxy->ipc_ns;
55967
55968 - shm_ops.getnew = newseg;
55969 - shm_ops.associate = shm_security;
55970 - shm_ops.more_checks = shm_more_checks;
55971 -
55972 shm_params.key = key;
55973 shm_params.flg = shmflg;
55974 shm_params.u.size = size;
55975 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
55976 case SHM_LOCK:
55977 case SHM_UNLOCK:
55978 {
55979 - struct file *uninitialized_var(shm_file);
55980 -
55981 lru_add_drain_all(); /* drain pagevecs to lru lists */
55982
55983 shp = shm_lock_check(ns, shmid);
55984 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
55985 if (err)
55986 goto out_unlock;
55987
55988 +#ifdef CONFIG_GRKERNSEC
55989 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
55990 + shp->shm_perm.cuid, shmid) ||
55991 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
55992 + err = -EACCES;
55993 + goto out_unlock;
55994 + }
55995 +#endif
55996 +
55997 path = shp->shm_file->f_path;
55998 path_get(&path);
55999 shp->shm_nattch++;
56000 +#ifdef CONFIG_GRKERNSEC
56001 + shp->shm_lapid = current->pid;
56002 +#endif
56003 size = i_size_read(path.dentry->d_inode);
56004 shm_unlock(shp);
56005
56006 diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
56007 --- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
56008 +++ linux-3.0.4/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
56009 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56010 */
56011 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56012 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56013 - file->f_op->write(file, (char *)&ac,
56014 + file->f_op->write(file, (__force char __user *)&ac,
56015 sizeof(acct_t), &file->f_pos);
56016 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56017 set_fs(fs);
56018 diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
56019 --- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
56020 +++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
56021 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56022 3) suppressed due to audit_rate_limit
56023 4) suppressed due to audit_backlog_limit
56024 */
56025 -static atomic_t audit_lost = ATOMIC_INIT(0);
56026 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56027
56028 /* The netlink socket. */
56029 static struct sock *audit_sock;
56030 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56031 unsigned long now;
56032 int print;
56033
56034 - atomic_inc(&audit_lost);
56035 + atomic_inc_unchecked(&audit_lost);
56036
56037 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56038
56039 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56040 printk(KERN_WARNING
56041 "audit: audit_lost=%d audit_rate_limit=%d "
56042 "audit_backlog_limit=%d\n",
56043 - atomic_read(&audit_lost),
56044 + atomic_read_unchecked(&audit_lost),
56045 audit_rate_limit,
56046 audit_backlog_limit);
56047 audit_panic(message);
56048 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56049 status_set.pid = audit_pid;
56050 status_set.rate_limit = audit_rate_limit;
56051 status_set.backlog_limit = audit_backlog_limit;
56052 - status_set.lost = atomic_read(&audit_lost);
56053 + status_set.lost = atomic_read_unchecked(&audit_lost);
56054 status_set.backlog = skb_queue_len(&audit_skb_queue);
56055 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56056 &status_set, sizeof(status_set));
56057 diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
56058 --- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
56059 +++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
56060 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
56061 }
56062
56063 /* global counter which is incremented every time something logs in */
56064 -static atomic_t session_id = ATOMIC_INIT(0);
56065 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56066
56067 /**
56068 * audit_set_loginuid - set a task's audit_context loginuid
56069 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
56070 */
56071 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56072 {
56073 - unsigned int sessionid = atomic_inc_return(&session_id);
56074 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56075 struct audit_context *context = task->audit_context;
56076
56077 if (context && context->in_syscall) {
56078 diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
56079 --- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
56080 +++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
56081 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56082 * before modification is attempted and the application
56083 * fails.
56084 */
56085 + if (tocopy > ARRAY_SIZE(kdata))
56086 + return -EFAULT;
56087 +
56088 if (copy_to_user(dataptr, kdata, tocopy
56089 * sizeof(struct __user_cap_data_struct))) {
56090 return -EFAULT;
56091 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
56092 BUG();
56093 }
56094
56095 - if (security_capable(ns, current_cred(), cap) == 0) {
56096 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56097 current->flags |= PF_SUPERPRIV;
56098 return true;
56099 }
56100 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
56101 }
56102 EXPORT_SYMBOL(ns_capable);
56103
56104 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56105 +{
56106 + if (unlikely(!cap_valid(cap))) {
56107 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56108 + BUG();
56109 + }
56110 +
56111 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56112 + current->flags |= PF_SUPERPRIV;
56113 + return true;
56114 + }
56115 + return false;
56116 +}
56117 +EXPORT_SYMBOL(ns_capable_nolog);
56118 +
56119 +bool capable_nolog(int cap)
56120 +{
56121 + return ns_capable_nolog(&init_user_ns, cap);
56122 +}
56123 +EXPORT_SYMBOL(capable_nolog);
56124 +
56125 /**
56126 * task_ns_capable - Determine whether current task has a superior
56127 * capability targeted at a specific task's user namespace.
56128 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
56129 }
56130 EXPORT_SYMBOL(task_ns_capable);
56131
56132 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56133 +{
56134 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56135 +}
56136 +EXPORT_SYMBOL(task_ns_capable_nolog);
56137 +
56138 /**
56139 * nsown_capable - Check superior capability to one's own user_ns
56140 * @cap: The capability in question
56141 diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
56142 --- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
56143 +++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
56144 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
56145 struct hlist_head *hhead;
56146 struct cg_cgroup_link *link;
56147
56148 + pax_track_stack();
56149 +
56150 /* First see if we already have a cgroup group that matches
56151 * the desired set */
56152 read_lock(&css_set_lock);
56153 diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
56154 --- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
56155 +++ linux-3.0.4/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
56156 @@ -13,6 +13,7 @@
56157
56158 #include <linux/linkage.h>
56159 #include <linux/compat.h>
56160 +#include <linux/module.h>
56161 #include <linux/errno.h>
56162 #include <linux/time.h>
56163 #include <linux/signal.h>
56164 diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
56165 --- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
56166 +++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
56167 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56168 struct proc_dir_entry *entry;
56169
56170 /* create the current config file */
56171 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56172 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56173 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56174 + &ikconfig_file_ops);
56175 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56176 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56177 + &ikconfig_file_ops);
56178 +#endif
56179 +#else
56180 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56181 &ikconfig_file_ops);
56182 +#endif
56183 +
56184 if (!entry)
56185 return -ENOMEM;
56186
56187 diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
56188 --- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
56189 +++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
56190 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56191 */
56192 void __put_cred(struct cred *cred)
56193 {
56194 + pax_track_stack();
56195 +
56196 kdebug("__put_cred(%p{%d,%d})", cred,
56197 atomic_read(&cred->usage),
56198 read_cred_subscribers(cred));
56199 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56200 {
56201 struct cred *cred;
56202
56203 + pax_track_stack();
56204 +
56205 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56206 atomic_read(&tsk->cred->usage),
56207 read_cred_subscribers(tsk->cred));
56208 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56209 {
56210 const struct cred *cred;
56211
56212 + pax_track_stack();
56213 +
56214 rcu_read_lock();
56215
56216 do {
56217 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56218 {
56219 struct cred *new;
56220
56221 + pax_track_stack();
56222 +
56223 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56224 if (!new)
56225 return NULL;
56226 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56227 const struct cred *old;
56228 struct cred *new;
56229
56230 + pax_track_stack();
56231 +
56232 validate_process_creds();
56233
56234 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56235 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56236 struct thread_group_cred *tgcred = NULL;
56237 struct cred *new;
56238
56239 + pax_track_stack();
56240 +
56241 #ifdef CONFIG_KEYS
56242 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56243 if (!tgcred)
56244 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56245 struct cred *new;
56246 int ret;
56247
56248 + pax_track_stack();
56249 +
56250 if (
56251 #ifdef CONFIG_KEYS
56252 !p->cred->thread_keyring &&
56253 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56254 struct task_struct *task = current;
56255 const struct cred *old = task->real_cred;
56256
56257 + pax_track_stack();
56258 +
56259 kdebug("commit_creds(%p{%d,%d})", new,
56260 atomic_read(&new->usage),
56261 read_cred_subscribers(new));
56262 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56263
56264 get_cred(new); /* we will require a ref for the subj creds too */
56265
56266 + gr_set_role_label(task, new->uid, new->gid);
56267 +
56268 /* dumpability changes */
56269 if (old->euid != new->euid ||
56270 old->egid != new->egid ||
56271 @@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
56272 key_fsgid_changed(task);
56273
56274 /* do it
56275 - * - What if a process setreuid()'s and this brings the
56276 - * new uid over his NPROC rlimit? We can check this now
56277 - * cheaply with the new uid cache, so if it matters
56278 - * we should be checking for it. -DaveM
56279 + * RLIMIT_NPROC limits on user->processes have already been checked
56280 + * in set_user().
56281 */
56282 alter_cred_subscribers(new, 2);
56283 if (new->user != old->user)
56284 @@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
56285 */
56286 void abort_creds(struct cred *new)
56287 {
56288 + pax_track_stack();
56289 +
56290 kdebug("abort_creds(%p{%d,%d})", new,
56291 atomic_read(&new->usage),
56292 read_cred_subscribers(new));
56293 @@ -574,6 +592,8 @@ const struct cred *override_creds(const
56294 {
56295 const struct cred *old = current->cred;
56296
56297 + pax_track_stack();
56298 +
56299 kdebug("override_creds(%p{%d,%d})", new,
56300 atomic_read(&new->usage),
56301 read_cred_subscribers(new));
56302 @@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
56303 {
56304 const struct cred *override = current->cred;
56305
56306 + pax_track_stack();
56307 +
56308 kdebug("revert_creds(%p{%d,%d})", old,
56309 atomic_read(&old->usage),
56310 read_cred_subscribers(old));
56311 @@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
56312 const struct cred *old;
56313 struct cred *new;
56314
56315 + pax_track_stack();
56316 +
56317 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56318 if (!new)
56319 return NULL;
56320 @@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56321 */
56322 int set_security_override(struct cred *new, u32 secid)
56323 {
56324 + pax_track_stack();
56325 +
56326 return security_kernel_act_as(new, secid);
56327 }
56328 EXPORT_SYMBOL(set_security_override);
56329 @@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
56330 u32 secid;
56331 int ret;
56332
56333 + pax_track_stack();
56334 +
56335 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56336 if (ret < 0)
56337 return ret;
56338 diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
56339 --- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
56340 +++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
56341 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56342 */
56343 static atomic_t masters_in_kgdb;
56344 static atomic_t slaves_in_kgdb;
56345 -static atomic_t kgdb_break_tasklet_var;
56346 +static atomic_unchecked_t kgdb_break_tasklet_var;
56347 atomic_t kgdb_setting_breakpoint;
56348
56349 struct task_struct *kgdb_usethread;
56350 @@ -129,7 +129,7 @@ int kgdb_single_step;
56351 static pid_t kgdb_sstep_pid;
56352
56353 /* to keep track of the CPU which is doing the single stepping*/
56354 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56355 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56356
56357 /*
56358 * If you are debugging a problem where roundup (the collection of
56359 @@ -542,7 +542,7 @@ return_normal:
56360 * kernel will only try for the value of sstep_tries before
56361 * giving up and continuing on.
56362 */
56363 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56364 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56365 (kgdb_info[cpu].task &&
56366 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56367 atomic_set(&kgdb_active, -1);
56368 @@ -636,8 +636,8 @@ cpu_master_loop:
56369 }
56370
56371 kgdb_restore:
56372 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56373 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56374 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56375 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56376 if (kgdb_info[sstep_cpu].task)
56377 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56378 else
56379 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56380 static void kgdb_tasklet_bpt(unsigned long ing)
56381 {
56382 kgdb_breakpoint();
56383 - atomic_set(&kgdb_break_tasklet_var, 0);
56384 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56385 }
56386
56387 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56388
56389 void kgdb_schedule_breakpoint(void)
56390 {
56391 - if (atomic_read(&kgdb_break_tasklet_var) ||
56392 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56393 atomic_read(&kgdb_active) != -1 ||
56394 atomic_read(&kgdb_setting_breakpoint))
56395 return;
56396 - atomic_inc(&kgdb_break_tasklet_var);
56397 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56398 tasklet_schedule(&kgdb_tasklet_breakpoint);
56399 }
56400 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56401 diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
56402 --- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
56403 +++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
56404 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56405 list_for_each_entry(mod, kdb_modules, list) {
56406
56407 kdb_printf("%-20s%8u 0x%p ", mod->name,
56408 - mod->core_size, (void *)mod);
56409 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56410 #ifdef CONFIG_MODULE_UNLOAD
56411 kdb_printf("%4d ", module_refcount(mod));
56412 #endif
56413 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56414 kdb_printf(" (Loading)");
56415 else
56416 kdb_printf(" (Live)");
56417 - kdb_printf(" 0x%p", mod->module_core);
56418 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56419
56420 #ifdef CONFIG_MODULE_UNLOAD
56421 {
56422 diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
56423 --- linux-3.0.4/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
56424 +++ linux-3.0.4/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
56425 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
56426 return 0;
56427 }
56428
56429 -static atomic64_t perf_event_id;
56430 +static atomic64_unchecked_t perf_event_id;
56431
56432 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
56433 enum event_type_t event_type);
56434 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
56435
56436 static inline u64 perf_event_count(struct perf_event *event)
56437 {
56438 - return local64_read(&event->count) + atomic64_read(&event->child_count);
56439 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
56440 }
56441
56442 static u64 perf_event_read(struct perf_event *event)
56443 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
56444 mutex_lock(&event->child_mutex);
56445 total += perf_event_read(event);
56446 *enabled += event->total_time_enabled +
56447 - atomic64_read(&event->child_total_time_enabled);
56448 + atomic64_read_unchecked(&event->child_total_time_enabled);
56449 *running += event->total_time_running +
56450 - atomic64_read(&event->child_total_time_running);
56451 + atomic64_read_unchecked(&event->child_total_time_running);
56452
56453 list_for_each_entry(child, &event->child_list, child_list) {
56454 total += perf_event_read(child);
56455 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
56456 userpg->offset -= local64_read(&event->hw.prev_count);
56457
56458 userpg->time_enabled = event->total_time_enabled +
56459 - atomic64_read(&event->child_total_time_enabled);
56460 + atomic64_read_unchecked(&event->child_total_time_enabled);
56461
56462 userpg->time_running = event->total_time_running +
56463 - atomic64_read(&event->child_total_time_running);
56464 + atomic64_read_unchecked(&event->child_total_time_running);
56465
56466 barrier();
56467 ++userpg->lock;
56468 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
56469 values[n++] = perf_event_count(event);
56470 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
56471 values[n++] = enabled +
56472 - atomic64_read(&event->child_total_time_enabled);
56473 + atomic64_read_unchecked(&event->child_total_time_enabled);
56474 }
56475 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
56476 values[n++] = running +
56477 - atomic64_read(&event->child_total_time_running);
56478 + atomic64_read_unchecked(&event->child_total_time_running);
56479 }
56480 if (read_format & PERF_FORMAT_ID)
56481 values[n++] = primary_event_id(event);
56482 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
56483 event->parent = parent_event;
56484
56485 event->ns = get_pid_ns(current->nsproxy->pid_ns);
56486 - event->id = atomic64_inc_return(&perf_event_id);
56487 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
56488
56489 event->state = PERF_EVENT_STATE_INACTIVE;
56490
56491 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
56492 /*
56493 * Add back the child's count to the parent's count:
56494 */
56495 - atomic64_add(child_val, &parent_event->child_count);
56496 - atomic64_add(child_event->total_time_enabled,
56497 + atomic64_add_unchecked(child_val, &parent_event->child_count);
56498 + atomic64_add_unchecked(child_event->total_time_enabled,
56499 &parent_event->child_total_time_enabled);
56500 - atomic64_add(child_event->total_time_running,
56501 + atomic64_add_unchecked(child_event->total_time_running,
56502 &parent_event->child_total_time_running);
56503
56504 /*
56505 diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
56506 --- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
56507 +++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
56508 @@ -57,6 +57,10 @@
56509 #include <asm/pgtable.h>
56510 #include <asm/mmu_context.h>
56511
56512 +#ifdef CONFIG_GRKERNSEC
56513 +extern rwlock_t grsec_exec_file_lock;
56514 +#endif
56515 +
56516 static void exit_mm(struct task_struct * tsk);
56517
56518 static void __unhash_process(struct task_struct *p, bool group_dead)
56519 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56520 struct task_struct *leader;
56521 int zap_leader;
56522 repeat:
56523 +#ifdef CONFIG_NET
56524 + gr_del_task_from_ip_table(p);
56525 +#endif
56526 +
56527 tracehook_prepare_release_task(p);
56528 /* don't need to get the RCU readlock here - the process is dead and
56529 * can't be modifying its own credentials. But shut RCU-lockdep up */
56530 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56531 {
56532 write_lock_irq(&tasklist_lock);
56533
56534 +#ifdef CONFIG_GRKERNSEC
56535 + write_lock(&grsec_exec_file_lock);
56536 + if (current->exec_file) {
56537 + fput(current->exec_file);
56538 + current->exec_file = NULL;
56539 + }
56540 + write_unlock(&grsec_exec_file_lock);
56541 +#endif
56542 +
56543 ptrace_unlink(current);
56544 /* Reparent to init */
56545 current->real_parent = current->parent = kthreadd_task;
56546 list_move_tail(&current->sibling, &current->real_parent->children);
56547
56548 + gr_set_kernel_label(current);
56549 +
56550 /* Set the exit signal to SIGCHLD so we signal init on exit */
56551 current->exit_signal = SIGCHLD;
56552
56553 @@ -394,7 +413,7 @@ int allow_signal(int sig)
56554 * know it'll be handled, so that they don't get converted to
56555 * SIGKILL or just silently dropped.
56556 */
56557 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56558 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56559 recalc_sigpending();
56560 spin_unlock_irq(&current->sighand->siglock);
56561 return 0;
56562 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56563 vsnprintf(current->comm, sizeof(current->comm), name, args);
56564 va_end(args);
56565
56566 +#ifdef CONFIG_GRKERNSEC
56567 + write_lock(&grsec_exec_file_lock);
56568 + if (current->exec_file) {
56569 + fput(current->exec_file);
56570 + current->exec_file = NULL;
56571 + }
56572 + write_unlock(&grsec_exec_file_lock);
56573 +#endif
56574 +
56575 + gr_set_kernel_label(current);
56576 +
56577 /*
56578 * If we were started as result of loading a module, close all of the
56579 * user space pages. We don't need them, and if we didn't close them
56580 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
56581 struct task_struct *tsk = current;
56582 int group_dead;
56583
56584 - profile_task_exit(tsk);
56585 -
56586 - WARN_ON(atomic_read(&tsk->fs_excl));
56587 - WARN_ON(blk_needs_flush_plug(tsk));
56588 -
56589 if (unlikely(in_interrupt()))
56590 panic("Aiee, killing interrupt handler!");
56591 - if (unlikely(!tsk->pid))
56592 - panic("Attempted to kill the idle task!");
56593
56594 /*
56595 * If do_exit is called because this processes oopsed, it's possible
56596 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
56597 */
56598 set_fs(USER_DS);
56599
56600 + profile_task_exit(tsk);
56601 +
56602 + WARN_ON(atomic_read(&tsk->fs_excl));
56603 + WARN_ON(blk_needs_flush_plug(tsk));
56604 +
56605 + if (unlikely(!tsk->pid))
56606 + panic("Attempted to kill the idle task!");
56607 +
56608 tracehook_report_exit(&code);
56609
56610 validate_creds_for_do_exit(tsk);
56611 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
56612 tsk->exit_code = code;
56613 taskstats_exit(tsk, group_dead);
56614
56615 + gr_acl_handle_psacct(tsk, code);
56616 + gr_acl_handle_exit();
56617 +
56618 exit_mm(tsk);
56619
56620 if (group_dead)
56621 diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
56622 --- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
56623 +++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
56624 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
56625 *stackend = STACK_END_MAGIC; /* for overflow detection */
56626
56627 #ifdef CONFIG_CC_STACKPROTECTOR
56628 - tsk->stack_canary = get_random_int();
56629 + tsk->stack_canary = pax_get_random_long();
56630 #endif
56631
56632 /* One for us, one for whoever does the "release_task()" (usually parent) */
56633 @@ -308,13 +308,77 @@ out:
56634 }
56635
56636 #ifdef CONFIG_MMU
56637 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56638 +{
56639 + struct vm_area_struct *tmp;
56640 + unsigned long charge;
56641 + struct mempolicy *pol;
56642 + struct file *file;
56643 +
56644 + charge = 0;
56645 + if (mpnt->vm_flags & VM_ACCOUNT) {
56646 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56647 + if (security_vm_enough_memory(len))
56648 + goto fail_nomem;
56649 + charge = len;
56650 + }
56651 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56652 + if (!tmp)
56653 + goto fail_nomem;
56654 + *tmp = *mpnt;
56655 + tmp->vm_mm = mm;
56656 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56657 + pol = mpol_dup(vma_policy(mpnt));
56658 + if (IS_ERR(pol))
56659 + goto fail_nomem_policy;
56660 + vma_set_policy(tmp, pol);
56661 + if (anon_vma_fork(tmp, mpnt))
56662 + goto fail_nomem_anon_vma_fork;
56663 + tmp->vm_flags &= ~VM_LOCKED;
56664 + tmp->vm_next = tmp->vm_prev = NULL;
56665 + tmp->vm_mirror = NULL;
56666 + file = tmp->vm_file;
56667 + if (file) {
56668 + struct inode *inode = file->f_path.dentry->d_inode;
56669 + struct address_space *mapping = file->f_mapping;
56670 +
56671 + get_file(file);
56672 + if (tmp->vm_flags & VM_DENYWRITE)
56673 + atomic_dec(&inode->i_writecount);
56674 + mutex_lock(&mapping->i_mmap_mutex);
56675 + if (tmp->vm_flags & VM_SHARED)
56676 + mapping->i_mmap_writable++;
56677 + flush_dcache_mmap_lock(mapping);
56678 + /* insert tmp into the share list, just after mpnt */
56679 + vma_prio_tree_add(tmp, mpnt);
56680 + flush_dcache_mmap_unlock(mapping);
56681 + mutex_unlock(&mapping->i_mmap_mutex);
56682 + }
56683 +
56684 + /*
56685 + * Clear hugetlb-related page reserves for children. This only
56686 + * affects MAP_PRIVATE mappings. Faults generated by the child
56687 + * are not guaranteed to succeed, even if read-only
56688 + */
56689 + if (is_vm_hugetlb_page(tmp))
56690 + reset_vma_resv_huge_pages(tmp);
56691 +
56692 + return tmp;
56693 +
56694 +fail_nomem_anon_vma_fork:
56695 + mpol_put(pol);
56696 +fail_nomem_policy:
56697 + kmem_cache_free(vm_area_cachep, tmp);
56698 +fail_nomem:
56699 + vm_unacct_memory(charge);
56700 + return NULL;
56701 +}
56702 +
56703 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56704 {
56705 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56706 struct rb_node **rb_link, *rb_parent;
56707 int retval;
56708 - unsigned long charge;
56709 - struct mempolicy *pol;
56710
56711 down_write(&oldmm->mmap_sem);
56712 flush_cache_dup_mm(oldmm);
56713 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
56714 mm->locked_vm = 0;
56715 mm->mmap = NULL;
56716 mm->mmap_cache = NULL;
56717 - mm->free_area_cache = oldmm->mmap_base;
56718 - mm->cached_hole_size = ~0UL;
56719 + mm->free_area_cache = oldmm->free_area_cache;
56720 + mm->cached_hole_size = oldmm->cached_hole_size;
56721 mm->map_count = 0;
56722 cpumask_clear(mm_cpumask(mm));
56723 mm->mm_rb = RB_ROOT;
56724 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
56725
56726 prev = NULL;
56727 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56728 - struct file *file;
56729 -
56730 if (mpnt->vm_flags & VM_DONTCOPY) {
56731 long pages = vma_pages(mpnt);
56732 mm->total_vm -= pages;
56733 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
56734 -pages);
56735 continue;
56736 }
56737 - charge = 0;
56738 - if (mpnt->vm_flags & VM_ACCOUNT) {
56739 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56740 - if (security_vm_enough_memory(len))
56741 - goto fail_nomem;
56742 - charge = len;
56743 - }
56744 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56745 - if (!tmp)
56746 - goto fail_nomem;
56747 - *tmp = *mpnt;
56748 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56749 - pol = mpol_dup(vma_policy(mpnt));
56750 - retval = PTR_ERR(pol);
56751 - if (IS_ERR(pol))
56752 - goto fail_nomem_policy;
56753 - vma_set_policy(tmp, pol);
56754 - tmp->vm_mm = mm;
56755 - if (anon_vma_fork(tmp, mpnt))
56756 - goto fail_nomem_anon_vma_fork;
56757 - tmp->vm_flags &= ~VM_LOCKED;
56758 - tmp->vm_next = tmp->vm_prev = NULL;
56759 - file = tmp->vm_file;
56760 - if (file) {
56761 - struct inode *inode = file->f_path.dentry->d_inode;
56762 - struct address_space *mapping = file->f_mapping;
56763 -
56764 - get_file(file);
56765 - if (tmp->vm_flags & VM_DENYWRITE)
56766 - atomic_dec(&inode->i_writecount);
56767 - mutex_lock(&mapping->i_mmap_mutex);
56768 - if (tmp->vm_flags & VM_SHARED)
56769 - mapping->i_mmap_writable++;
56770 - flush_dcache_mmap_lock(mapping);
56771 - /* insert tmp into the share list, just after mpnt */
56772 - vma_prio_tree_add(tmp, mpnt);
56773 - flush_dcache_mmap_unlock(mapping);
56774 - mutex_unlock(&mapping->i_mmap_mutex);
56775 + tmp = dup_vma(mm, mpnt);
56776 + if (!tmp) {
56777 + retval = -ENOMEM;
56778 + goto out;
56779 }
56780
56781 /*
56782 - * Clear hugetlb-related page reserves for children. This only
56783 - * affects MAP_PRIVATE mappings. Faults generated by the child
56784 - * are not guaranteed to succeed, even if read-only
56785 - */
56786 - if (is_vm_hugetlb_page(tmp))
56787 - reset_vma_resv_huge_pages(tmp);
56788 -
56789 - /*
56790 * Link in the new vma and copy the page table entries.
56791 */
56792 *pprev = tmp;
56793 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
56794 if (retval)
56795 goto out;
56796 }
56797 +
56798 +#ifdef CONFIG_PAX_SEGMEXEC
56799 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56800 + struct vm_area_struct *mpnt_m;
56801 +
56802 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56803 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56804 +
56805 + if (!mpnt->vm_mirror)
56806 + continue;
56807 +
56808 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56809 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56810 + mpnt->vm_mirror = mpnt_m;
56811 + } else {
56812 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56813 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56814 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56815 + mpnt->vm_mirror->vm_mirror = mpnt;
56816 + }
56817 + }
56818 + BUG_ON(mpnt_m);
56819 + }
56820 +#endif
56821 +
56822 /* a new mm has just been created */
56823 arch_dup_mmap(oldmm, mm);
56824 retval = 0;
56825 @@ -429,14 +474,6 @@ out:
56826 flush_tlb_mm(oldmm);
56827 up_write(&oldmm->mmap_sem);
56828 return retval;
56829 -fail_nomem_anon_vma_fork:
56830 - mpol_put(pol);
56831 -fail_nomem_policy:
56832 - kmem_cache_free(vm_area_cachep, tmp);
56833 -fail_nomem:
56834 - retval = -ENOMEM;
56835 - vm_unacct_memory(charge);
56836 - goto out;
56837 }
56838
56839 static inline int mm_alloc_pgd(struct mm_struct * mm)
56840 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
56841 spin_unlock(&fs->lock);
56842 return -EAGAIN;
56843 }
56844 - fs->users++;
56845 + atomic_inc(&fs->users);
56846 spin_unlock(&fs->lock);
56847 return 0;
56848 }
56849 tsk->fs = copy_fs_struct(fs);
56850 if (!tsk->fs)
56851 return -ENOMEM;
56852 + gr_set_chroot_entries(tsk, &tsk->fs->root);
56853 return 0;
56854 }
56855
56856 @@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
56857 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56858 #endif
56859 retval = -EAGAIN;
56860 +
56861 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56862 +
56863 if (atomic_read(&p->real_cred->user->processes) >=
56864 task_rlimit(p, RLIMIT_NPROC)) {
56865 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56866 - p->real_cred->user != INIT_USER)
56867 + if (p->real_cred->user != INIT_USER &&
56868 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56869 goto bad_fork_free;
56870 }
56871 + current->flags &= ~PF_NPROC_EXCEEDED;
56872
56873 retval = copy_creds(p, clone_flags);
56874 if (retval < 0)
56875 @@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
56876 if (clone_flags & CLONE_THREAD)
56877 p->tgid = current->tgid;
56878
56879 + gr_copy_label(p);
56880 +
56881 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56882 /*
56883 * Clear TID on mm_release()?
56884 @@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
56885 bad_fork_free:
56886 free_task(p);
56887 fork_out:
56888 + gr_log_forkfail(retval);
56889 +
56890 return ERR_PTR(retval);
56891 }
56892
56893 @@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
56894 if (clone_flags & CLONE_PARENT_SETTID)
56895 put_user(nr, parent_tidptr);
56896
56897 + gr_handle_brute_check();
56898 +
56899 if (clone_flags & CLONE_VFORK) {
56900 p->vfork_done = &vfork;
56901 init_completion(&vfork);
56902 @@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
56903 return 0;
56904
56905 /* don't need lock here; in the worst case we'll do useless copy */
56906 - if (fs->users == 1)
56907 + if (atomic_read(&fs->users) == 1)
56908 return 0;
56909
56910 *new_fsp = copy_fs_struct(fs);
56911 @@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
56912 fs = current->fs;
56913 spin_lock(&fs->lock);
56914 current->fs = new_fs;
56915 - if (--fs->users)
56916 + gr_set_chroot_entries(current, &current->fs->root);
56917 + if (atomic_dec_return(&fs->users))
56918 new_fs = NULL;
56919 else
56920 new_fs = fs;
56921 diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
56922 --- linux-3.0.4/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
56923 +++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
56924 @@ -54,6 +54,7 @@
56925 #include <linux/mount.h>
56926 #include <linux/pagemap.h>
56927 #include <linux/syscalls.h>
56928 +#include <linux/ptrace.h>
56929 #include <linux/signal.h>
56930 #include <linux/module.h>
56931 #include <linux/magic.h>
56932 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
56933 struct page *page, *page_head;
56934 int err, ro = 0;
56935
56936 +#ifdef CONFIG_PAX_SEGMEXEC
56937 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
56938 + return -EFAULT;
56939 +#endif
56940 +
56941 /*
56942 * The futex address must be "naturally" aligned.
56943 */
56944 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
56945 struct futex_q q = futex_q_init;
56946 int ret;
56947
56948 + pax_track_stack();
56949 +
56950 if (!bitset)
56951 return -EINVAL;
56952 q.bitset = bitset;
56953 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
56954 struct futex_q q = futex_q_init;
56955 int res, ret;
56956
56957 + pax_track_stack();
56958 +
56959 if (!bitset)
56960 return -EINVAL;
56961
56962 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56963 {
56964 struct robust_list_head __user *head;
56965 unsigned long ret;
56966 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56967 const struct cred *cred = current_cred(), *pcred;
56968 +#endif
56969
56970 if (!futex_cmpxchg_enabled)
56971 return -ENOSYS;
56972 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56973 if (!p)
56974 goto err_unlock;
56975 ret = -EPERM;
56976 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56977 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
56978 + goto err_unlock;
56979 +#else
56980 pcred = __task_cred(p);
56981 /* If victim is in different user_ns, then uids are not
56982 comparable, so we must have CAP_SYS_PTRACE */
56983 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56984 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
56985 goto err_unlock;
56986 ok:
56987 +#endif
56988 head = p->robust_list;
56989 rcu_read_unlock();
56990 }
56991 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
56992 {
56993 u32 curval;
56994 int i;
56995 + mm_segment_t oldfs;
56996
56997 /*
56998 * This will fail and we want it. Some arch implementations do
56999 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
57000 * implementation, the non-functional ones will return
57001 * -ENOSYS.
57002 */
57003 + oldfs = get_fs();
57004 + set_fs(USER_DS);
57005 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57006 futex_cmpxchg_enabled = 1;
57007 + set_fs(oldfs);
57008
57009 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57010 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57011 diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
57012 --- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
57013 +++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
57014 @@ -10,6 +10,7 @@
57015 #include <linux/compat.h>
57016 #include <linux/nsproxy.h>
57017 #include <linux/futex.h>
57018 +#include <linux/ptrace.h>
57019
57020 #include <asm/uaccess.h>
57021
57022 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57023 {
57024 struct compat_robust_list_head __user *head;
57025 unsigned long ret;
57026 - const struct cred *cred = current_cred(), *pcred;
57027 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57028 + const struct cred *cred = current_cred();
57029 + const struct cred *pcred;
57030 +#endif
57031
57032 if (!futex_cmpxchg_enabled)
57033 return -ENOSYS;
57034 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57035 if (!p)
57036 goto err_unlock;
57037 ret = -EPERM;
57038 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57039 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57040 + goto err_unlock;
57041 +#else
57042 pcred = __task_cred(p);
57043 /* If victim is in different user_ns, then uids are not
57044 comparable, so we must have CAP_SYS_PTRACE */
57045 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57046 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57047 goto err_unlock;
57048 ok:
57049 +#endif
57050 head = p->compat_robust_list;
57051 rcu_read_unlock();
57052 }
57053 diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
57054 --- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
57055 +++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
57056 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57057 }
57058
57059 #ifdef CONFIG_MODULES
57060 -static inline int within(void *addr, void *start, unsigned long size)
57061 -{
57062 - return ((addr >= start) && (addr < start + size));
57063 -}
57064 -
57065 /* Update list and generate events when modules are unloaded. */
57066 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57067 void *data)
57068 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57069 prev = NULL;
57070 /* Remove entries located in module from linked list. */
57071 for (info = gcov_info_head; info; info = info->next) {
57072 - if (within(info, mod->module_core, mod->core_size)) {
57073 + if (within_module_core_rw((unsigned long)info, mod)) {
57074 if (prev)
57075 prev->next = info->next;
57076 else
57077 diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
57078 --- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
57079 +++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
57080 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
57081 local_irq_restore(flags);
57082 }
57083
57084 -static void run_hrtimer_softirq(struct softirq_action *h)
57085 +static void run_hrtimer_softirq(void)
57086 {
57087 hrtimer_peek_ahead_timers();
57088 }
57089 diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
57090 --- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
57091 +++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
57092 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
57093
57094 size = (((unsigned long)stop - (unsigned long)start)
57095 / sizeof(struct jump_entry));
57096 + pax_open_kernel();
57097 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57098 + pax_close_kernel();
57099 }
57100
57101 static void jump_label_update(struct jump_label_key *key, int enable);
57102 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
57103 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
57104 struct jump_entry *iter;
57105
57106 + pax_open_kernel();
57107 for (iter = iter_start; iter < iter_stop; iter++) {
57108 if (within_module_init(iter->code, mod))
57109 iter->code = 0;
57110 }
57111 + pax_close_kernel();
57112 }
57113
57114 static int
57115 diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
57116 --- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
57117 +++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
57118 @@ -11,6 +11,9 @@
57119 * Changed the compression method from stem compression to "table lookup"
57120 * compression (see scripts/kallsyms.c for a more complete description)
57121 */
57122 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57123 +#define __INCLUDED_BY_HIDESYM 1
57124 +#endif
57125 #include <linux/kallsyms.h>
57126 #include <linux/module.h>
57127 #include <linux/init.h>
57128 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57129
57130 static inline int is_kernel_inittext(unsigned long addr)
57131 {
57132 + if (system_state != SYSTEM_BOOTING)
57133 + return 0;
57134 +
57135 if (addr >= (unsigned long)_sinittext
57136 && addr <= (unsigned long)_einittext)
57137 return 1;
57138 return 0;
57139 }
57140
57141 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57142 +#ifdef CONFIG_MODULES
57143 +static inline int is_module_text(unsigned long addr)
57144 +{
57145 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57146 + return 1;
57147 +
57148 + addr = ktla_ktva(addr);
57149 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57150 +}
57151 +#else
57152 +static inline int is_module_text(unsigned long addr)
57153 +{
57154 + return 0;
57155 +}
57156 +#endif
57157 +#endif
57158 +
57159 static inline int is_kernel_text(unsigned long addr)
57160 {
57161 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57162 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57163
57164 static inline int is_kernel(unsigned long addr)
57165 {
57166 +
57167 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57168 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57169 + return 1;
57170 +
57171 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57172 +#else
57173 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57174 +#endif
57175 +
57176 return 1;
57177 return in_gate_area_no_mm(addr);
57178 }
57179
57180 static int is_ksym_addr(unsigned long addr)
57181 {
57182 +
57183 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57184 + if (is_module_text(addr))
57185 + return 0;
57186 +#endif
57187 +
57188 if (all_var)
57189 return is_kernel(addr);
57190
57191 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57192
57193 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57194 {
57195 - iter->name[0] = '\0';
57196 iter->nameoff = get_symbol_offset(new_pos);
57197 iter->pos = new_pos;
57198 }
57199 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57200 {
57201 struct kallsym_iter *iter = m->private;
57202
57203 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57204 + if (current_uid())
57205 + return 0;
57206 +#endif
57207 +
57208 /* Some debugging symbols have no name. Ignore them. */
57209 if (!iter->name[0])
57210 return 0;
57211 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57212 struct kallsym_iter *iter;
57213 int ret;
57214
57215 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57216 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57217 if (!iter)
57218 return -ENOMEM;
57219 reset_iter(iter, 0);
57220 diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
57221 --- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
57222 +++ linux-3.0.4/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
57223 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57224 * If module auto-loading support is disabled then this function
57225 * becomes a no-operation.
57226 */
57227 -int __request_module(bool wait, const char *fmt, ...)
57228 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57229 {
57230 - va_list args;
57231 char module_name[MODULE_NAME_LEN];
57232 unsigned int max_modprobes;
57233 int ret;
57234 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57235 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57236 static char *envp[] = { "HOME=/",
57237 "TERM=linux",
57238 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57239 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
57240 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57241 static int kmod_loop_msg;
57242
57243 - va_start(args, fmt);
57244 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57245 - va_end(args);
57246 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57247 if (ret >= MODULE_NAME_LEN)
57248 return -ENAMETOOLONG;
57249
57250 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
57251 if (ret)
57252 return ret;
57253
57254 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57255 + if (!current_uid()) {
57256 + /* hack to workaround consolekit/udisks stupidity */
57257 + read_lock(&tasklist_lock);
57258 + if (!strcmp(current->comm, "mount") &&
57259 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57260 + read_unlock(&tasklist_lock);
57261 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57262 + return -EPERM;
57263 + }
57264 + read_unlock(&tasklist_lock);
57265 + }
57266 +#endif
57267 +
57268 /* If modprobe needs a service that is in a module, we get a recursive
57269 * loop. Limit the number of running kmod threads to max_threads/2 or
57270 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57271 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
57272 atomic_dec(&kmod_concurrent);
57273 return ret;
57274 }
57275 +
57276 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57277 +{
57278 + va_list args;
57279 + int ret;
57280 +
57281 + va_start(args, fmt);
57282 + ret = ____request_module(wait, module_param, fmt, args);
57283 + va_end(args);
57284 +
57285 + return ret;
57286 +}
57287 +
57288 +int __request_module(bool wait, const char *fmt, ...)
57289 +{
57290 + va_list args;
57291 + int ret;
57292 +
57293 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57294 + if (current_uid()) {
57295 + char module_param[MODULE_NAME_LEN];
57296 +
57297 + memset(module_param, 0, sizeof(module_param));
57298 +
57299 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57300 +
57301 + va_start(args, fmt);
57302 + ret = ____request_module(wait, module_param, fmt, args);
57303 + va_end(args);
57304 +
57305 + return ret;
57306 + }
57307 +#endif
57308 +
57309 + va_start(args, fmt);
57310 + ret = ____request_module(wait, NULL, fmt, args);
57311 + va_end(args);
57312 +
57313 + return ret;
57314 +}
57315 +
57316 EXPORT_SYMBOL(__request_module);
57317 #endif /* CONFIG_MODULES */
57318
57319 diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
57320 --- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
57321 +++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
57322 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57323 * kernel image and loaded module images reside. This is required
57324 * so x86_64 can correctly handle the %rip-relative fixups.
57325 */
57326 - kip->insns = module_alloc(PAGE_SIZE);
57327 + kip->insns = module_alloc_exec(PAGE_SIZE);
57328 if (!kip->insns) {
57329 kfree(kip);
57330 return NULL;
57331 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57332 */
57333 if (!list_is_singular(&kip->list)) {
57334 list_del(&kip->list);
57335 - module_free(NULL, kip->insns);
57336 + module_free_exec(NULL, kip->insns);
57337 kfree(kip);
57338 }
57339 return 1;
57340 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57341 {
57342 int i, err = 0;
57343 unsigned long offset = 0, size = 0;
57344 - char *modname, namebuf[128];
57345 + char *modname, namebuf[KSYM_NAME_LEN];
57346 const char *symbol_name;
57347 void *addr;
57348 struct kprobe_blackpoint *kb;
57349 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57350 const char *sym = NULL;
57351 unsigned int i = *(loff_t *) v;
57352 unsigned long offset = 0;
57353 - char *modname, namebuf[128];
57354 + char *modname, namebuf[KSYM_NAME_LEN];
57355
57356 head = &kprobe_table[i];
57357 preempt_disable();
57358 diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
57359 --- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
57360 +++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
57361 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
57362 end = (unsigned long) &_end,
57363 addr = (unsigned long) obj;
57364
57365 +#ifdef CONFIG_PAX_KERNEXEC
57366 + start = ktla_ktva(start);
57367 +#endif
57368 +
57369 /*
57370 * static variable?
57371 */
57372 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
57373 if (!static_obj(lock->key)) {
57374 debug_locks_off();
57375 printk("INFO: trying to register non-static key.\n");
57376 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57377 printk("the code is fine but needs lockdep annotation.\n");
57378 printk("turning off the locking correctness validator.\n");
57379 dump_stack();
57380 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
57381 if (!class)
57382 return 0;
57383 }
57384 - atomic_inc((atomic_t *)&class->ops);
57385 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57386 if (very_verbose(class)) {
57387 printk("\nacquire class [%p] %s", class->key, class->name);
57388 if (class->name_version > 1)
57389 diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
57390 --- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
57391 +++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
57392 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57393
57394 static void print_name(struct seq_file *m, struct lock_class *class)
57395 {
57396 - char str[128];
57397 + char str[KSYM_NAME_LEN];
57398 const char *name = class->name;
57399
57400 if (!name) {
57401 diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
57402 --- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
57403 +++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
57404 @@ -58,6 +58,7 @@
57405 #include <linux/jump_label.h>
57406 #include <linux/pfn.h>
57407 #include <linux/bsearch.h>
57408 +#include <linux/grsecurity.h>
57409
57410 #define CREATE_TRACE_POINTS
57411 #include <trace/events/module.h>
57412 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57413
57414 /* Bounds of module allocation, for speeding __module_address.
57415 * Protected by module_mutex. */
57416 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57417 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57418 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57419
57420 int register_module_notifier(struct notifier_block * nb)
57421 {
57422 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
57423 return true;
57424
57425 list_for_each_entry_rcu(mod, &modules, list) {
57426 - struct symsearch arr[] = {
57427 + struct symsearch modarr[] = {
57428 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57429 NOT_GPL_ONLY, false },
57430 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57431 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
57432 #endif
57433 };
57434
57435 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57436 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57437 return true;
57438 }
57439 return false;
57440 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
57441 static int percpu_modalloc(struct module *mod,
57442 unsigned long size, unsigned long align)
57443 {
57444 - if (align > PAGE_SIZE) {
57445 + if (align-1 >= PAGE_SIZE) {
57446 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57447 mod->name, align, PAGE_SIZE);
57448 align = PAGE_SIZE;
57449 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
57450 */
57451 #ifdef CONFIG_SYSFS
57452
57453 -#ifdef CONFIG_KALLSYMS
57454 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57455 static inline bool sect_empty(const Elf_Shdr *sect)
57456 {
57457 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57458 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
57459
57460 static void unset_module_core_ro_nx(struct module *mod)
57461 {
57462 - set_page_attributes(mod->module_core + mod->core_text_size,
57463 - mod->module_core + mod->core_size,
57464 + set_page_attributes(mod->module_core_rw,
57465 + mod->module_core_rw + mod->core_size_rw,
57466 set_memory_x);
57467 - set_page_attributes(mod->module_core,
57468 - mod->module_core + mod->core_ro_size,
57469 + set_page_attributes(mod->module_core_rx,
57470 + mod->module_core_rx + mod->core_size_rx,
57471 set_memory_rw);
57472 }
57473
57474 static void unset_module_init_ro_nx(struct module *mod)
57475 {
57476 - set_page_attributes(mod->module_init + mod->init_text_size,
57477 - mod->module_init + mod->init_size,
57478 + set_page_attributes(mod->module_init_rw,
57479 + mod->module_init_rw + mod->init_size_rw,
57480 set_memory_x);
57481 - set_page_attributes(mod->module_init,
57482 - mod->module_init + mod->init_ro_size,
57483 + set_page_attributes(mod->module_init_rx,
57484 + mod->module_init_rx + mod->init_size_rx,
57485 set_memory_rw);
57486 }
57487
57488 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
57489
57490 mutex_lock(&module_mutex);
57491 list_for_each_entry_rcu(mod, &modules, list) {
57492 - if ((mod->module_core) && (mod->core_text_size)) {
57493 - set_page_attributes(mod->module_core,
57494 - mod->module_core + mod->core_text_size,
57495 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57496 + set_page_attributes(mod->module_core_rx,
57497 + mod->module_core_rx + mod->core_size_rx,
57498 set_memory_rw);
57499 }
57500 - if ((mod->module_init) && (mod->init_text_size)) {
57501 - set_page_attributes(mod->module_init,
57502 - mod->module_init + mod->init_text_size,
57503 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57504 + set_page_attributes(mod->module_init_rx,
57505 + mod->module_init_rx + mod->init_size_rx,
57506 set_memory_rw);
57507 }
57508 }
57509 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
57510
57511 mutex_lock(&module_mutex);
57512 list_for_each_entry_rcu(mod, &modules, list) {
57513 - if ((mod->module_core) && (mod->core_text_size)) {
57514 - set_page_attributes(mod->module_core,
57515 - mod->module_core + mod->core_text_size,
57516 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57517 + set_page_attributes(mod->module_core_rx,
57518 + mod->module_core_rx + mod->core_size_rx,
57519 set_memory_ro);
57520 }
57521 - if ((mod->module_init) && (mod->init_text_size)) {
57522 - set_page_attributes(mod->module_init,
57523 - mod->module_init + mod->init_text_size,
57524 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57525 + set_page_attributes(mod->module_init_rx,
57526 + mod->module_init_rx + mod->init_size_rx,
57527 set_memory_ro);
57528 }
57529 }
57530 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
57531
57532 /* This may be NULL, but that's OK */
57533 unset_module_init_ro_nx(mod);
57534 - module_free(mod, mod->module_init);
57535 + module_free(mod, mod->module_init_rw);
57536 + module_free_exec(mod, mod->module_init_rx);
57537 kfree(mod->args);
57538 percpu_modfree(mod);
57539
57540 /* Free lock-classes: */
57541 - lockdep_free_key_range(mod->module_core, mod->core_size);
57542 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57543 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57544
57545 /* Finally, free the core (containing the module structure) */
57546 unset_module_core_ro_nx(mod);
57547 - module_free(mod, mod->module_core);
57548 + module_free_exec(mod, mod->module_core_rx);
57549 + module_free(mod, mod->module_core_rw);
57550
57551 #ifdef CONFIG_MPU
57552 update_protections(current->mm);
57553 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
57554 unsigned int i;
57555 int ret = 0;
57556 const struct kernel_symbol *ksym;
57557 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57558 + int is_fs_load = 0;
57559 + int register_filesystem_found = 0;
57560 + char *p;
57561 +
57562 + p = strstr(mod->args, "grsec_modharden_fs");
57563 + if (p) {
57564 + char *endptr = p + strlen("grsec_modharden_fs");
57565 + /* copy \0 as well */
57566 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57567 + is_fs_load = 1;
57568 + }
57569 +#endif
57570
57571 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57572 const char *name = info->strtab + sym[i].st_name;
57573
57574 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57575 + /* it's a real shame this will never get ripped and copied
57576 + upstream! ;(
57577 + */
57578 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57579 + register_filesystem_found = 1;
57580 +#endif
57581 +
57582 switch (sym[i].st_shndx) {
57583 case SHN_COMMON:
57584 /* We compiled with -fno-common. These are not
57585 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
57586 ksym = resolve_symbol_wait(mod, info, name);
57587 /* Ok if resolved. */
57588 if (ksym && !IS_ERR(ksym)) {
57589 + pax_open_kernel();
57590 sym[i].st_value = ksym->value;
57591 + pax_close_kernel();
57592 break;
57593 }
57594
57595 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
57596 secbase = (unsigned long)mod_percpu(mod);
57597 else
57598 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57599 + pax_open_kernel();
57600 sym[i].st_value += secbase;
57601 + pax_close_kernel();
57602 break;
57603 }
57604 }
57605
57606 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57607 + if (is_fs_load && !register_filesystem_found) {
57608 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57609 + ret = -EPERM;
57610 + }
57611 +#endif
57612 +
57613 return ret;
57614 }
57615
57616 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
57617 || s->sh_entsize != ~0UL
57618 || strstarts(sname, ".init"))
57619 continue;
57620 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57621 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57622 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57623 + else
57624 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57625 DEBUGP("\t%s\n", name);
57626 }
57627 - switch (m) {
57628 - case 0: /* executable */
57629 - mod->core_size = debug_align(mod->core_size);
57630 - mod->core_text_size = mod->core_size;
57631 - break;
57632 - case 1: /* RO: text and ro-data */
57633 - mod->core_size = debug_align(mod->core_size);
57634 - mod->core_ro_size = mod->core_size;
57635 - break;
57636 - case 3: /* whole core */
57637 - mod->core_size = debug_align(mod->core_size);
57638 - break;
57639 - }
57640 }
57641
57642 DEBUGP("Init section allocation order:\n");
57643 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
57644 || s->sh_entsize != ~0UL
57645 || !strstarts(sname, ".init"))
57646 continue;
57647 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57648 - | INIT_OFFSET_MASK);
57649 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57650 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57651 + else
57652 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57653 + s->sh_entsize |= INIT_OFFSET_MASK;
57654 DEBUGP("\t%s\n", sname);
57655 }
57656 - switch (m) {
57657 - case 0: /* executable */
57658 - mod->init_size = debug_align(mod->init_size);
57659 - mod->init_text_size = mod->init_size;
57660 - break;
57661 - case 1: /* RO: text and ro-data */
57662 - mod->init_size = debug_align(mod->init_size);
57663 - mod->init_ro_size = mod->init_size;
57664 - break;
57665 - case 3: /* whole init */
57666 - mod->init_size = debug_align(mod->init_size);
57667 - break;
57668 - }
57669 }
57670 }
57671
57672 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
57673
57674 /* Put symbol section at end of init part of module. */
57675 symsect->sh_flags |= SHF_ALLOC;
57676 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57677 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57678 info->index.sym) | INIT_OFFSET_MASK;
57679 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57680
57681 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
57682 }
57683
57684 /* Append room for core symbols at end of core part. */
57685 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57686 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57687 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57688 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57689
57690 /* Put string table section at end of init part of module. */
57691 strsect->sh_flags |= SHF_ALLOC;
57692 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57693 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57694 info->index.str) | INIT_OFFSET_MASK;
57695 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57696
57697 /* Append room for core symbols' strings at end of core part. */
57698 - info->stroffs = mod->core_size;
57699 + info->stroffs = mod->core_size_rx;
57700 __set_bit(0, info->strmap);
57701 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57702 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57703 }
57704
57705 static void add_kallsyms(struct module *mod, const struct load_info *info)
57706 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
57707 /* Make sure we get permanent strtab: don't use info->strtab. */
57708 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57709
57710 + pax_open_kernel();
57711 +
57712 /* Set types up while we still have access to sections. */
57713 for (i = 0; i < mod->num_symtab; i++)
57714 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57715
57716 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57717 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57718 src = mod->symtab;
57719 *dst = *src;
57720 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57721 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
57722 }
57723 mod->core_num_syms = ndst;
57724
57725 - mod->core_strtab = s = mod->module_core + info->stroffs;
57726 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57727 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57728 if (test_bit(i, info->strmap))
57729 *++s = mod->strtab[i];
57730 +
57731 + pax_close_kernel();
57732 }
57733 #else
57734 static inline void layout_symtab(struct module *mod, struct load_info *info)
57735 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
57736 ddebug_remove_module(debug->modname);
57737 }
57738
57739 -static void *module_alloc_update_bounds(unsigned long size)
57740 +static void *module_alloc_update_bounds_rw(unsigned long size)
57741 {
57742 void *ret = module_alloc(size);
57743
57744 if (ret) {
57745 mutex_lock(&module_mutex);
57746 /* Update module bounds. */
57747 - if ((unsigned long)ret < module_addr_min)
57748 - module_addr_min = (unsigned long)ret;
57749 - if ((unsigned long)ret + size > module_addr_max)
57750 - module_addr_max = (unsigned long)ret + size;
57751 + if ((unsigned long)ret < module_addr_min_rw)
57752 + module_addr_min_rw = (unsigned long)ret;
57753 + if ((unsigned long)ret + size > module_addr_max_rw)
57754 + module_addr_max_rw = (unsigned long)ret + size;
57755 + mutex_unlock(&module_mutex);
57756 + }
57757 + return ret;
57758 +}
57759 +
57760 +static void *module_alloc_update_bounds_rx(unsigned long size)
57761 +{
57762 + void *ret = module_alloc_exec(size);
57763 +
57764 + if (ret) {
57765 + mutex_lock(&module_mutex);
57766 + /* Update module bounds. */
57767 + if ((unsigned long)ret < module_addr_min_rx)
57768 + module_addr_min_rx = (unsigned long)ret;
57769 + if ((unsigned long)ret + size > module_addr_max_rx)
57770 + module_addr_max_rx = (unsigned long)ret + size;
57771 mutex_unlock(&module_mutex);
57772 }
57773 return ret;
57774 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
57775 void *ptr;
57776
57777 /* Do the allocs. */
57778 - ptr = module_alloc_update_bounds(mod->core_size);
57779 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57780 /*
57781 * The pointer to this block is stored in the module structure
57782 * which is inside the block. Just mark it as not being a
57783 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
57784 if (!ptr)
57785 return -ENOMEM;
57786
57787 - memset(ptr, 0, mod->core_size);
57788 - mod->module_core = ptr;
57789 + memset(ptr, 0, mod->core_size_rw);
57790 + mod->module_core_rw = ptr;
57791
57792 - ptr = module_alloc_update_bounds(mod->init_size);
57793 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57794 /*
57795 * The pointer to this block is stored in the module structure
57796 * which is inside the block. This block doesn't need to be
57797 * scanned as it contains data and code that will be freed
57798 * after the module is initialized.
57799 */
57800 - kmemleak_ignore(ptr);
57801 - if (!ptr && mod->init_size) {
57802 - module_free(mod, mod->module_core);
57803 + kmemleak_not_leak(ptr);
57804 + if (!ptr && mod->init_size_rw) {
57805 + module_free(mod, mod->module_core_rw);
57806 return -ENOMEM;
57807 }
57808 - memset(ptr, 0, mod->init_size);
57809 - mod->module_init = ptr;
57810 + memset(ptr, 0, mod->init_size_rw);
57811 + mod->module_init_rw = ptr;
57812 +
57813 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57814 + kmemleak_not_leak(ptr);
57815 + if (!ptr) {
57816 + module_free(mod, mod->module_init_rw);
57817 + module_free(mod, mod->module_core_rw);
57818 + return -ENOMEM;
57819 + }
57820 +
57821 + pax_open_kernel();
57822 + memset(ptr, 0, mod->core_size_rx);
57823 + pax_close_kernel();
57824 + mod->module_core_rx = ptr;
57825 +
57826 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57827 + kmemleak_not_leak(ptr);
57828 + if (!ptr && mod->init_size_rx) {
57829 + module_free_exec(mod, mod->module_core_rx);
57830 + module_free(mod, mod->module_init_rw);
57831 + module_free(mod, mod->module_core_rw);
57832 + return -ENOMEM;
57833 + }
57834 +
57835 + pax_open_kernel();
57836 + memset(ptr, 0, mod->init_size_rx);
57837 + pax_close_kernel();
57838 + mod->module_init_rx = ptr;
57839
57840 /* Transfer each section which specifies SHF_ALLOC */
57841 DEBUGP("final section addresses:\n");
57842 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
57843 if (!(shdr->sh_flags & SHF_ALLOC))
57844 continue;
57845
57846 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
57847 - dest = mod->module_init
57848 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57849 - else
57850 - dest = mod->module_core + shdr->sh_entsize;
57851 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57852 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57853 + dest = mod->module_init_rw
57854 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57855 + else
57856 + dest = mod->module_init_rx
57857 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57858 + } else {
57859 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57860 + dest = mod->module_core_rw + shdr->sh_entsize;
57861 + else
57862 + dest = mod->module_core_rx + shdr->sh_entsize;
57863 + }
57864 +
57865 + if (shdr->sh_type != SHT_NOBITS) {
57866 +
57867 +#ifdef CONFIG_PAX_KERNEXEC
57868 +#ifdef CONFIG_X86_64
57869 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57870 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57871 +#endif
57872 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
57873 + pax_open_kernel();
57874 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57875 + pax_close_kernel();
57876 + } else
57877 +#endif
57878
57879 - if (shdr->sh_type != SHT_NOBITS)
57880 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57881 + }
57882 /* Update sh_addr to point to copy in image. */
57883 - shdr->sh_addr = (unsigned long)dest;
57884 +
57885 +#ifdef CONFIG_PAX_KERNEXEC
57886 + if (shdr->sh_flags & SHF_EXECINSTR)
57887 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
57888 + else
57889 +#endif
57890 +
57891 + shdr->sh_addr = (unsigned long)dest;
57892 DEBUGP("\t0x%lx %s\n",
57893 shdr->sh_addr, info->secstrings + shdr->sh_name);
57894 }
57895 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
57896 * Do it before processing of module parameters, so the module
57897 * can provide parameter accessor functions of its own.
57898 */
57899 - if (mod->module_init)
57900 - flush_icache_range((unsigned long)mod->module_init,
57901 - (unsigned long)mod->module_init
57902 - + mod->init_size);
57903 - flush_icache_range((unsigned long)mod->module_core,
57904 - (unsigned long)mod->module_core + mod->core_size);
57905 + if (mod->module_init_rx)
57906 + flush_icache_range((unsigned long)mod->module_init_rx,
57907 + (unsigned long)mod->module_init_rx
57908 + + mod->init_size_rx);
57909 + flush_icache_range((unsigned long)mod->module_core_rx,
57910 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
57911
57912 set_fs(old_fs);
57913 }
57914 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
57915 {
57916 kfree(info->strmap);
57917 percpu_modfree(mod);
57918 - module_free(mod, mod->module_init);
57919 - module_free(mod, mod->module_core);
57920 + module_free_exec(mod, mod->module_init_rx);
57921 + module_free_exec(mod, mod->module_core_rx);
57922 + module_free(mod, mod->module_init_rw);
57923 + module_free(mod, mod->module_core_rw);
57924 }
57925
57926 static int post_relocation(struct module *mod, const struct load_info *info)
57927 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
57928 if (err)
57929 goto free_unload;
57930
57931 + /* Now copy in args */
57932 + mod->args = strndup_user(uargs, ~0UL >> 1);
57933 + if (IS_ERR(mod->args)) {
57934 + err = PTR_ERR(mod->args);
57935 + goto free_unload;
57936 + }
57937 +
57938 /* Set up MODINFO_ATTR fields */
57939 setup_modinfo(mod, &info);
57940
57941 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57942 + {
57943 + char *p, *p2;
57944 +
57945 + if (strstr(mod->args, "grsec_modharden_netdev")) {
57946 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
57947 + err = -EPERM;
57948 + goto free_modinfo;
57949 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
57950 + p += strlen("grsec_modharden_normal");
57951 + p2 = strstr(p, "_");
57952 + if (p2) {
57953 + *p2 = '\0';
57954 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
57955 + *p2 = '_';
57956 + }
57957 + err = -EPERM;
57958 + goto free_modinfo;
57959 + }
57960 + }
57961 +#endif
57962 +
57963 /* Fix up syms, so that st_value is a pointer to location. */
57964 err = simplify_symbols(mod, &info);
57965 if (err < 0)
57966 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
57967
57968 flush_module_icache(mod);
57969
57970 - /* Now copy in args */
57971 - mod->args = strndup_user(uargs, ~0UL >> 1);
57972 - if (IS_ERR(mod->args)) {
57973 - err = PTR_ERR(mod->args);
57974 - goto free_arch_cleanup;
57975 - }
57976 -
57977 /* Mark state as coming so strong_try_module_get() ignores us. */
57978 mod->state = MODULE_STATE_COMING;
57979
57980 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
57981 unlock:
57982 mutex_unlock(&module_mutex);
57983 synchronize_sched();
57984 - kfree(mod->args);
57985 - free_arch_cleanup:
57986 module_arch_cleanup(mod);
57987 free_modinfo:
57988 free_modinfo(mod);
57989 + kfree(mod->args);
57990 free_unload:
57991 module_unload_free(mod);
57992 free_module:
57993 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
57994 MODULE_STATE_COMING, mod);
57995
57996 /* Set RO and NX regions for core */
57997 - set_section_ro_nx(mod->module_core,
57998 - mod->core_text_size,
57999 - mod->core_ro_size,
58000 - mod->core_size);
58001 + set_section_ro_nx(mod->module_core_rx,
58002 + mod->core_size_rx,
58003 + mod->core_size_rx,
58004 + mod->core_size_rx);
58005
58006 /* Set RO and NX regions for init */
58007 - set_section_ro_nx(mod->module_init,
58008 - mod->init_text_size,
58009 - mod->init_ro_size,
58010 - mod->init_size);
58011 + set_section_ro_nx(mod->module_init_rx,
58012 + mod->init_size_rx,
58013 + mod->init_size_rx,
58014 + mod->init_size_rx);
58015
58016 do_mod_ctors(mod);
58017 /* Start the module */
58018 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
58019 mod->strtab = mod->core_strtab;
58020 #endif
58021 unset_module_init_ro_nx(mod);
58022 - module_free(mod, mod->module_init);
58023 - mod->module_init = NULL;
58024 - mod->init_size = 0;
58025 - mod->init_ro_size = 0;
58026 - mod->init_text_size = 0;
58027 + module_free(mod, mod->module_init_rw);
58028 + module_free_exec(mod, mod->module_init_rx);
58029 + mod->module_init_rw = NULL;
58030 + mod->module_init_rx = NULL;
58031 + mod->init_size_rw = 0;
58032 + mod->init_size_rx = 0;
58033 mutex_unlock(&module_mutex);
58034
58035 return 0;
58036 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
58037 unsigned long nextval;
58038
58039 /* At worse, next value is at end of module */
58040 - if (within_module_init(addr, mod))
58041 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58042 + if (within_module_init_rx(addr, mod))
58043 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58044 + else if (within_module_init_rw(addr, mod))
58045 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58046 + else if (within_module_core_rx(addr, mod))
58047 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58048 + else if (within_module_core_rw(addr, mod))
58049 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58050 else
58051 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58052 + return NULL;
58053
58054 /* Scan for closest preceding symbol, and next symbol. (ELF
58055 starts real symbols at 1). */
58056 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
58057 char buf[8];
58058
58059 seq_printf(m, "%s %u",
58060 - mod->name, mod->init_size + mod->core_size);
58061 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58062 print_unload_info(m, mod);
58063
58064 /* Informative for users. */
58065 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
58066 mod->state == MODULE_STATE_COMING ? "Loading":
58067 "Live");
58068 /* Used by oprofile and other similar tools. */
58069 - seq_printf(m, " 0x%pK", mod->module_core);
58070 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58071
58072 /* Taints info */
58073 if (mod->taints)
58074 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
58075
58076 static int __init proc_modules_init(void)
58077 {
58078 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58079 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58080 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58081 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58082 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58083 +#else
58084 proc_create("modules", 0, NULL, &proc_modules_operations);
58085 +#endif
58086 +#else
58087 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58088 +#endif
58089 return 0;
58090 }
58091 module_init(proc_modules_init);
58092 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
58093 {
58094 struct module *mod;
58095
58096 - if (addr < module_addr_min || addr > module_addr_max)
58097 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58098 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58099 return NULL;
58100
58101 list_for_each_entry_rcu(mod, &modules, list)
58102 - if (within_module_core(addr, mod)
58103 - || within_module_init(addr, mod))
58104 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58105 return mod;
58106 return NULL;
58107 }
58108 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
58109 */
58110 struct module *__module_text_address(unsigned long addr)
58111 {
58112 - struct module *mod = __module_address(addr);
58113 + struct module *mod;
58114 +
58115 +#ifdef CONFIG_X86_32
58116 + addr = ktla_ktva(addr);
58117 +#endif
58118 +
58119 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58120 + return NULL;
58121 +
58122 + mod = __module_address(addr);
58123 +
58124 if (mod) {
58125 /* Make sure it's within the text section. */
58126 - if (!within(addr, mod->module_init, mod->init_text_size)
58127 - && !within(addr, mod->module_core, mod->core_text_size))
58128 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58129 mod = NULL;
58130 }
58131 return mod;
58132 diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
58133 --- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
58134 +++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
58135 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
58136 spin_lock_mutex(&lock->wait_lock, flags);
58137
58138 debug_mutex_lock_common(lock, &waiter);
58139 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58140 + debug_mutex_add_waiter(lock, &waiter, task);
58141
58142 /* add waiting tasks to the end of the waitqueue (FIFO): */
58143 list_add_tail(&waiter.list, &lock->wait_list);
58144 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
58145 * TASK_UNINTERRUPTIBLE case.)
58146 */
58147 if (unlikely(signal_pending_state(state, task))) {
58148 - mutex_remove_waiter(lock, &waiter,
58149 - task_thread_info(task));
58150 + mutex_remove_waiter(lock, &waiter, task);
58151 mutex_release(&lock->dep_map, 1, ip);
58152 spin_unlock_mutex(&lock->wait_lock, flags);
58153
58154 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
58155 done:
58156 lock_acquired(&lock->dep_map, ip);
58157 /* got the lock - rejoice! */
58158 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58159 + mutex_remove_waiter(lock, &waiter, task);
58160 mutex_set_owner(lock);
58161
58162 /* set it to 0 if there are no waiters left: */
58163 diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
58164 --- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
58165 +++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
58166 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58167 }
58168
58169 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58170 - struct thread_info *ti)
58171 + struct task_struct *task)
58172 {
58173 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58174
58175 /* Mark the current thread as blocked on the lock: */
58176 - ti->task->blocked_on = waiter;
58177 + task->blocked_on = waiter;
58178 }
58179
58180 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58181 - struct thread_info *ti)
58182 + struct task_struct *task)
58183 {
58184 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58185 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58186 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58187 - ti->task->blocked_on = NULL;
58188 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58189 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58190 + task->blocked_on = NULL;
58191
58192 list_del_init(&waiter->list);
58193 waiter->task = NULL;
58194 diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
58195 --- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
58196 +++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
58197 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
58198 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58199 extern void debug_mutex_add_waiter(struct mutex *lock,
58200 struct mutex_waiter *waiter,
58201 - struct thread_info *ti);
58202 + struct task_struct *task);
58203 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58204 - struct thread_info *ti);
58205 + struct task_struct *task);
58206 extern void debug_mutex_unlock(struct mutex *lock);
58207 extern void debug_mutex_init(struct mutex *lock, const char *name,
58208 struct lock_class_key *key);
58209 diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
58210 --- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
58211 +++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
58212 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58213 padata->pd = pd;
58214 padata->cb_cpu = cb_cpu;
58215
58216 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58217 - atomic_set(&pd->seq_nr, -1);
58218 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58219 + atomic_set_unchecked(&pd->seq_nr, -1);
58220
58221 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58222 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58223
58224 target_cpu = padata_cpu_hash(padata);
58225 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58226 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58227 padata_init_pqueues(pd);
58228 padata_init_squeues(pd);
58229 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58230 - atomic_set(&pd->seq_nr, -1);
58231 + atomic_set_unchecked(&pd->seq_nr, -1);
58232 atomic_set(&pd->reorder_objects, 0);
58233 atomic_set(&pd->refcnt, 0);
58234 pd->pinst = pinst;
58235 diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
58236 --- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
58237 +++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
58238 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58239 const char *board;
58240
58241 printk(KERN_WARNING "------------[ cut here ]------------\n");
58242 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58243 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58244 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58245 if (board)
58246 printk(KERN_WARNING "Hardware name: %s\n", board);
58247 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58248 */
58249 void __stack_chk_fail(void)
58250 {
58251 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58252 + dump_stack();
58253 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58254 __builtin_return_address(0));
58255 }
58256 EXPORT_SYMBOL(__stack_chk_fail);
58257 diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
58258 --- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
58259 +++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
58260 @@ -33,6 +33,7 @@
58261 #include <linux/rculist.h>
58262 #include <linux/bootmem.h>
58263 #include <linux/hash.h>
58264 +#include <linux/security.h>
58265 #include <linux/pid_namespace.h>
58266 #include <linux/init_task.h>
58267 #include <linux/syscalls.h>
58268 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58269
58270 int pid_max = PID_MAX_DEFAULT;
58271
58272 -#define RESERVED_PIDS 300
58273 +#define RESERVED_PIDS 500
58274
58275 int pid_max_min = RESERVED_PIDS + 1;
58276 int pid_max_max = PID_MAX_LIMIT;
58277 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58278 */
58279 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58280 {
58281 + struct task_struct *task;
58282 +
58283 rcu_lockdep_assert(rcu_read_lock_held());
58284 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58285 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58286 +
58287 + if (gr_pid_is_chrooted(task))
58288 + return NULL;
58289 +
58290 + return task;
58291 }
58292
58293 struct task_struct *find_task_by_vpid(pid_t vnr)
58294 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58295 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58296 }
58297
58298 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58299 +{
58300 + rcu_lockdep_assert(rcu_read_lock_held());
58301 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58302 +}
58303 +
58304 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58305 {
58306 struct pid *pid;
58307 diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
58308 --- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
58309 +++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
58310 @@ -6,6 +6,7 @@
58311 #include <linux/posix-timers.h>
58312 #include <linux/errno.h>
58313 #include <linux/math64.h>
58314 +#include <linux/security.h>
58315 #include <asm/uaccess.h>
58316 #include <linux/kernel_stat.h>
58317 #include <trace/events/timer.h>
58318 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58319
58320 static __init int init_posix_cpu_timers(void)
58321 {
58322 - struct k_clock process = {
58323 + static struct k_clock process = {
58324 .clock_getres = process_cpu_clock_getres,
58325 .clock_get = process_cpu_clock_get,
58326 .timer_create = process_cpu_timer_create,
58327 .nsleep = process_cpu_nsleep,
58328 .nsleep_restart = process_cpu_nsleep_restart,
58329 };
58330 - struct k_clock thread = {
58331 + static struct k_clock thread = {
58332 .clock_getres = thread_cpu_clock_getres,
58333 .clock_get = thread_cpu_clock_get,
58334 .timer_create = thread_cpu_timer_create,
58335 diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
58336 --- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
58337 +++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
58338 @@ -43,6 +43,7 @@
58339 #include <linux/idr.h>
58340 #include <linux/posix-clock.h>
58341 #include <linux/posix-timers.h>
58342 +#include <linux/grsecurity.h>
58343 #include <linux/syscalls.h>
58344 #include <linux/wait.h>
58345 #include <linux/workqueue.h>
58346 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58347 * which we beg off on and pass to do_sys_settimeofday().
58348 */
58349
58350 -static struct k_clock posix_clocks[MAX_CLOCKS];
58351 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58352
58353 /*
58354 * These ones are defined below.
58355 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58356 */
58357 static __init int init_posix_timers(void)
58358 {
58359 - struct k_clock clock_realtime = {
58360 + static struct k_clock clock_realtime = {
58361 .clock_getres = hrtimer_get_res,
58362 .clock_get = posix_clock_realtime_get,
58363 .clock_set = posix_clock_realtime_set,
58364 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58365 .timer_get = common_timer_get,
58366 .timer_del = common_timer_del,
58367 };
58368 - struct k_clock clock_monotonic = {
58369 + static struct k_clock clock_monotonic = {
58370 .clock_getres = hrtimer_get_res,
58371 .clock_get = posix_ktime_get_ts,
58372 .nsleep = common_nsleep,
58373 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58374 .timer_get = common_timer_get,
58375 .timer_del = common_timer_del,
58376 };
58377 - struct k_clock clock_monotonic_raw = {
58378 + static struct k_clock clock_monotonic_raw = {
58379 .clock_getres = hrtimer_get_res,
58380 .clock_get = posix_get_monotonic_raw,
58381 };
58382 - struct k_clock clock_realtime_coarse = {
58383 + static struct k_clock clock_realtime_coarse = {
58384 .clock_getres = posix_get_coarse_res,
58385 .clock_get = posix_get_realtime_coarse,
58386 };
58387 - struct k_clock clock_monotonic_coarse = {
58388 + static struct k_clock clock_monotonic_coarse = {
58389 .clock_getres = posix_get_coarse_res,
58390 .clock_get = posix_get_monotonic_coarse,
58391 };
58392 - struct k_clock clock_boottime = {
58393 + static struct k_clock clock_boottime = {
58394 .clock_getres = hrtimer_get_res,
58395 .clock_get = posix_get_boottime,
58396 .nsleep = common_nsleep,
58397 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58398 .timer_del = common_timer_del,
58399 };
58400
58401 + pax_track_stack();
58402 +
58403 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58404 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58405 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58406 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58407 return;
58408 }
58409
58410 - posix_clocks[clock_id] = *new_clock;
58411 + posix_clocks[clock_id] = new_clock;
58412 }
58413 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58414
58415 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
58416 return (id & CLOCKFD_MASK) == CLOCKFD ?
58417 &clock_posix_dynamic : &clock_posix_cpu;
58418
58419 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58420 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58421 return NULL;
58422 - return &posix_clocks[id];
58423 + return posix_clocks[id];
58424 }
58425
58426 static int common_timer_create(struct k_itimer *new_timer)
58427 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58428 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58429 return -EFAULT;
58430
58431 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58432 + have their clock_set fptr set to a nosettime dummy function
58433 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58434 + call common_clock_set, which calls do_sys_settimeofday, which
58435 + we hook
58436 + */
58437 +
58438 return kc->clock_set(which_clock, &new_tp);
58439 }
58440
58441 diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
58442 --- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
58443 +++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
58444 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58445 .enable_mask = SYSRQ_ENABLE_BOOT,
58446 };
58447
58448 -static int pm_sysrq_init(void)
58449 +static int __init pm_sysrq_init(void)
58450 {
58451 register_sysrq_key('o', &sysrq_poweroff_op);
58452 return 0;
58453 diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
58454 --- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
58455 +++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
58456 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58457 u64 elapsed_csecs64;
58458 unsigned int elapsed_csecs;
58459 bool wakeup = false;
58460 + bool timedout = false;
58461
58462 do_gettimeofday(&start);
58463
58464 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58465
58466 while (true) {
58467 todo = 0;
58468 + if (time_after(jiffies, end_time))
58469 + timedout = true;
58470 read_lock(&tasklist_lock);
58471 do_each_thread(g, p) {
58472 if (frozen(p) || !freezable(p))
58473 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58474 * try_to_stop() after schedule() in ptrace/signal
58475 * stop sees TIF_FREEZE.
58476 */
58477 - if (!task_is_stopped_or_traced(p) &&
58478 - !freezer_should_skip(p))
58479 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58480 todo++;
58481 + if (timedout) {
58482 + printk(KERN_ERR "Task refusing to freeze:\n");
58483 + sched_show_task(p);
58484 + }
58485 + }
58486 } while_each_thread(g, p);
58487 read_unlock(&tasklist_lock);
58488
58489 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58490 todo += wq_busy;
58491 }
58492
58493 - if (!todo || time_after(jiffies, end_time))
58494 + if (!todo || timedout)
58495 break;
58496
58497 if (pm_wakeup_pending()) {
58498 diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
58499 --- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
58500 +++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
58501 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
58502 if (from_file && type != SYSLOG_ACTION_OPEN)
58503 return 0;
58504
58505 +#ifdef CONFIG_GRKERNSEC_DMESG
58506 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58507 + return -EPERM;
58508 +#endif
58509 +
58510 if (syslog_action_restricted(type)) {
58511 if (capable(CAP_SYSLOG))
58512 return 0;
58513 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58514 if (capable(CAP_SYS_ADMIN)) {
58515 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58516 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58517 "but no CAP_SYSLOG (deprecated).\n");
58518 return 0;
58519 }
58520 diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
58521 --- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
58522 +++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
58523 @@ -39,7 +39,7 @@ struct profile_hit {
58524 /* Oprofile timer tick hook */
58525 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58526
58527 -static atomic_t *prof_buffer;
58528 +static atomic_unchecked_t *prof_buffer;
58529 static unsigned long prof_len, prof_shift;
58530
58531 int prof_on __read_mostly;
58532 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
58533 hits[i].pc = 0;
58534 continue;
58535 }
58536 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58537 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58538 hits[i].hits = hits[i].pc = 0;
58539 }
58540 }
58541 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
58542 * Add the current hit(s) and flush the write-queue out
58543 * to the global buffer:
58544 */
58545 - atomic_add(nr_hits, &prof_buffer[pc]);
58546 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58547 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58548 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58549 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58550 hits[i].pc = hits[i].hits = 0;
58551 }
58552 out:
58553 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
58554 {
58555 unsigned long pc;
58556 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58557 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58558 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58559 }
58560 #endif /* !CONFIG_SMP */
58561
58562 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58563 return -EFAULT;
58564 buf++; p++; count--; read++;
58565 }
58566 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58567 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58568 if (copy_to_user(buf, (void *)pnt, count))
58569 return -EFAULT;
58570 read += count;
58571 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58572 }
58573 #endif
58574 profile_discard_flip_buffers();
58575 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58576 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58577 return count;
58578 }
58579
58580 diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
58581 --- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
58582 +++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
58583 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
58584 return ret;
58585 }
58586
58587 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58588 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58589 + unsigned int log)
58590 {
58591 const struct cred *cred = current_cred(), *tcred;
58592
58593 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
58594 cred->gid == tcred->sgid &&
58595 cred->gid == tcred->gid))
58596 goto ok;
58597 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58598 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58599 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58600 goto ok;
58601 rcu_read_unlock();
58602 return -EPERM;
58603 @@ -167,7 +169,9 @@ ok:
58604 smp_rmb();
58605 if (task->mm)
58606 dumpable = get_dumpable(task->mm);
58607 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58608 + if (!dumpable &&
58609 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58610 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58611 return -EPERM;
58612
58613 return security_ptrace_access_check(task, mode);
58614 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
58615 {
58616 int err;
58617 task_lock(task);
58618 - err = __ptrace_may_access(task, mode);
58619 + err = __ptrace_may_access(task, mode, 0);
58620 + task_unlock(task);
58621 + return !err;
58622 +}
58623 +
58624 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58625 +{
58626 + int err;
58627 + task_lock(task);
58628 + err = __ptrace_may_access(task, mode, 1);
58629 task_unlock(task);
58630 return !err;
58631 }
58632 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
58633 goto out;
58634
58635 task_lock(task);
58636 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58637 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58638 task_unlock(task);
58639 if (retval)
58640 goto unlock_creds;
58641 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
58642 goto unlock_tasklist;
58643
58644 task->ptrace = PT_PTRACED;
58645 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58646 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58647 task->ptrace |= PT_PTRACE_CAP;
58648
58649 __ptrace_link(task, current);
58650 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
58651 {
58652 int copied = 0;
58653
58654 + pax_track_stack();
58655 +
58656 while (len > 0) {
58657 char buf[128];
58658 int this_len, retval;
58659 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
58660 break;
58661 return -EIO;
58662 }
58663 - if (copy_to_user(dst, buf, retval))
58664 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58665 return -EFAULT;
58666 copied += retval;
58667 src += retval;
58668 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
58669 {
58670 int copied = 0;
58671
58672 + pax_track_stack();
58673 +
58674 while (len > 0) {
58675 char buf[128];
58676 int this_len, retval;
58677 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
58678 {
58679 int ret = -EIO;
58680 siginfo_t siginfo;
58681 - void __user *datavp = (void __user *) data;
58682 + void __user *datavp = (__force void __user *) data;
58683 unsigned long __user *datalp = datavp;
58684
58685 + pax_track_stack();
58686 +
58687 switch (request) {
58688 case PTRACE_PEEKTEXT:
58689 case PTRACE_PEEKDATA:
58690 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58691 goto out;
58692 }
58693
58694 + if (gr_handle_ptrace(child, request)) {
58695 + ret = -EPERM;
58696 + goto out_put_task_struct;
58697 + }
58698 +
58699 if (request == PTRACE_ATTACH) {
58700 ret = ptrace_attach(child);
58701 /*
58702 * Some architectures need to do book-keeping after
58703 * a ptrace attach.
58704 */
58705 - if (!ret)
58706 + if (!ret) {
58707 arch_ptrace_attach(child);
58708 + gr_audit_ptrace(child);
58709 + }
58710 goto out_put_task_struct;
58711 }
58712
58713 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
58714 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58715 if (copied != sizeof(tmp))
58716 return -EIO;
58717 - return put_user(tmp, (unsigned long __user *)data);
58718 + return put_user(tmp, (__force unsigned long __user *)data);
58719 }
58720
58721 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58722 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
58723 siginfo_t siginfo;
58724 int ret;
58725
58726 + pax_track_stack();
58727 +
58728 switch (request) {
58729 case PTRACE_PEEKTEXT:
58730 case PTRACE_PEEKDATA:
58731 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
58732 goto out;
58733 }
58734
58735 + if (gr_handle_ptrace(child, request)) {
58736 + ret = -EPERM;
58737 + goto out_put_task_struct;
58738 + }
58739 +
58740 if (request == PTRACE_ATTACH) {
58741 ret = ptrace_attach(child);
58742 /*
58743 * Some architectures need to do book-keeping after
58744 * a ptrace attach.
58745 */
58746 - if (!ret)
58747 + if (!ret) {
58748 arch_ptrace_attach(child);
58749 + gr_audit_ptrace(child);
58750 + }
58751 goto out_put_task_struct;
58752 }
58753
58754 diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
58755 --- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
58756 +++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
58757 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
58758 { 0 };
58759 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
58760 { 0 };
58761 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58762 -static atomic_t n_rcu_torture_alloc;
58763 -static atomic_t n_rcu_torture_alloc_fail;
58764 -static atomic_t n_rcu_torture_free;
58765 -static atomic_t n_rcu_torture_mberror;
58766 -static atomic_t n_rcu_torture_error;
58767 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58768 +static atomic_unchecked_t n_rcu_torture_alloc;
58769 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
58770 +static atomic_unchecked_t n_rcu_torture_free;
58771 +static atomic_unchecked_t n_rcu_torture_mberror;
58772 +static atomic_unchecked_t n_rcu_torture_error;
58773 static long n_rcu_torture_boost_ktrerror;
58774 static long n_rcu_torture_boost_rterror;
58775 static long n_rcu_torture_boost_failure;
58776 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
58777
58778 spin_lock_bh(&rcu_torture_lock);
58779 if (list_empty(&rcu_torture_freelist)) {
58780 - atomic_inc(&n_rcu_torture_alloc_fail);
58781 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
58782 spin_unlock_bh(&rcu_torture_lock);
58783 return NULL;
58784 }
58785 - atomic_inc(&n_rcu_torture_alloc);
58786 + atomic_inc_unchecked(&n_rcu_torture_alloc);
58787 p = rcu_torture_freelist.next;
58788 list_del_init(p);
58789 spin_unlock_bh(&rcu_torture_lock);
58790 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
58791 static void
58792 rcu_torture_free(struct rcu_torture *p)
58793 {
58794 - atomic_inc(&n_rcu_torture_free);
58795 + atomic_inc_unchecked(&n_rcu_torture_free);
58796 spin_lock_bh(&rcu_torture_lock);
58797 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
58798 spin_unlock_bh(&rcu_torture_lock);
58799 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
58800 i = rp->rtort_pipe_count;
58801 if (i > RCU_TORTURE_PIPE_LEN)
58802 i = RCU_TORTURE_PIPE_LEN;
58803 - atomic_inc(&rcu_torture_wcount[i]);
58804 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58805 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58806 rp->rtort_mbtest = 0;
58807 rcu_torture_free(rp);
58808 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
58809 i = rp->rtort_pipe_count;
58810 if (i > RCU_TORTURE_PIPE_LEN)
58811 i = RCU_TORTURE_PIPE_LEN;
58812 - atomic_inc(&rcu_torture_wcount[i]);
58813 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58814 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58815 rp->rtort_mbtest = 0;
58816 list_del(&rp->rtort_free);
58817 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
58818 i = old_rp->rtort_pipe_count;
58819 if (i > RCU_TORTURE_PIPE_LEN)
58820 i = RCU_TORTURE_PIPE_LEN;
58821 - atomic_inc(&rcu_torture_wcount[i]);
58822 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58823 old_rp->rtort_pipe_count++;
58824 cur_ops->deferred_free(old_rp);
58825 }
58826 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
58827 return;
58828 }
58829 if (p->rtort_mbtest == 0)
58830 - atomic_inc(&n_rcu_torture_mberror);
58831 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58832 spin_lock(&rand_lock);
58833 cur_ops->read_delay(&rand);
58834 n_rcu_torture_timers++;
58835 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
58836 continue;
58837 }
58838 if (p->rtort_mbtest == 0)
58839 - atomic_inc(&n_rcu_torture_mberror);
58840 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58841 cur_ops->read_delay(&rand);
58842 preempt_disable();
58843 pipe_count = p->rtort_pipe_count;
58844 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
58845 rcu_torture_current,
58846 rcu_torture_current_version,
58847 list_empty(&rcu_torture_freelist),
58848 - atomic_read(&n_rcu_torture_alloc),
58849 - atomic_read(&n_rcu_torture_alloc_fail),
58850 - atomic_read(&n_rcu_torture_free),
58851 - atomic_read(&n_rcu_torture_mberror),
58852 + atomic_read_unchecked(&n_rcu_torture_alloc),
58853 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
58854 + atomic_read_unchecked(&n_rcu_torture_free),
58855 + atomic_read_unchecked(&n_rcu_torture_mberror),
58856 n_rcu_torture_boost_ktrerror,
58857 n_rcu_torture_boost_rterror,
58858 n_rcu_torture_boost_failure,
58859 n_rcu_torture_boosts,
58860 n_rcu_torture_timers);
58861 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
58862 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
58863 n_rcu_torture_boost_ktrerror != 0 ||
58864 n_rcu_torture_boost_rterror != 0 ||
58865 n_rcu_torture_boost_failure != 0)
58866 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
58867 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
58868 if (i > 1) {
58869 cnt += sprintf(&page[cnt], "!!! ");
58870 - atomic_inc(&n_rcu_torture_error);
58871 + atomic_inc_unchecked(&n_rcu_torture_error);
58872 WARN_ON_ONCE(1);
58873 }
58874 cnt += sprintf(&page[cnt], "Reader Pipe: ");
58875 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
58876 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
58877 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58878 cnt += sprintf(&page[cnt], " %d",
58879 - atomic_read(&rcu_torture_wcount[i]));
58880 + atomic_read_unchecked(&rcu_torture_wcount[i]));
58881 }
58882 cnt += sprintf(&page[cnt], "\n");
58883 if (cur_ops->stats)
58884 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
58885
58886 if (cur_ops->cleanup)
58887 cur_ops->cleanup();
58888 - if (atomic_read(&n_rcu_torture_error))
58889 + if (atomic_read_unchecked(&n_rcu_torture_error))
58890 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
58891 else
58892 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
58893 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
58894
58895 rcu_torture_current = NULL;
58896 rcu_torture_current_version = 0;
58897 - atomic_set(&n_rcu_torture_alloc, 0);
58898 - atomic_set(&n_rcu_torture_alloc_fail, 0);
58899 - atomic_set(&n_rcu_torture_free, 0);
58900 - atomic_set(&n_rcu_torture_mberror, 0);
58901 - atomic_set(&n_rcu_torture_error, 0);
58902 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
58903 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
58904 + atomic_set_unchecked(&n_rcu_torture_free, 0);
58905 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
58906 + atomic_set_unchecked(&n_rcu_torture_error, 0);
58907 n_rcu_torture_boost_ktrerror = 0;
58908 n_rcu_torture_boost_rterror = 0;
58909 n_rcu_torture_boost_failure = 0;
58910 n_rcu_torture_boosts = 0;
58911 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
58912 - atomic_set(&rcu_torture_wcount[i], 0);
58913 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
58914 for_each_possible_cpu(cpu) {
58915 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58916 per_cpu(rcu_torture_count, cpu)[i] = 0;
58917 diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
58918 --- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
58919 +++ linux-3.0.4/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
58920 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
58921 /*
58922 * Do softirq processing for the current CPU.
58923 */
58924 -static void rcu_process_callbacks(struct softirq_action *unused)
58925 +static void rcu_process_callbacks(void)
58926 {
58927 __rcu_process_callbacks(&rcu_sched_state,
58928 &__get_cpu_var(rcu_sched_data));
58929 diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
58930 --- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
58931 +++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
58932 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
58933
58934 /* Clean up and exit. */
58935 smp_mb(); /* ensure expedited GP seen before counter increment. */
58936 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
58937 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
58938 unlock_mb_ret:
58939 mutex_unlock(&sync_rcu_preempt_exp_mutex);
58940 mb_ret:
58941 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
58942
58943 #else /* #ifndef CONFIG_SMP */
58944
58945 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
58946 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
58947 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
58948 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
58949
58950 static int synchronize_sched_expedited_cpu_stop(void *data)
58951 {
58952 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
58953 int firstsnap, s, snap, trycount = 0;
58954
58955 /* Note that atomic_inc_return() implies full memory barrier. */
58956 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
58957 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
58958 get_online_cpus();
58959
58960 /*
58961 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
58962 }
58963
58964 /* Check to see if someone else did our work for us. */
58965 - s = atomic_read(&sync_sched_expedited_done);
58966 + s = atomic_read_unchecked(&sync_sched_expedited_done);
58967 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
58968 smp_mb(); /* ensure test happens before caller kfree */
58969 return;
58970 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
58971 * grace period works for us.
58972 */
58973 get_online_cpus();
58974 - snap = atomic_read(&sync_sched_expedited_started) - 1;
58975 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
58976 smp_mb(); /* ensure read is before try_stop_cpus(). */
58977 }
58978
58979 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
58980 * than we did beat us to the punch.
58981 */
58982 do {
58983 - s = atomic_read(&sync_sched_expedited_done);
58984 + s = atomic_read_unchecked(&sync_sched_expedited_done);
58985 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
58986 smp_mb(); /* ensure test happens before caller kfree */
58987 break;
58988 }
58989 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
58990 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
58991
58992 put_online_cpus();
58993 }
58994 diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
58995 --- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
58996 +++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
58997 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
58998 };
58999 ssize_t ret;
59000
59001 + pax_track_stack();
59002 +
59003 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59004 return 0;
59005 if (splice_grow_spd(pipe, &spd))
59006 diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
59007 --- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
59008 +++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
59009 @@ -141,8 +141,18 @@ static const struct file_operations proc
59010
59011 static int __init ioresources_init(void)
59012 {
59013 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59014 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59015 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59016 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59017 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59018 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59019 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59020 +#endif
59021 +#else
59022 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59023 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59024 +#endif
59025 return 0;
59026 }
59027 __initcall(ioresources_init);
59028 diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
59029 --- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
59030 +++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
59031 @@ -20,7 +20,7 @@
59032 #define MAX_RT_TEST_MUTEXES 8
59033
59034 static spinlock_t rttest_lock;
59035 -static atomic_t rttest_event;
59036 +static atomic_unchecked_t rttest_event;
59037
59038 struct test_thread_data {
59039 int opcode;
59040 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59041
59042 case RTTEST_LOCKCONT:
59043 td->mutexes[td->opdata] = 1;
59044 - td->event = atomic_add_return(1, &rttest_event);
59045 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59046 return 0;
59047
59048 case RTTEST_RESET:
59049 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59050 return 0;
59051
59052 case RTTEST_RESETEVENT:
59053 - atomic_set(&rttest_event, 0);
59054 + atomic_set_unchecked(&rttest_event, 0);
59055 return 0;
59056
59057 default:
59058 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59059 return ret;
59060
59061 td->mutexes[id] = 1;
59062 - td->event = atomic_add_return(1, &rttest_event);
59063 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59064 rt_mutex_lock(&mutexes[id]);
59065 - td->event = atomic_add_return(1, &rttest_event);
59066 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59067 td->mutexes[id] = 4;
59068 return 0;
59069
59070 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59071 return ret;
59072
59073 td->mutexes[id] = 1;
59074 - td->event = atomic_add_return(1, &rttest_event);
59075 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59076 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59077 - td->event = atomic_add_return(1, &rttest_event);
59078 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59079 td->mutexes[id] = ret ? 0 : 4;
59080 return ret ? -EINTR : 0;
59081
59082 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59083 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59084 return ret;
59085
59086 - td->event = atomic_add_return(1, &rttest_event);
59087 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59088 rt_mutex_unlock(&mutexes[id]);
59089 - td->event = atomic_add_return(1, &rttest_event);
59090 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59091 td->mutexes[id] = 0;
59092 return 0;
59093
59094 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59095 break;
59096
59097 td->mutexes[dat] = 2;
59098 - td->event = atomic_add_return(1, &rttest_event);
59099 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59100 break;
59101
59102 default:
59103 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59104 return;
59105
59106 td->mutexes[dat] = 3;
59107 - td->event = atomic_add_return(1, &rttest_event);
59108 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59109 break;
59110
59111 case RTTEST_LOCKNOWAIT:
59112 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59113 return;
59114
59115 td->mutexes[dat] = 1;
59116 - td->event = atomic_add_return(1, &rttest_event);
59117 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59118 return;
59119
59120 default:
59121 diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
59122 --- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
59123 +++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
59124 @@ -7,7 +7,7 @@
59125
59126 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59127 static struct autogroup autogroup_default;
59128 -static atomic_t autogroup_seq_nr;
59129 +static atomic_unchecked_t autogroup_seq_nr;
59130
59131 static void __init autogroup_init(struct task_struct *init_task)
59132 {
59133 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59134
59135 kref_init(&ag->kref);
59136 init_rwsem(&ag->lock);
59137 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59138 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59139 ag->tg = tg;
59140 #ifdef CONFIG_RT_GROUP_SCHED
59141 /*
59142 diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
59143 --- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
59144 +++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
59145 @@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
59146 struct rq *rq;
59147 int cpu;
59148
59149 + pax_track_stack();
59150 +
59151 need_resched:
59152 preempt_disable();
59153 cpu = smp_processor_id();
59154 @@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
59155 /* convert nice value [19,-20] to rlimit style value [1,40] */
59156 int nice_rlim = 20 - nice;
59157
59158 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59159 +
59160 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59161 capable(CAP_SYS_NICE));
59162 }
59163 @@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59164 if (nice > 19)
59165 nice = 19;
59166
59167 - if (increment < 0 && !can_nice(current, nice))
59168 + if (increment < 0 && (!can_nice(current, nice) ||
59169 + gr_handle_chroot_nice()))
59170 return -EPERM;
59171
59172 retval = security_task_setnice(current, nice);
59173 @@ -5111,6 +5116,7 @@ recheck:
59174 unsigned long rlim_rtprio =
59175 task_rlimit(p, RLIMIT_RTPRIO);
59176
59177 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59178 /* can't set/change the rt policy */
59179 if (policy != p->policy && !rlim_rtprio)
59180 return -EPERM;
59181 diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
59182 --- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
59183 +++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
59184 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
59185 * run_rebalance_domains is triggered when needed from the scheduler tick.
59186 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59187 */
59188 -static void run_rebalance_domains(struct softirq_action *h)
59189 +static void run_rebalance_domains(void)
59190 {
59191 int this_cpu = smp_processor_id();
59192 struct rq *this_rq = cpu_rq(this_cpu);
59193 diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
59194 --- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
59195 +++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
59196 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59197
59198 int print_fatal_signals __read_mostly;
59199
59200 -static void __user *sig_handler(struct task_struct *t, int sig)
59201 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59202 {
59203 return t->sighand->action[sig - 1].sa.sa_handler;
59204 }
59205
59206 -static int sig_handler_ignored(void __user *handler, int sig)
59207 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59208 {
59209 /* Is it explicitly or implicitly ignored? */
59210 return handler == SIG_IGN ||
59211 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59212 static int sig_task_ignored(struct task_struct *t, int sig,
59213 int from_ancestor_ns)
59214 {
59215 - void __user *handler;
59216 + __sighandler_t handler;
59217
59218 handler = sig_handler(t, sig);
59219
59220 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
59221 atomic_inc(&user->sigpending);
59222 rcu_read_unlock();
59223
59224 + if (!override_rlimit)
59225 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59226 +
59227 if (override_rlimit ||
59228 atomic_read(&user->sigpending) <=
59229 task_rlimit(t, RLIMIT_SIGPENDING)) {
59230 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
59231
59232 int unhandled_signal(struct task_struct *tsk, int sig)
59233 {
59234 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59235 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59236 if (is_global_init(tsk))
59237 return 1;
59238 if (handler != SIG_IGN && handler != SIG_DFL)
59239 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
59240 }
59241 }
59242
59243 + /* allow glibc communication via tgkill to other threads in our
59244 + thread group */
59245 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59246 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59247 + && gr_handle_signal(t, sig))
59248 + return -EPERM;
59249 +
59250 return security_task_kill(t, info, sig, 0);
59251 }
59252
59253 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
59254 return send_signal(sig, info, p, 1);
59255 }
59256
59257 -static int
59258 +int
59259 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59260 {
59261 return send_signal(sig, info, t, 0);
59262 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
59263 unsigned long int flags;
59264 int ret, blocked, ignored;
59265 struct k_sigaction *action;
59266 + int is_unhandled = 0;
59267
59268 spin_lock_irqsave(&t->sighand->siglock, flags);
59269 action = &t->sighand->action[sig-1];
59270 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
59271 }
59272 if (action->sa.sa_handler == SIG_DFL)
59273 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59274 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59275 + is_unhandled = 1;
59276 ret = specific_send_sig_info(sig, info, t);
59277 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59278
59279 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59280 + normal operation */
59281 + if (is_unhandled) {
59282 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59283 + gr_handle_crash(t, sig);
59284 + }
59285 +
59286 return ret;
59287 }
59288
59289 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
59290 ret = check_kill_permission(sig, info, p);
59291 rcu_read_unlock();
59292
59293 - if (!ret && sig)
59294 + if (!ret && sig) {
59295 ret = do_send_sig_info(sig, info, p, true);
59296 + if (!ret)
59297 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59298 + }
59299
59300 return ret;
59301 }
59302 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
59303 {
59304 siginfo_t info;
59305
59306 + pax_track_stack();
59307 +
59308 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59309
59310 memset(&info, 0, sizeof info);
59311 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59312 int error = -ESRCH;
59313
59314 rcu_read_lock();
59315 - p = find_task_by_vpid(pid);
59316 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59317 + /* allow glibc communication via tgkill to other threads in our
59318 + thread group */
59319 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59320 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59321 + p = find_task_by_vpid_unrestricted(pid);
59322 + else
59323 +#endif
59324 + p = find_task_by_vpid(pid);
59325 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59326 error = check_kill_permission(sig, info, p);
59327 /*
59328 diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
59329 --- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
59330 +++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
59331 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
59332 }
59333 EXPORT_SYMBOL(smp_call_function);
59334
59335 -void ipi_call_lock(void)
59336 +void ipi_call_lock(void) __acquires(call_function.lock)
59337 {
59338 raw_spin_lock(&call_function.lock);
59339 }
59340
59341 -void ipi_call_unlock(void)
59342 +void ipi_call_unlock(void) __releases(call_function.lock)
59343 {
59344 raw_spin_unlock(&call_function.lock);
59345 }
59346
59347 -void ipi_call_lock_irq(void)
59348 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59349 {
59350 raw_spin_lock_irq(&call_function.lock);
59351 }
59352
59353 -void ipi_call_unlock_irq(void)
59354 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59355 {
59356 raw_spin_unlock_irq(&call_function.lock);
59357 }
59358 diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
59359 --- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
59360 +++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
59361 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59362
59363 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59364
59365 -char *softirq_to_name[NR_SOFTIRQS] = {
59366 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59367 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59368 "TASKLET", "SCHED", "HRTIMER", "RCU"
59369 };
59370 @@ -235,7 +235,7 @@ restart:
59371 kstat_incr_softirqs_this_cpu(vec_nr);
59372
59373 trace_softirq_entry(vec_nr);
59374 - h->action(h);
59375 + h->action();
59376 trace_softirq_exit(vec_nr);
59377 if (unlikely(prev_count != preempt_count())) {
59378 printk(KERN_ERR "huh, entered softirq %u %s %p"
59379 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
59380 local_irq_restore(flags);
59381 }
59382
59383 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59384 +void open_softirq(int nr, void (*action)(void))
59385 {
59386 - softirq_vec[nr].action = action;
59387 + pax_open_kernel();
59388 + *(void **)&softirq_vec[nr].action = action;
59389 + pax_close_kernel();
59390 }
59391
59392 /*
59393 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
59394
59395 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59396
59397 -static void tasklet_action(struct softirq_action *a)
59398 +static void tasklet_action(void)
59399 {
59400 struct tasklet_struct *list;
59401
59402 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
59403 }
59404 }
59405
59406 -static void tasklet_hi_action(struct softirq_action *a)
59407 +static void tasklet_hi_action(void)
59408 {
59409 struct tasklet_struct *list;
59410
59411 diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
59412 --- linux-3.0.4/kernel/sys.c 2011-08-29 23:26:14.000000000 -0400
59413 +++ linux-3.0.4/kernel/sys.c 2011-08-29 23:26:27.000000000 -0400
59414 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
59415 error = -EACCES;
59416 goto out;
59417 }
59418 +
59419 + if (gr_handle_chroot_setpriority(p, niceval)) {
59420 + error = -EACCES;
59421 + goto out;
59422 + }
59423 +
59424 no_nice = security_task_setnice(p, niceval);
59425 if (no_nice) {
59426 error = no_nice;
59427 @@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59428 goto error;
59429 }
59430
59431 + if (gr_check_group_change(new->gid, new->egid, -1))
59432 + goto error;
59433 +
59434 if (rgid != (gid_t) -1 ||
59435 (egid != (gid_t) -1 && egid != old->gid))
59436 new->sgid = new->egid;
59437 @@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59438 old = current_cred();
59439
59440 retval = -EPERM;
59441 +
59442 + if (gr_check_group_change(gid, gid, gid))
59443 + goto error;
59444 +
59445 if (nsown_capable(CAP_SETGID))
59446 new->gid = new->egid = new->sgid = new->fsgid = gid;
59447 else if (gid == old->gid || gid == old->sgid)
59448 @@ -595,11 +608,18 @@ static int set_user(struct cred *new)
59449 if (!new_user)
59450 return -EAGAIN;
59451
59452 + /*
59453 + * We don't fail in case of NPROC limit excess here because too many
59454 + * poorly written programs don't check set*uid() return code, assuming
59455 + * it never fails if called by root. We may still enforce NPROC limit
59456 + * for programs doing set*uid()+execve() by harmlessly deferring the
59457 + * failure to the execve() stage.
59458 + */
59459 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
59460 - new_user != INIT_USER) {
59461 - free_uid(new_user);
59462 - return -EAGAIN;
59463 - }
59464 + new_user != INIT_USER)
59465 + current->flags |= PF_NPROC_EXCEEDED;
59466 + else
59467 + current->flags &= ~PF_NPROC_EXCEEDED;
59468
59469 free_uid(new->user);
59470 new->user = new_user;
59471 @@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59472 goto error;
59473 }
59474
59475 + if (gr_check_user_change(new->uid, new->euid, -1))
59476 + goto error;
59477 +
59478 if (new->uid != old->uid) {
59479 retval = set_user(new);
59480 if (retval < 0)
59481 @@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59482 old = current_cred();
59483
59484 retval = -EPERM;
59485 +
59486 + if (gr_check_crash_uid(uid))
59487 + goto error;
59488 + if (gr_check_user_change(uid, uid, uid))
59489 + goto error;
59490 +
59491 if (nsown_capable(CAP_SETUID)) {
59492 new->suid = new->uid = uid;
59493 if (uid != old->uid) {
59494 @@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59495 goto error;
59496 }
59497
59498 + if (gr_check_user_change(ruid, euid, -1))
59499 + goto error;
59500 +
59501 if (ruid != (uid_t) -1) {
59502 new->uid = ruid;
59503 if (ruid != old->uid) {
59504 @@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59505 goto error;
59506 }
59507
59508 + if (gr_check_group_change(rgid, egid, -1))
59509 + goto error;
59510 +
59511 if (rgid != (gid_t) -1)
59512 new->gid = rgid;
59513 if (egid != (gid_t) -1)
59514 @@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59515 old = current_cred();
59516 old_fsuid = old->fsuid;
59517
59518 + if (gr_check_user_change(-1, -1, uid))
59519 + goto error;
59520 +
59521 if (uid == old->uid || uid == old->euid ||
59522 uid == old->suid || uid == old->fsuid ||
59523 nsown_capable(CAP_SETUID)) {
59524 @@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59525 }
59526 }
59527
59528 +error:
59529 abort_creds(new);
59530 return old_fsuid;
59531
59532 @@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59533 if (gid == old->gid || gid == old->egid ||
59534 gid == old->sgid || gid == old->fsgid ||
59535 nsown_capable(CAP_SETGID)) {
59536 + if (gr_check_group_change(-1, -1, gid))
59537 + goto error;
59538 +
59539 if (gid != old_fsgid) {
59540 new->fsgid = gid;
59541 goto change_okay;
59542 }
59543 }
59544
59545 +error:
59546 abort_creds(new);
59547 return old_fsgid;
59548
59549 @@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59550 error = get_dumpable(me->mm);
59551 break;
59552 case PR_SET_DUMPABLE:
59553 - if (arg2 < 0 || arg2 > 1) {
59554 + if (arg2 > 1) {
59555 error = -EINVAL;
59556 break;
59557 }
59558 diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
59559 --- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
59560 +++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
59561 @@ -85,6 +85,13 @@
59562
59563
59564 #if defined(CONFIG_SYSCTL)
59565 +#include <linux/grsecurity.h>
59566 +#include <linux/grinternal.h>
59567 +
59568 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59569 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59570 + const int op);
59571 +extern int gr_handle_chroot_sysctl(const int op);
59572
59573 /* External variables not in a header file. */
59574 extern int sysctl_overcommit_memory;
59575 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59576 }
59577
59578 #endif
59579 +extern struct ctl_table grsecurity_table[];
59580
59581 static struct ctl_table root_table[];
59582 static struct ctl_table_root sysctl_table_root;
59583 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
59584 int sysctl_legacy_va_layout;
59585 #endif
59586
59587 +#ifdef CONFIG_PAX_SOFTMODE
59588 +static ctl_table pax_table[] = {
59589 + {
59590 + .procname = "softmode",
59591 + .data = &pax_softmode,
59592 + .maxlen = sizeof(unsigned int),
59593 + .mode = 0600,
59594 + .proc_handler = &proc_dointvec,
59595 + },
59596 +
59597 + { }
59598 +};
59599 +#endif
59600 +
59601 /* The default sysctl tables: */
59602
59603 static struct ctl_table root_table[] = {
59604 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
59605 #endif
59606
59607 static struct ctl_table kern_table[] = {
59608 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59609 + {
59610 + .procname = "grsecurity",
59611 + .mode = 0500,
59612 + .child = grsecurity_table,
59613 + },
59614 +#endif
59615 +
59616 +#ifdef CONFIG_PAX_SOFTMODE
59617 + {
59618 + .procname = "pax",
59619 + .mode = 0500,
59620 + .child = pax_table,
59621 + },
59622 +#endif
59623 +
59624 {
59625 .procname = "sched_child_runs_first",
59626 .data = &sysctl_sched_child_runs_first,
59627 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
59628 .data = &modprobe_path,
59629 .maxlen = KMOD_PATH_LEN,
59630 .mode = 0644,
59631 - .proc_handler = proc_dostring,
59632 + .proc_handler = proc_dostring_modpriv,
59633 },
59634 {
59635 .procname = "modules_disabled",
59636 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
59637 .extra1 = &zero,
59638 .extra2 = &one,
59639 },
59640 +#endif
59641 {
59642 .procname = "kptr_restrict",
59643 .data = &kptr_restrict,
59644 .maxlen = sizeof(int),
59645 .mode = 0644,
59646 .proc_handler = proc_dmesg_restrict,
59647 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59648 + .extra1 = &two,
59649 +#else
59650 .extra1 = &zero,
59651 +#endif
59652 .extra2 = &two,
59653 },
59654 -#endif
59655 {
59656 .procname = "ngroups_max",
59657 .data = &ngroups_max,
59658 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
59659 .proc_handler = proc_dointvec_minmax,
59660 .extra1 = &zero,
59661 },
59662 + {
59663 + .procname = "heap_stack_gap",
59664 + .data = &sysctl_heap_stack_gap,
59665 + .maxlen = sizeof(sysctl_heap_stack_gap),
59666 + .mode = 0644,
59667 + .proc_handler = proc_doulongvec_minmax,
59668 + },
59669 #else
59670 {
59671 .procname = "nr_trim_pages",
59672 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
59673 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59674 {
59675 int mode;
59676 + int error;
59677 +
59678 + if (table->parent != NULL && table->parent->procname != NULL &&
59679 + table->procname != NULL &&
59680 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59681 + return -EACCES;
59682 + if (gr_handle_chroot_sysctl(op))
59683 + return -EACCES;
59684 + error = gr_handle_sysctl(table, op);
59685 + if (error)
59686 + return error;
59687
59688 if (root->permissions)
59689 mode = root->permissions(root, current->nsproxy, table);
59690 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
59691 buffer, lenp, ppos);
59692 }
59693
59694 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59695 + void __user *buffer, size_t *lenp, loff_t *ppos)
59696 +{
59697 + if (write && !capable(CAP_SYS_MODULE))
59698 + return -EPERM;
59699 +
59700 + return _proc_do_string(table->data, table->maxlen, write,
59701 + buffer, lenp, ppos);
59702 +}
59703 +
59704 static size_t proc_skip_spaces(char **buf)
59705 {
59706 size_t ret;
59707 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
59708 len = strlen(tmp);
59709 if (len > *size)
59710 len = *size;
59711 + if (len > sizeof(tmp))
59712 + len = sizeof(tmp);
59713 if (copy_to_user(*buf, tmp, len))
59714 return -EFAULT;
59715 *size -= len;
59716 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
59717 *i = val;
59718 } else {
59719 val = convdiv * (*i) / convmul;
59720 - if (!first)
59721 + if (!first) {
59722 err = proc_put_char(&buffer, &left, '\t');
59723 + if (err)
59724 + break;
59725 + }
59726 err = proc_put_long(&buffer, &left, val, false);
59727 if (err)
59728 break;
59729 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
59730 return -ENOSYS;
59731 }
59732
59733 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59734 + void __user *buffer, size_t *lenp, loff_t *ppos)
59735 +{
59736 + return -ENOSYS;
59737 +}
59738 +
59739 int proc_dointvec(struct ctl_table *table, int write,
59740 void __user *buffer, size_t *lenp, loff_t *ppos)
59741 {
59742 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
59743 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
59744 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
59745 EXPORT_SYMBOL(proc_dostring);
59746 +EXPORT_SYMBOL(proc_dostring_modpriv);
59747 EXPORT_SYMBOL(proc_doulongvec_minmax);
59748 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
59749 EXPORT_SYMBOL(register_sysctl_table);
59750 diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
59751 --- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
59752 +++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
59753 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
59754 set_fail(&fail, table, "Directory with extra2");
59755 } else {
59756 if ((table->proc_handler == proc_dostring) ||
59757 + (table->proc_handler == proc_dostring_modpriv) ||
59758 (table->proc_handler == proc_dointvec) ||
59759 (table->proc_handler == proc_dointvec_minmax) ||
59760 (table->proc_handler == proc_dointvec_jiffies) ||
59761 diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
59762 --- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
59763 +++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
59764 @@ -27,9 +27,12 @@
59765 #include <linux/cgroup.h>
59766 #include <linux/fs.h>
59767 #include <linux/file.h>
59768 +#include <linux/grsecurity.h>
59769 #include <net/genetlink.h>
59770 #include <asm/atomic.h>
59771
59772 +extern int gr_is_taskstats_denied(int pid);
59773 +
59774 /*
59775 * Maximum length of a cpumask that can be specified in
59776 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
59777 @@ -558,6 +561,9 @@ err:
59778
59779 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
59780 {
59781 + if (gr_is_taskstats_denied(current->pid))
59782 + return -EACCES;
59783 +
59784 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
59785 return cmd_attr_register_cpumask(info);
59786 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
59787 diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
59788 --- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
59789 +++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
59790 @@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
59791 {
59792 int error = 0;
59793 int i;
59794 - struct k_clock alarm_clock = {
59795 + static struct k_clock alarm_clock = {
59796 .clock_getres = alarm_clock_getres,
59797 .clock_get = alarm_clock_get,
59798 .timer_create = alarm_timer_create,
59799 diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
59800 --- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
59801 +++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
59802 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
59803 * then clear the broadcast bit.
59804 */
59805 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
59806 - int cpu = smp_processor_id();
59807 + cpu = smp_processor_id();
59808
59809 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
59810 tick_broadcast_clear_oneshot(cpu);
59811 diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
59812 --- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
59813 +++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
59814 @@ -14,6 +14,7 @@
59815 #include <linux/init.h>
59816 #include <linux/mm.h>
59817 #include <linux/sched.h>
59818 +#include <linux/grsecurity.h>
59819 #include <linux/syscore_ops.h>
59820 #include <linux/clocksource.h>
59821 #include <linux/jiffies.h>
59822 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
59823 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
59824 return -EINVAL;
59825
59826 + gr_log_timechange();
59827 +
59828 write_seqlock_irqsave(&xtime_lock, flags);
59829
59830 timekeeping_forward_now();
59831 diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
59832 --- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
59833 +++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
59834 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
59835
59836 static void print_name_offset(struct seq_file *m, void *sym)
59837 {
59838 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59839 + SEQ_printf(m, "<%p>", NULL);
59840 +#else
59841 char symname[KSYM_NAME_LEN];
59842
59843 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
59844 SEQ_printf(m, "<%pK>", sym);
59845 else
59846 SEQ_printf(m, "%s", symname);
59847 +#endif
59848 }
59849
59850 static void
59851 @@ -112,7 +116,11 @@ next_one:
59852 static void
59853 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
59854 {
59855 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59856 + SEQ_printf(m, " .base: %p\n", NULL);
59857 +#else
59858 SEQ_printf(m, " .base: %pK\n", base);
59859 +#endif
59860 SEQ_printf(m, " .index: %d\n",
59861 base->index);
59862 SEQ_printf(m, " .resolution: %Lu nsecs\n",
59863 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
59864 {
59865 struct proc_dir_entry *pe;
59866
59867 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59868 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
59869 +#else
59870 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
59871 +#endif
59872 if (!pe)
59873 return -ENOMEM;
59874 return 0;
59875 diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
59876 --- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
59877 +++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
59878 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
59879 static unsigned long nr_entries;
59880 static struct entry entries[MAX_ENTRIES];
59881
59882 -static atomic_t overflow_count;
59883 +static atomic_unchecked_t overflow_count;
59884
59885 /*
59886 * The entries are in a hash-table, for fast lookup:
59887 @@ -140,7 +140,7 @@ static void reset_entries(void)
59888 nr_entries = 0;
59889 memset(entries, 0, sizeof(entries));
59890 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
59891 - atomic_set(&overflow_count, 0);
59892 + atomic_set_unchecked(&overflow_count, 0);
59893 }
59894
59895 static struct entry *alloc_entry(void)
59896 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
59897 if (likely(entry))
59898 entry->count++;
59899 else
59900 - atomic_inc(&overflow_count);
59901 + atomic_inc_unchecked(&overflow_count);
59902
59903 out_unlock:
59904 raw_spin_unlock_irqrestore(lock, flags);
59905 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
59906
59907 static void print_name_offset(struct seq_file *m, unsigned long addr)
59908 {
59909 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59910 + seq_printf(m, "<%p>", NULL);
59911 +#else
59912 char symname[KSYM_NAME_LEN];
59913
59914 if (lookup_symbol_name(addr, symname) < 0)
59915 seq_printf(m, "<%p>", (void *)addr);
59916 else
59917 seq_printf(m, "%s", symname);
59918 +#endif
59919 }
59920
59921 static int tstats_show(struct seq_file *m, void *v)
59922 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
59923
59924 seq_puts(m, "Timer Stats Version: v0.2\n");
59925 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
59926 - if (atomic_read(&overflow_count))
59927 + if (atomic_read_unchecked(&overflow_count))
59928 seq_printf(m, "Overflow: %d entries\n",
59929 - atomic_read(&overflow_count));
59930 + atomic_read_unchecked(&overflow_count));
59931
59932 for (i = 0; i < nr_entries; i++) {
59933 entry = entries + i;
59934 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
59935 {
59936 struct proc_dir_entry *pe;
59937
59938 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59939 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
59940 +#else
59941 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
59942 +#endif
59943 if (!pe)
59944 return -ENOMEM;
59945 return 0;
59946 diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
59947 --- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
59948 +++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
59949 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
59950 return error;
59951
59952 if (tz) {
59953 + /* we log in do_settimeofday called below, so don't log twice
59954 + */
59955 + if (!tv)
59956 + gr_log_timechange();
59957 +
59958 /* SMP safe, global irq locking makes it work. */
59959 sys_tz = *tz;
59960 update_vsyscall_tz();
59961 diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
59962 --- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
59963 +++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
59964 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
59965 /*
59966 * This function runs timers and the timer-tq in bottom half context.
59967 */
59968 -static void run_timer_softirq(struct softirq_action *h)
59969 +static void run_timer_softirq(void)
59970 {
59971 struct tvec_base *base = __this_cpu_read(tvec_bases);
59972
59973 diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
59974 --- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
59975 +++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
59976 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
59977 struct blk_trace *bt = filp->private_data;
59978 char buf[16];
59979
59980 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
59981 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
59982
59983 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
59984 }
59985 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
59986 return 1;
59987
59988 bt = buf->chan->private_data;
59989 - atomic_inc(&bt->dropped);
59990 + atomic_inc_unchecked(&bt->dropped);
59991 return 0;
59992 }
59993
59994 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
59995
59996 bt->dir = dir;
59997 bt->dev = dev;
59998 - atomic_set(&bt->dropped, 0);
59999 + atomic_set_unchecked(&bt->dropped, 0);
60000
60001 ret = -EIO;
60002 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60003 diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
60004 --- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
60005 +++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
60006 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
60007 if (unlikely(ftrace_disabled))
60008 return 0;
60009
60010 + ret = ftrace_arch_code_modify_prepare();
60011 + FTRACE_WARN_ON(ret);
60012 + if (ret)
60013 + return 0;
60014 +
60015 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60016 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60017 if (ret) {
60018 ftrace_bug(ret, ip);
60019 - return 0;
60020 }
60021 - return 1;
60022 + return ret ? 0 : 1;
60023 }
60024
60025 /*
60026 @@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
60027
60028 int
60029 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60030 - void *data)
60031 + void *data)
60032 {
60033 struct ftrace_func_probe *entry;
60034 struct ftrace_page *pg;
60035 diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
60036 --- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
60037 +++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
60038 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
60039 size_t rem;
60040 unsigned int i;
60041
60042 + pax_track_stack();
60043 +
60044 if (splice_grow_spd(pipe, &spd))
60045 return -ENOMEM;
60046
60047 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
60048 int entries, size, i;
60049 size_t ret;
60050
60051 + pax_track_stack();
60052 +
60053 if (splice_grow_spd(pipe, &spd))
60054 return -ENOMEM;
60055
60056 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
60057 };
60058 #endif
60059
60060 -static struct dentry *d_tracer;
60061 -
60062 struct dentry *tracing_init_dentry(void)
60063 {
60064 + static struct dentry *d_tracer;
60065 static int once;
60066
60067 if (d_tracer)
60068 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
60069 return d_tracer;
60070 }
60071
60072 -static struct dentry *d_percpu;
60073 -
60074 struct dentry *tracing_dentry_percpu(void)
60075 {
60076 + static struct dentry *d_percpu;
60077 static int once;
60078 struct dentry *d_tracer;
60079
60080 diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
60081 --- linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
60082 +++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
60083 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
60084 struct ftrace_module_file_ops {
60085 struct list_head list;
60086 struct module *mod;
60087 - struct file_operations id;
60088 - struct file_operations enable;
60089 - struct file_operations format;
60090 - struct file_operations filter;
60091 };
60092
60093 static struct ftrace_module_file_ops *
60094 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
60095
60096 file_ops->mod = mod;
60097
60098 - file_ops->id = ftrace_event_id_fops;
60099 - file_ops->id.owner = mod;
60100 -
60101 - file_ops->enable = ftrace_enable_fops;
60102 - file_ops->enable.owner = mod;
60103 -
60104 - file_ops->filter = ftrace_event_filter_fops;
60105 - file_ops->filter.owner = mod;
60106 -
60107 - file_ops->format = ftrace_event_format_fops;
60108 - file_ops->format.owner = mod;
60109 + pax_open_kernel();
60110 + *(void **)&mod->trace_id.owner = mod;
60111 + *(void **)&mod->trace_enable.owner = mod;
60112 + *(void **)&mod->trace_filter.owner = mod;
60113 + *(void **)&mod->trace_format.owner = mod;
60114 + pax_close_kernel();
60115
60116 list_add(&file_ops->list, &ftrace_module_file_list);
60117
60118 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
60119
60120 for_each_event(call, start, end) {
60121 __trace_add_event_call(*call, mod,
60122 - &file_ops->id, &file_ops->enable,
60123 - &file_ops->filter, &file_ops->format);
60124 + &mod->trace_id, &mod->trace_enable,
60125 + &mod->trace_filter, &mod->trace_format);
60126 }
60127 }
60128
60129 diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
60130 --- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
60131 +++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
60132 @@ -24,7 +24,7 @@ struct header_iter {
60133 static struct trace_array *mmio_trace_array;
60134 static bool overrun_detected;
60135 static unsigned long prev_overruns;
60136 -static atomic_t dropped_count;
60137 +static atomic_unchecked_t dropped_count;
60138
60139 static void mmio_reset_data(struct trace_array *tr)
60140 {
60141 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60142
60143 static unsigned long count_overruns(struct trace_iterator *iter)
60144 {
60145 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60146 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60147 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60148
60149 if (over > prev_overruns)
60150 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60151 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60152 sizeof(*entry), 0, pc);
60153 if (!event) {
60154 - atomic_inc(&dropped_count);
60155 + atomic_inc_unchecked(&dropped_count);
60156 return;
60157 }
60158 entry = ring_buffer_event_data(event);
60159 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60160 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60161 sizeof(*entry), 0, pc);
60162 if (!event) {
60163 - atomic_inc(&dropped_count);
60164 + atomic_inc_unchecked(&dropped_count);
60165 return;
60166 }
60167 entry = ring_buffer_event_data(event);
60168 diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
60169 --- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
60170 +++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
60171 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60172
60173 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60174 if (!IS_ERR(p)) {
60175 - p = mangle_path(s->buffer + s->len, p, "\n");
60176 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60177 if (p) {
60178 s->len = p - s->buffer;
60179 return 1;
60180 diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
60181 --- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
60182 +++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
60183 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60184 return;
60185
60186 /* we do not handle interrupt stacks yet */
60187 - if (!object_is_on_stack(&this_size))
60188 + if (!object_starts_on_stack(&this_size))
60189 return;
60190
60191 local_irq_save(flags);
60192 diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
60193 --- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
60194 +++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
60195 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60196 int cpu;
60197 pid_t pid;
60198 /* Can be inserted from interrupt or user context, need to be atomic */
60199 - atomic_t inserted;
60200 + atomic_unchecked_t inserted;
60201 /*
60202 * Don't need to be atomic, works are serialized in a single workqueue thread
60203 * on a single CPU.
60204 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60205 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60206 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60207 if (node->pid == wq_thread->pid) {
60208 - atomic_inc(&node->inserted);
60209 + atomic_inc_unchecked(&node->inserted);
60210 goto found;
60211 }
60212 }
60213 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60214 tsk = get_pid_task(pid, PIDTYPE_PID);
60215 if (tsk) {
60216 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60217 - atomic_read(&cws->inserted), cws->executed,
60218 + atomic_read_unchecked(&cws->inserted), cws->executed,
60219 tsk->comm);
60220 put_task_struct(tsk);
60221 }
60222 diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
60223 --- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
60224 +++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
60225 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60226 return BUG_TRAP_TYPE_NONE;
60227
60228 bug = find_bug(bugaddr);
60229 + if (!bug)
60230 + return BUG_TRAP_TYPE_NONE;
60231
60232 file = NULL;
60233 line = 0;
60234 diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
60235 --- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
60236 +++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
60237 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60238 if (limit > 4)
60239 return;
60240
60241 - is_on_stack = object_is_on_stack(addr);
60242 + is_on_stack = object_starts_on_stack(addr);
60243 if (is_on_stack == onstack)
60244 return;
60245
60246 diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
60247 --- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
60248 +++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
60249 @@ -870,7 +870,7 @@ out:
60250
60251 static void check_for_stack(struct device *dev, void *addr)
60252 {
60253 - if (object_is_on_stack(addr))
60254 + if (object_starts_on_stack(addr))
60255 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60256 "stack [addr=%p]\n", addr);
60257 }
60258 diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
60259 --- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
60260 +++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
60261 @@ -13,6 +13,7 @@
60262 #include <linux/init.h>
60263 #include <linux/sort.h>
60264 #include <asm/uaccess.h>
60265 +#include <asm/pgtable.h>
60266
60267 #ifndef ARCH_HAS_SORT_EXTABLE
60268 /*
60269 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
60270 void sort_extable(struct exception_table_entry *start,
60271 struct exception_table_entry *finish)
60272 {
60273 + pax_open_kernel();
60274 sort(start, finish - start, sizeof(struct exception_table_entry),
60275 cmp_ex, NULL);
60276 + pax_close_kernel();
60277 }
60278
60279 #ifdef CONFIG_MODULES
60280 diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
60281 --- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
60282 +++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
60283 @@ -269,7 +269,7 @@ static void free(void *where)
60284 malloc_ptr = free_mem_ptr;
60285 }
60286 #else
60287 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60288 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60289 #define free(a) kfree(a)
60290 #endif
60291
60292 diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
60293 --- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
60294 +++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
60295 @@ -1088,6 +1088,7 @@ config LATENCYTOP
60296 depends on DEBUG_KERNEL
60297 depends on STACKTRACE_SUPPORT
60298 depends on PROC_FS
60299 + depends on !GRKERNSEC_HIDESYM
60300 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60301 select KALLSYMS
60302 select KALLSYMS_ALL
60303 diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
60304 --- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
60305 +++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
60306 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60307 */
60308 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60309 {
60310 - WARN_ON(release == NULL);
60311 + BUG_ON(release == NULL);
60312 WARN_ON(release == (void (*)(struct kref *))kfree);
60313
60314 if (atomic_dec_and_test(&kref->refcount)) {
60315 diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
60316 --- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
60317 +++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
60318 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60319 int nr;
60320 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60321 };
60322 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60323 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60324
60325 static inline void *ptr_to_indirect(void *ptr)
60326 {
60327 diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
60328 --- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
60329 +++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
60330 @@ -16,6 +16,9 @@
60331 * - scnprintf and vscnprintf
60332 */
60333
60334 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60335 +#define __INCLUDED_BY_HIDESYM 1
60336 +#endif
60337 #include <stdarg.h>
60338 #include <linux/module.h>
60339 #include <linux/types.h>
60340 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60341 char sym[KSYM_SYMBOL_LEN];
60342 if (ext == 'B')
60343 sprint_backtrace(sym, value);
60344 - else if (ext != 'f' && ext != 's')
60345 + else if (ext != 'f' && ext != 's' && ext != 'a')
60346 sprint_symbol(sym, value);
60347 else
60348 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60349 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
60350 return string(buf, end, uuid, spec);
60351 }
60352
60353 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60354 +int kptr_restrict __read_mostly = 2;
60355 +#else
60356 int kptr_restrict __read_mostly;
60357 +#endif
60358
60359 /*
60360 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60361 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
60362 * - 'S' For symbolic direct pointers with offset
60363 * - 's' For symbolic direct pointers without offset
60364 * - 'B' For backtraced symbolic direct pointers with offset
60365 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60366 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60367 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60368 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60369 * - 'M' For a 6-byte MAC address, it prints the address in the
60370 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
60371 {
60372 if (!ptr && *fmt != 'K') {
60373 /*
60374 - * Print (null) with the same width as a pointer so it makes
60375 + * Print (nil) with the same width as a pointer so it makes
60376 * tabular output look nice.
60377 */
60378 if (spec.field_width == -1)
60379 spec.field_width = 2 * sizeof(void *);
60380 - return string(buf, end, "(null)", spec);
60381 + return string(buf, end, "(nil)", spec);
60382 }
60383
60384 switch (*fmt) {
60385 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
60386 /* Fallthrough */
60387 case 'S':
60388 case 's':
60389 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60390 + break;
60391 +#else
60392 + return symbol_string(buf, end, ptr, spec, *fmt);
60393 +#endif
60394 + case 'A':
60395 + case 'a':
60396 case 'B':
60397 return symbol_string(buf, end, ptr, spec, *fmt);
60398 case 'R':
60399 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
60400 typeof(type) value; \
60401 if (sizeof(type) == 8) { \
60402 args = PTR_ALIGN(args, sizeof(u32)); \
60403 - *(u32 *)&value = *(u32 *)args; \
60404 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60405 + *(u32 *)&value = *(const u32 *)args; \
60406 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60407 } else { \
60408 args = PTR_ALIGN(args, sizeof(type)); \
60409 - value = *(typeof(type) *)args; \
60410 + value = *(const typeof(type) *)args; \
60411 } \
60412 args += sizeof(type); \
60413 value; \
60414 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
60415 case FORMAT_TYPE_STR: {
60416 const char *str_arg = args;
60417 args += strlen(str_arg) + 1;
60418 - str = string(str, end, (char *)str_arg, spec);
60419 + str = string(str, end, str_arg, spec);
60420 break;
60421 }
60422
60423 diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
60424 --- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60425 +++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
60426 @@ -0,0 +1 @@
60427 +-grsec
60428 diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
60429 --- linux-3.0.4/Makefile 2011-08-29 23:26:13.000000000 -0400
60430 +++ linux-3.0.4/Makefile 2011-08-29 23:26:21.000000000 -0400
60431 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60432
60433 HOSTCC = gcc
60434 HOSTCXX = g++
60435 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60436 -HOSTCXXFLAGS = -O2
60437 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60438 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60439 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60440
60441 # Decide whether to build built-in, modular, or both.
60442 # Normally, just do built-in.
60443 @@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60444 KBUILD_CPPFLAGS := -D__KERNEL__
60445
60446 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60447 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60448 -fno-strict-aliasing -fno-common \
60449 -Werror-implicit-function-declaration \
60450 -Wno-format-security \
60451 -fno-delete-null-pointer-checks
60452 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60453 KBUILD_AFLAGS_KERNEL :=
60454 KBUILD_CFLAGS_KERNEL :=
60455 KBUILD_AFLAGS := -D__ASSEMBLY__
60456 @@ -564,6 +567,25 @@ else
60457 KBUILD_CFLAGS += -O2
60458 endif
60459
60460 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60461 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
60462 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60463 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60464 +endif
60465 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60466 +gcc-plugins0:
60467 + $(Q)$(MAKE) $(build)=tools/gcc
60468 +gcc-plugins: scripts_basic gcc-plugins0
60469 +else
60470 +gcc-plugins:
60471 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60472 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60473 +else
60474 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60475 +endif
60476 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60477 +endif
60478 +
60479 include $(srctree)/arch/$(SRCARCH)/Makefile
60480
60481 ifneq ($(CONFIG_FRAME_WARN),0)
60482 @@ -708,7 +730,7 @@ export mod_strip_cmd
60483
60484
60485 ifeq ($(KBUILD_EXTMOD),)
60486 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60487 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60488
60489 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60490 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60491 @@ -907,6 +929,7 @@ define rule_vmlinux-modpost
60492 endef
60493
60494 # vmlinux image - including updated kernel symbols
60495 +vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60496 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
60497 ifdef CONFIG_HEADERS_CHECK
60498 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
60499 @@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
60500 endif
60501
60502 # prepare2 creates a makefile if using a separate output directory
60503 -prepare2: prepare3 outputmakefile asm-generic
60504 +prepare2: prepare3 outputmakefile asm-generic gcc-plugins
60505
60506 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60507 include/config/auto.conf
60508 @@ -1087,6 +1110,7 @@ all: modules
60509 # using awk while concatenating to the final file.
60510
60511 PHONY += modules
60512 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60513 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
60514 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
60515 @$(kecho) ' Building modules, stage 2.';
60516 @@ -1359,6 +1383,7 @@ PHONY += $(module-dirs) modules
60517 $(module-dirs): crmodverdir $(objtree)/Module.symvers
60518 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
60519
60520 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60521 modules: $(module-dirs)
60522 @$(kecho) ' Building modules, stage 2.';
60523 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
60524 @@ -1404,7 +1429,7 @@ clean: $(clean-dirs)
60525 $(call cmd,rmdirs)
60526 $(call cmd,rmfiles)
60527 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60528 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60529 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60530 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60531 -o -name '*.symtypes' -o -name 'modules.order' \
60532 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60533 diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
60534 --- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
60535 +++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
60536 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
60537 struct address_space *mapping = file->f_mapping;
60538
60539 if (!mapping->a_ops->readpage)
60540 - return -ENOEXEC;
60541 + return -ENODEV;
60542 file_accessed(file);
60543 vma->vm_ops = &generic_file_vm_ops;
60544 vma->vm_flags |= VM_CAN_NONLINEAR;
60545 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
60546 *pos = i_size_read(inode);
60547
60548 if (limit != RLIM_INFINITY) {
60549 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60550 if (*pos >= limit) {
60551 send_sig(SIGXFSZ, current, 0);
60552 return -EFBIG;
60553 diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
60554 --- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
60555 +++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
60556 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60557 retry:
60558 vma = find_vma(mm, start);
60559
60560 +#ifdef CONFIG_PAX_SEGMEXEC
60561 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60562 + goto out;
60563 +#endif
60564 +
60565 /*
60566 * Make sure the vma is shared, that it supports prefaulting,
60567 * and that the remapped range is valid and fully within
60568 diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
60569 --- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
60570 +++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
60571 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60572 * So no dangers, even with speculative execution.
60573 */
60574 page = pte_page(pkmap_page_table[i]);
60575 + pax_open_kernel();
60576 pte_clear(&init_mm, (unsigned long)page_address(page),
60577 &pkmap_page_table[i]);
60578 -
60579 + pax_close_kernel();
60580 set_page_address(page, NULL);
60581 need_flush = 1;
60582 }
60583 @@ -186,9 +187,11 @@ start:
60584 }
60585 }
60586 vaddr = PKMAP_ADDR(last_pkmap_nr);
60587 +
60588 + pax_open_kernel();
60589 set_pte_at(&init_mm, vaddr,
60590 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60591 -
60592 + pax_close_kernel();
60593 pkmap_count[last_pkmap_nr] = 1;
60594 set_page_address(page, (void *)vaddr);
60595
60596 diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
60597 --- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
60598 +++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
60599 @@ -702,7 +702,7 @@ out:
60600 * run pte_offset_map on the pmd, if an huge pmd could
60601 * materialize from under us from a different thread.
60602 */
60603 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60604 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60605 return VM_FAULT_OOM;
60606 /* if an huge pmd materialized from under us just retry later */
60607 if (unlikely(pmd_trans_huge(*pmd)))
60608 diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
60609 --- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
60610 +++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
60611 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60612 return 1;
60613 }
60614
60615 +#ifdef CONFIG_PAX_SEGMEXEC
60616 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60617 +{
60618 + struct mm_struct *mm = vma->vm_mm;
60619 + struct vm_area_struct *vma_m;
60620 + unsigned long address_m;
60621 + pte_t *ptep_m;
60622 +
60623 + vma_m = pax_find_mirror_vma(vma);
60624 + if (!vma_m)
60625 + return;
60626 +
60627 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60628 + address_m = address + SEGMEXEC_TASK_SIZE;
60629 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60630 + get_page(page_m);
60631 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60632 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60633 +}
60634 +#endif
60635 +
60636 /*
60637 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60638 */
60639 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60640 make_huge_pte(vma, new_page, 1));
60641 page_remove_rmap(old_page);
60642 hugepage_add_new_anon_rmap(new_page, vma, address);
60643 +
60644 +#ifdef CONFIG_PAX_SEGMEXEC
60645 + pax_mirror_huge_pte(vma, address, new_page);
60646 +#endif
60647 +
60648 /* Make the old page be freed below */
60649 new_page = old_page;
60650 mmu_notifier_invalidate_range_end(mm,
60651 @@ -2591,6 +2617,10 @@ retry:
60652 && (vma->vm_flags & VM_SHARED)));
60653 set_huge_pte_at(mm, address, ptep, new_pte);
60654
60655 +#ifdef CONFIG_PAX_SEGMEXEC
60656 + pax_mirror_huge_pte(vma, address, page);
60657 +#endif
60658 +
60659 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60660 /* Optimization, do the COW without a second fault */
60661 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60662 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60663 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60664 struct hstate *h = hstate_vma(vma);
60665
60666 +#ifdef CONFIG_PAX_SEGMEXEC
60667 + struct vm_area_struct *vma_m;
60668 +#endif
60669 +
60670 ptep = huge_pte_offset(mm, address);
60671 if (ptep) {
60672 entry = huge_ptep_get(ptep);
60673 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60674 VM_FAULT_SET_HINDEX(h - hstates);
60675 }
60676
60677 +#ifdef CONFIG_PAX_SEGMEXEC
60678 + vma_m = pax_find_mirror_vma(vma);
60679 + if (vma_m) {
60680 + unsigned long address_m;
60681 +
60682 + if (vma->vm_start > vma_m->vm_start) {
60683 + address_m = address;
60684 + address -= SEGMEXEC_TASK_SIZE;
60685 + vma = vma_m;
60686 + h = hstate_vma(vma);
60687 + } else
60688 + address_m = address + SEGMEXEC_TASK_SIZE;
60689 +
60690 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60691 + return VM_FAULT_OOM;
60692 + address_m &= HPAGE_MASK;
60693 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60694 + }
60695 +#endif
60696 +
60697 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60698 if (!ptep)
60699 return VM_FAULT_OOM;
60700 diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
60701 --- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
60702 +++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
60703 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60704 * in mm/page_alloc.c
60705 */
60706 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60707 +extern void free_compound_page(struct page *page);
60708 extern void prep_compound_page(struct page *page, unsigned long order);
60709 #ifdef CONFIG_MEMORY_FAILURE
60710 extern bool is_free_buddy_page(struct page *page);
60711 diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
60712 --- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
60713 +++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
60714 @@ -240,7 +240,7 @@ config KSM
60715 config DEFAULT_MMAP_MIN_ADDR
60716 int "Low address space to protect from user allocation"
60717 depends on MMU
60718 - default 4096
60719 + default 65536
60720 help
60721 This is the portion of low virtual memory which should be protected
60722 from userspace allocation. Keeping a user from writing to low pages
60723 diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
60724 --- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
60725 +++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
60726 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60727
60728 for (i = 0; i < object->trace_len; i++) {
60729 void *ptr = (void *)object->trace[i];
60730 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60731 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60732 }
60733 }
60734
60735 diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
60736 --- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
60737 +++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
60738 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
60739 pgoff_t pgoff;
60740 unsigned long new_flags = vma->vm_flags;
60741
60742 +#ifdef CONFIG_PAX_SEGMEXEC
60743 + struct vm_area_struct *vma_m;
60744 +#endif
60745 +
60746 switch (behavior) {
60747 case MADV_NORMAL:
60748 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
60749 @@ -110,6 +114,13 @@ success:
60750 /*
60751 * vm_flags is protected by the mmap_sem held in write mode.
60752 */
60753 +
60754 +#ifdef CONFIG_PAX_SEGMEXEC
60755 + vma_m = pax_find_mirror_vma(vma);
60756 + if (vma_m)
60757 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
60758 +#endif
60759 +
60760 vma->vm_flags = new_flags;
60761
60762 out:
60763 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
60764 struct vm_area_struct ** prev,
60765 unsigned long start, unsigned long end)
60766 {
60767 +
60768 +#ifdef CONFIG_PAX_SEGMEXEC
60769 + struct vm_area_struct *vma_m;
60770 +#endif
60771 +
60772 *prev = vma;
60773 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
60774 return -EINVAL;
60775 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
60776 zap_page_range(vma, start, end - start, &details);
60777 } else
60778 zap_page_range(vma, start, end - start, NULL);
60779 +
60780 +#ifdef CONFIG_PAX_SEGMEXEC
60781 + vma_m = pax_find_mirror_vma(vma);
60782 + if (vma_m) {
60783 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
60784 + struct zap_details details = {
60785 + .nonlinear_vma = vma_m,
60786 + .last_index = ULONG_MAX,
60787 + };
60788 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
60789 + } else
60790 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
60791 + }
60792 +#endif
60793 +
60794 return 0;
60795 }
60796
60797 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
60798 if (end < start)
60799 goto out;
60800
60801 +#ifdef CONFIG_PAX_SEGMEXEC
60802 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60803 + if (end > SEGMEXEC_TASK_SIZE)
60804 + goto out;
60805 + } else
60806 +#endif
60807 +
60808 + if (end > TASK_SIZE)
60809 + goto out;
60810 +
60811 error = 0;
60812 if (end == start)
60813 goto out;
60814 diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
60815 --- linux-3.0.4/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
60816 +++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
60817 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
60818 return;
60819
60820 pmd = pmd_offset(pud, start);
60821 +
60822 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
60823 pud_clear(pud);
60824 pmd_free_tlb(tlb, pmd, start);
60825 +#endif
60826 +
60827 }
60828
60829 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
60830 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
60831 if (end - 1 > ceiling - 1)
60832 return;
60833
60834 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
60835 pud = pud_offset(pgd, start);
60836 pgd_clear(pgd);
60837 pud_free_tlb(tlb, pud, start);
60838 +#endif
60839 +
60840 }
60841
60842 /*
60843 @@ -1577,12 +1584,6 @@ no_page_table:
60844 return page;
60845 }
60846
60847 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
60848 -{
60849 - return stack_guard_page_start(vma, addr) ||
60850 - stack_guard_page_end(vma, addr+PAGE_SIZE);
60851 -}
60852 -
60853 /**
60854 * __get_user_pages() - pin user pages in memory
60855 * @tsk: task_struct of target task
60856 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
60857 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
60858 i = 0;
60859
60860 - do {
60861 + while (nr_pages) {
60862 struct vm_area_struct *vma;
60863
60864 - vma = find_extend_vma(mm, start);
60865 + vma = find_vma(mm, start);
60866 if (!vma && in_gate_area(mm, start)) {
60867 unsigned long pg = start & PAGE_MASK;
60868 pgd_t *pgd;
60869 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
60870 goto next_page;
60871 }
60872
60873 - if (!vma ||
60874 + if (!vma || start < vma->vm_start ||
60875 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
60876 !(vm_flags & vma->vm_flags))
60877 return i ? : -EFAULT;
60878 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
60879 int ret;
60880 unsigned int fault_flags = 0;
60881
60882 - /* For mlock, just skip the stack guard page. */
60883 - if (foll_flags & FOLL_MLOCK) {
60884 - if (stack_guard_page(vma, start))
60885 - goto next_page;
60886 - }
60887 if (foll_flags & FOLL_WRITE)
60888 fault_flags |= FAULT_FLAG_WRITE;
60889 if (nonblocking)
60890 @@ -1811,7 +1807,7 @@ next_page:
60891 start += PAGE_SIZE;
60892 nr_pages--;
60893 } while (nr_pages && start < vma->vm_end);
60894 - } while (nr_pages);
60895 + }
60896 return i;
60897 }
60898 EXPORT_SYMBOL(__get_user_pages);
60899 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
60900 page_add_file_rmap(page);
60901 set_pte_at(mm, addr, pte, mk_pte(page, prot));
60902
60903 +#ifdef CONFIG_PAX_SEGMEXEC
60904 + pax_mirror_file_pte(vma, addr, page, ptl);
60905 +#endif
60906 +
60907 retval = 0;
60908 pte_unmap_unlock(pte, ptl);
60909 return retval;
60910 @@ -2052,10 +2052,22 @@ out:
60911 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
60912 struct page *page)
60913 {
60914 +
60915 +#ifdef CONFIG_PAX_SEGMEXEC
60916 + struct vm_area_struct *vma_m;
60917 +#endif
60918 +
60919 if (addr < vma->vm_start || addr >= vma->vm_end)
60920 return -EFAULT;
60921 if (!page_count(page))
60922 return -EINVAL;
60923 +
60924 +#ifdef CONFIG_PAX_SEGMEXEC
60925 + vma_m = pax_find_mirror_vma(vma);
60926 + if (vma_m)
60927 + vma_m->vm_flags |= VM_INSERTPAGE;
60928 +#endif
60929 +
60930 vma->vm_flags |= VM_INSERTPAGE;
60931 return insert_page(vma, addr, page, vma->vm_page_prot);
60932 }
60933 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
60934 unsigned long pfn)
60935 {
60936 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
60937 + BUG_ON(vma->vm_mirror);
60938
60939 if (addr < vma->vm_start || addr >= vma->vm_end)
60940 return -EFAULT;
60941 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
60942 copy_user_highpage(dst, src, va, vma);
60943 }
60944
60945 +#ifdef CONFIG_PAX_SEGMEXEC
60946 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
60947 +{
60948 + struct mm_struct *mm = vma->vm_mm;
60949 + spinlock_t *ptl;
60950 + pte_t *pte, entry;
60951 +
60952 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
60953 + entry = *pte;
60954 + if (!pte_present(entry)) {
60955 + if (!pte_none(entry)) {
60956 + BUG_ON(pte_file(entry));
60957 + free_swap_and_cache(pte_to_swp_entry(entry));
60958 + pte_clear_not_present_full(mm, address, pte, 0);
60959 + }
60960 + } else {
60961 + struct page *page;
60962 +
60963 + flush_cache_page(vma, address, pte_pfn(entry));
60964 + entry = ptep_clear_flush(vma, address, pte);
60965 + BUG_ON(pte_dirty(entry));
60966 + page = vm_normal_page(vma, address, entry);
60967 + if (page) {
60968 + update_hiwater_rss(mm);
60969 + if (PageAnon(page))
60970 + dec_mm_counter_fast(mm, MM_ANONPAGES);
60971 + else
60972 + dec_mm_counter_fast(mm, MM_FILEPAGES);
60973 + page_remove_rmap(page);
60974 + page_cache_release(page);
60975 + }
60976 + }
60977 + pte_unmap_unlock(pte, ptl);
60978 +}
60979 +
60980 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
60981 + *
60982 + * the ptl of the lower mapped page is held on entry and is not released on exit
60983 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
60984 + */
60985 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
60986 +{
60987 + struct mm_struct *mm = vma->vm_mm;
60988 + unsigned long address_m;
60989 + spinlock_t *ptl_m;
60990 + struct vm_area_struct *vma_m;
60991 + pmd_t *pmd_m;
60992 + pte_t *pte_m, entry_m;
60993 +
60994 + BUG_ON(!page_m || !PageAnon(page_m));
60995 +
60996 + vma_m = pax_find_mirror_vma(vma);
60997 + if (!vma_m)
60998 + return;
60999 +
61000 + BUG_ON(!PageLocked(page_m));
61001 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61002 + address_m = address + SEGMEXEC_TASK_SIZE;
61003 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61004 + pte_m = pte_offset_map(pmd_m, address_m);
61005 + ptl_m = pte_lockptr(mm, pmd_m);
61006 + if (ptl != ptl_m) {
61007 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61008 + if (!pte_none(*pte_m))
61009 + goto out;
61010 + }
61011 +
61012 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61013 + page_cache_get(page_m);
61014 + page_add_anon_rmap(page_m, vma_m, address_m);
61015 + inc_mm_counter_fast(mm, MM_ANONPAGES);
61016 + set_pte_at(mm, address_m, pte_m, entry_m);
61017 + update_mmu_cache(vma_m, address_m, entry_m);
61018 +out:
61019 + if (ptl != ptl_m)
61020 + spin_unlock(ptl_m);
61021 + pte_unmap(pte_m);
61022 + unlock_page(page_m);
61023 +}
61024 +
61025 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61026 +{
61027 + struct mm_struct *mm = vma->vm_mm;
61028 + unsigned long address_m;
61029 + spinlock_t *ptl_m;
61030 + struct vm_area_struct *vma_m;
61031 + pmd_t *pmd_m;
61032 + pte_t *pte_m, entry_m;
61033 +
61034 + BUG_ON(!page_m || PageAnon(page_m));
61035 +
61036 + vma_m = pax_find_mirror_vma(vma);
61037 + if (!vma_m)
61038 + return;
61039 +
61040 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61041 + address_m = address + SEGMEXEC_TASK_SIZE;
61042 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61043 + pte_m = pte_offset_map(pmd_m, address_m);
61044 + ptl_m = pte_lockptr(mm, pmd_m);
61045 + if (ptl != ptl_m) {
61046 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61047 + if (!pte_none(*pte_m))
61048 + goto out;
61049 + }
61050 +
61051 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61052 + page_cache_get(page_m);
61053 + page_add_file_rmap(page_m);
61054 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61055 + set_pte_at(mm, address_m, pte_m, entry_m);
61056 + update_mmu_cache(vma_m, address_m, entry_m);
61057 +out:
61058 + if (ptl != ptl_m)
61059 + spin_unlock(ptl_m);
61060 + pte_unmap(pte_m);
61061 +}
61062 +
61063 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61064 +{
61065 + struct mm_struct *mm = vma->vm_mm;
61066 + unsigned long address_m;
61067 + spinlock_t *ptl_m;
61068 + struct vm_area_struct *vma_m;
61069 + pmd_t *pmd_m;
61070 + pte_t *pte_m, entry_m;
61071 +
61072 + vma_m = pax_find_mirror_vma(vma);
61073 + if (!vma_m)
61074 + return;
61075 +
61076 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61077 + address_m = address + SEGMEXEC_TASK_SIZE;
61078 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61079 + pte_m = pte_offset_map(pmd_m, address_m);
61080 + ptl_m = pte_lockptr(mm, pmd_m);
61081 + if (ptl != ptl_m) {
61082 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61083 + if (!pte_none(*pte_m))
61084 + goto out;
61085 + }
61086 +
61087 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61088 + set_pte_at(mm, address_m, pte_m, entry_m);
61089 +out:
61090 + if (ptl != ptl_m)
61091 + spin_unlock(ptl_m);
61092 + pte_unmap(pte_m);
61093 +}
61094 +
61095 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61096 +{
61097 + struct page *page_m;
61098 + pte_t entry;
61099 +
61100 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61101 + goto out;
61102 +
61103 + entry = *pte;
61104 + page_m = vm_normal_page(vma, address, entry);
61105 + if (!page_m)
61106 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61107 + else if (PageAnon(page_m)) {
61108 + if (pax_find_mirror_vma(vma)) {
61109 + pte_unmap_unlock(pte, ptl);
61110 + lock_page(page_m);
61111 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61112 + if (pte_same(entry, *pte))
61113 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61114 + else
61115 + unlock_page(page_m);
61116 + }
61117 + } else
61118 + pax_mirror_file_pte(vma, address, page_m, ptl);
61119 +
61120 +out:
61121 + pte_unmap_unlock(pte, ptl);
61122 +}
61123 +#endif
61124 +
61125 /*
61126 * This routine handles present pages, when users try to write
61127 * to a shared page. It is done by copying the page to a new address
61128 @@ -2667,6 +2860,12 @@ gotten:
61129 */
61130 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61131 if (likely(pte_same(*page_table, orig_pte))) {
61132 +
61133 +#ifdef CONFIG_PAX_SEGMEXEC
61134 + if (pax_find_mirror_vma(vma))
61135 + BUG_ON(!trylock_page(new_page));
61136 +#endif
61137 +
61138 if (old_page) {
61139 if (!PageAnon(old_page)) {
61140 dec_mm_counter_fast(mm, MM_FILEPAGES);
61141 @@ -2718,6 +2917,10 @@ gotten:
61142 page_remove_rmap(old_page);
61143 }
61144
61145 +#ifdef CONFIG_PAX_SEGMEXEC
61146 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61147 +#endif
61148 +
61149 /* Free the old page.. */
61150 new_page = old_page;
61151 ret |= VM_FAULT_WRITE;
61152 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
61153 swap_free(entry);
61154 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61155 try_to_free_swap(page);
61156 +
61157 +#ifdef CONFIG_PAX_SEGMEXEC
61158 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61159 +#endif
61160 +
61161 unlock_page(page);
61162 if (swapcache) {
61163 /*
61164 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
61165
61166 /* No need to invalidate - it was non-present before */
61167 update_mmu_cache(vma, address, page_table);
61168 +
61169 +#ifdef CONFIG_PAX_SEGMEXEC
61170 + pax_mirror_anon_pte(vma, address, page, ptl);
61171 +#endif
61172 +
61173 unlock:
61174 pte_unmap_unlock(page_table, ptl);
61175 out:
61176 @@ -3039,40 +3252,6 @@ out_release:
61177 }
61178
61179 /*
61180 - * This is like a special single-page "expand_{down|up}wards()",
61181 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61182 - * doesn't hit another vma.
61183 - */
61184 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61185 -{
61186 - address &= PAGE_MASK;
61187 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61188 - struct vm_area_struct *prev = vma->vm_prev;
61189 -
61190 - /*
61191 - * Is there a mapping abutting this one below?
61192 - *
61193 - * That's only ok if it's the same stack mapping
61194 - * that has gotten split..
61195 - */
61196 - if (prev && prev->vm_end == address)
61197 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61198 -
61199 - expand_downwards(vma, address - PAGE_SIZE);
61200 - }
61201 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61202 - struct vm_area_struct *next = vma->vm_next;
61203 -
61204 - /* As VM_GROWSDOWN but s/below/above/ */
61205 - if (next && next->vm_start == address + PAGE_SIZE)
61206 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61207 -
61208 - expand_upwards(vma, address + PAGE_SIZE);
61209 - }
61210 - return 0;
61211 -}
61212 -
61213 -/*
61214 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61215 * but allow concurrent faults), and pte mapped but not yet locked.
61216 * We return with mmap_sem still held, but pte unmapped and unlocked.
61217 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
61218 unsigned long address, pte_t *page_table, pmd_t *pmd,
61219 unsigned int flags)
61220 {
61221 - struct page *page;
61222 + struct page *page = NULL;
61223 spinlock_t *ptl;
61224 pte_t entry;
61225
61226 - pte_unmap(page_table);
61227 -
61228 - /* Check if we need to add a guard page to the stack */
61229 - if (check_stack_guard_page(vma, address) < 0)
61230 - return VM_FAULT_SIGBUS;
61231 -
61232 - /* Use the zero-page for reads */
61233 if (!(flags & FAULT_FLAG_WRITE)) {
61234 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61235 vma->vm_page_prot));
61236 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61237 + ptl = pte_lockptr(mm, pmd);
61238 + spin_lock(ptl);
61239 if (!pte_none(*page_table))
61240 goto unlock;
61241 goto setpte;
61242 }
61243
61244 /* Allocate our own private page. */
61245 + pte_unmap(page_table);
61246 +
61247 if (unlikely(anon_vma_prepare(vma)))
61248 goto oom;
61249 page = alloc_zeroed_user_highpage_movable(vma, address);
61250 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
61251 if (!pte_none(*page_table))
61252 goto release;
61253
61254 +#ifdef CONFIG_PAX_SEGMEXEC
61255 + if (pax_find_mirror_vma(vma))
61256 + BUG_ON(!trylock_page(page));
61257 +#endif
61258 +
61259 inc_mm_counter_fast(mm, MM_ANONPAGES);
61260 page_add_new_anon_rmap(page, vma, address);
61261 setpte:
61262 @@ -3127,6 +3307,12 @@ setpte:
61263
61264 /* No need to invalidate - it was non-present before */
61265 update_mmu_cache(vma, address, page_table);
61266 +
61267 +#ifdef CONFIG_PAX_SEGMEXEC
61268 + if (page)
61269 + pax_mirror_anon_pte(vma, address, page, ptl);
61270 +#endif
61271 +
61272 unlock:
61273 pte_unmap_unlock(page_table, ptl);
61274 return 0;
61275 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
61276 */
61277 /* Only go through if we didn't race with anybody else... */
61278 if (likely(pte_same(*page_table, orig_pte))) {
61279 +
61280 +#ifdef CONFIG_PAX_SEGMEXEC
61281 + if (anon && pax_find_mirror_vma(vma))
61282 + BUG_ON(!trylock_page(page));
61283 +#endif
61284 +
61285 flush_icache_page(vma, page);
61286 entry = mk_pte(page, vma->vm_page_prot);
61287 if (flags & FAULT_FLAG_WRITE)
61288 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
61289
61290 /* no need to invalidate: a not-present page won't be cached */
61291 update_mmu_cache(vma, address, page_table);
61292 +
61293 +#ifdef CONFIG_PAX_SEGMEXEC
61294 + if (anon)
61295 + pax_mirror_anon_pte(vma, address, page, ptl);
61296 + else
61297 + pax_mirror_file_pte(vma, address, page, ptl);
61298 +#endif
61299 +
61300 } else {
61301 if (charged)
61302 mem_cgroup_uncharge_page(page);
61303 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
61304 if (flags & FAULT_FLAG_WRITE)
61305 flush_tlb_fix_spurious_fault(vma, address);
61306 }
61307 +
61308 +#ifdef CONFIG_PAX_SEGMEXEC
61309 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61310 + return 0;
61311 +#endif
61312 +
61313 unlock:
61314 pte_unmap_unlock(pte, ptl);
61315 return 0;
61316 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
61317 pmd_t *pmd;
61318 pte_t *pte;
61319
61320 +#ifdef CONFIG_PAX_SEGMEXEC
61321 + struct vm_area_struct *vma_m;
61322 +#endif
61323 +
61324 __set_current_state(TASK_RUNNING);
61325
61326 count_vm_event(PGFAULT);
61327 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
61328 if (unlikely(is_vm_hugetlb_page(vma)))
61329 return hugetlb_fault(mm, vma, address, flags);
61330
61331 +#ifdef CONFIG_PAX_SEGMEXEC
61332 + vma_m = pax_find_mirror_vma(vma);
61333 + if (vma_m) {
61334 + unsigned long address_m;
61335 + pgd_t *pgd_m;
61336 + pud_t *pud_m;
61337 + pmd_t *pmd_m;
61338 +
61339 + if (vma->vm_start > vma_m->vm_start) {
61340 + address_m = address;
61341 + address -= SEGMEXEC_TASK_SIZE;
61342 + vma = vma_m;
61343 + } else
61344 + address_m = address + SEGMEXEC_TASK_SIZE;
61345 +
61346 + pgd_m = pgd_offset(mm, address_m);
61347 + pud_m = pud_alloc(mm, pgd_m, address_m);
61348 + if (!pud_m)
61349 + return VM_FAULT_OOM;
61350 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61351 + if (!pmd_m)
61352 + return VM_FAULT_OOM;
61353 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61354 + return VM_FAULT_OOM;
61355 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61356 + }
61357 +#endif
61358 +
61359 pgd = pgd_offset(mm, address);
61360 pud = pud_alloc(mm, pgd, address);
61361 if (!pud)
61362 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
61363 * run pte_offset_map on the pmd, if an huge pmd could
61364 * materialize from under us from a different thread.
61365 */
61366 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61367 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61368 return VM_FAULT_OOM;
61369 /* if an huge pmd materialized from under us just retry later */
61370 if (unlikely(pmd_trans_huge(*pmd)))
61371 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
61372 gate_vma.vm_start = FIXADDR_USER_START;
61373 gate_vma.vm_end = FIXADDR_USER_END;
61374 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61375 - gate_vma.vm_page_prot = __P101;
61376 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61377 /*
61378 * Make sure the vDSO gets into every core dump.
61379 * Dumping its contents makes post-mortem fully interpretable later
61380 diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
61381 --- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
61382 +++ linux-3.0.4/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
61383 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61384
61385 int sysctl_memory_failure_recovery __read_mostly = 1;
61386
61387 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61388 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61389
61390 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61391
61392 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
61393 }
61394
61395 nr_pages = 1 << compound_trans_order(hpage);
61396 - atomic_long_add(nr_pages, &mce_bad_pages);
61397 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61398
61399 /*
61400 * We need/can do nothing about count=0 pages.
61401 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
61402 if (!PageHWPoison(hpage)
61403 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61404 || (p != hpage && TestSetPageHWPoison(hpage))) {
61405 - atomic_long_sub(nr_pages, &mce_bad_pages);
61406 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61407 return 0;
61408 }
61409 set_page_hwpoison_huge_page(hpage);
61410 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
61411 }
61412 if (hwpoison_filter(p)) {
61413 if (TestClearPageHWPoison(p))
61414 - atomic_long_sub(nr_pages, &mce_bad_pages);
61415 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61416 unlock_page(hpage);
61417 put_page(hpage);
61418 return 0;
61419 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
61420 return 0;
61421 }
61422 if (TestClearPageHWPoison(p))
61423 - atomic_long_sub(nr_pages, &mce_bad_pages);
61424 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61425 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61426 return 0;
61427 }
61428 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
61429 */
61430 if (TestClearPageHWPoison(page)) {
61431 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61432 - atomic_long_sub(nr_pages, &mce_bad_pages);
61433 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61434 freeit = 1;
61435 if (PageHuge(page))
61436 clear_page_hwpoison_huge_page(page);
61437 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
61438 }
61439 done:
61440 if (!PageHWPoison(hpage))
61441 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61442 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61443 set_page_hwpoison_huge_page(hpage);
61444 dequeue_hwpoisoned_huge_page(hpage);
61445 /* keep elevated page count for bad page */
61446 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
61447 return ret;
61448
61449 done:
61450 - atomic_long_add(1, &mce_bad_pages);
61451 + atomic_long_add_unchecked(1, &mce_bad_pages);
61452 SetPageHWPoison(page);
61453 /* keep elevated page count for bad page */
61454 return ret;
61455 diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
61456 --- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
61457 +++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
61458 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
61459 unsigned long vmstart;
61460 unsigned long vmend;
61461
61462 +#ifdef CONFIG_PAX_SEGMEXEC
61463 + struct vm_area_struct *vma_m;
61464 +#endif
61465 +
61466 vma = find_vma_prev(mm, start, &prev);
61467 if (!vma || vma->vm_start > start)
61468 return -EFAULT;
61469 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
61470 err = policy_vma(vma, new_pol);
61471 if (err)
61472 goto out;
61473 +
61474 +#ifdef CONFIG_PAX_SEGMEXEC
61475 + vma_m = pax_find_mirror_vma(vma);
61476 + if (vma_m) {
61477 + err = policy_vma(vma_m, new_pol);
61478 + if (err)
61479 + goto out;
61480 + }
61481 +#endif
61482 +
61483 }
61484
61485 out:
61486 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
61487
61488 if (end < start)
61489 return -EINVAL;
61490 +
61491 +#ifdef CONFIG_PAX_SEGMEXEC
61492 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61493 + if (end > SEGMEXEC_TASK_SIZE)
61494 + return -EINVAL;
61495 + } else
61496 +#endif
61497 +
61498 + if (end > TASK_SIZE)
61499 + return -EINVAL;
61500 +
61501 if (end == start)
61502 return 0;
61503
61504 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61505 if (!mm)
61506 goto out;
61507
61508 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61509 + if (mm != current->mm &&
61510 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61511 + err = -EPERM;
61512 + goto out;
61513 + }
61514 +#endif
61515 +
61516 /*
61517 * Check if this process has the right to modify the specified
61518 * process. The right exists if the process has administrative
61519 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61520 rcu_read_lock();
61521 tcred = __task_cred(task);
61522 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61523 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61524 - !capable(CAP_SYS_NICE)) {
61525 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61526 rcu_read_unlock();
61527 err = -EPERM;
61528 goto out;
61529 diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
61530 --- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
61531 +++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
61532 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
61533 unsigned long chunk_start;
61534 int err;
61535
61536 + pax_track_stack();
61537 +
61538 task_nodes = cpuset_mems_allowed(task);
61539
61540 err = -ENOMEM;
61541 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61542 if (!mm)
61543 return -EINVAL;
61544
61545 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61546 + if (mm != current->mm &&
61547 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61548 + err = -EPERM;
61549 + goto out;
61550 + }
61551 +#endif
61552 +
61553 /*
61554 * Check if this process has the right to modify the specified
61555 * process. The right exists if the process has administrative
61556 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61557 rcu_read_lock();
61558 tcred = __task_cred(task);
61559 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61560 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61561 - !capable(CAP_SYS_NICE)) {
61562 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61563 rcu_read_unlock();
61564 err = -EPERM;
61565 goto out;
61566 diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
61567 --- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
61568 +++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
61569 @@ -13,6 +13,7 @@
61570 #include <linux/pagemap.h>
61571 #include <linux/mempolicy.h>
61572 #include <linux/syscalls.h>
61573 +#include <linux/security.h>
61574 #include <linux/sched.h>
61575 #include <linux/module.h>
61576 #include <linux/rmap.h>
61577 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61578 return -EINVAL;
61579 if (end == start)
61580 return 0;
61581 + if (end > TASK_SIZE)
61582 + return -EINVAL;
61583 +
61584 vma = find_vma_prev(current->mm, start, &prev);
61585 if (!vma || vma->vm_start > start)
61586 return -ENOMEM;
61587 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61588 for (nstart = start ; ; ) {
61589 vm_flags_t newflags;
61590
61591 +#ifdef CONFIG_PAX_SEGMEXEC
61592 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61593 + break;
61594 +#endif
61595 +
61596 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61597
61598 newflags = vma->vm_flags | VM_LOCKED;
61599 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61600 lock_limit >>= PAGE_SHIFT;
61601
61602 /* check against resource limits */
61603 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61604 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61605 error = do_mlock(start, len, 1);
61606 up_write(&current->mm->mmap_sem);
61607 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61608 static int do_mlockall(int flags)
61609 {
61610 struct vm_area_struct * vma, * prev = NULL;
61611 - unsigned int def_flags = 0;
61612
61613 if (flags & MCL_FUTURE)
61614 - def_flags = VM_LOCKED;
61615 - current->mm->def_flags = def_flags;
61616 + current->mm->def_flags |= VM_LOCKED;
61617 + else
61618 + current->mm->def_flags &= ~VM_LOCKED;
61619 if (flags == MCL_FUTURE)
61620 goto out;
61621
61622 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61623 vm_flags_t newflags;
61624
61625 +#ifdef CONFIG_PAX_SEGMEXEC
61626 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61627 + break;
61628 +#endif
61629 +
61630 + BUG_ON(vma->vm_end > TASK_SIZE);
61631 newflags = vma->vm_flags | VM_LOCKED;
61632 if (!(flags & MCL_CURRENT))
61633 newflags &= ~VM_LOCKED;
61634 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61635 lock_limit >>= PAGE_SHIFT;
61636
61637 ret = -ENOMEM;
61638 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61639 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61640 capable(CAP_IPC_LOCK))
61641 ret = do_mlockall(flags);
61642 diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
61643 --- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
61644 +++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
61645 @@ -46,6 +46,16 @@
61646 #define arch_rebalance_pgtables(addr, len) (addr)
61647 #endif
61648
61649 +static inline void verify_mm_writelocked(struct mm_struct *mm)
61650 +{
61651 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61652 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61653 + up_read(&mm->mmap_sem);
61654 + BUG();
61655 + }
61656 +#endif
61657 +}
61658 +
61659 static void unmap_region(struct mm_struct *mm,
61660 struct vm_area_struct *vma, struct vm_area_struct *prev,
61661 unsigned long start, unsigned long end);
61662 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61663 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61664 *
61665 */
61666 -pgprot_t protection_map[16] = {
61667 +pgprot_t protection_map[16] __read_only = {
61668 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61669 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61670 };
61671
61672 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
61673 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61674 {
61675 - return __pgprot(pgprot_val(protection_map[vm_flags &
61676 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61677 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61678 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61679 +
61680 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61681 + if (!(__supported_pte_mask & _PAGE_NX) &&
61682 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61683 + (vm_flags & (VM_READ | VM_WRITE)))
61684 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61685 +#endif
61686 +
61687 + return prot;
61688 }
61689 EXPORT_SYMBOL(vm_get_page_prot);
61690
61691 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
61692 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
61693 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61694 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61695 /*
61696 * Make sure vm_committed_as in one cacheline and not cacheline shared with
61697 * other variables. It can be updated by several CPUs frequently.
61698 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
61699 struct vm_area_struct *next = vma->vm_next;
61700
61701 might_sleep();
61702 + BUG_ON(vma->vm_mirror);
61703 if (vma->vm_ops && vma->vm_ops->close)
61704 vma->vm_ops->close(vma);
61705 if (vma->vm_file) {
61706 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61707 * not page aligned -Ram Gupta
61708 */
61709 rlim = rlimit(RLIMIT_DATA);
61710 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61711 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61712 (mm->end_data - mm->start_data) > rlim)
61713 goto out;
61714 @@ -697,6 +719,12 @@ static int
61715 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61716 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61717 {
61718 +
61719 +#ifdef CONFIG_PAX_SEGMEXEC
61720 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61721 + return 0;
61722 +#endif
61723 +
61724 if (is_mergeable_vma(vma, file, vm_flags) &&
61725 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61726 if (vma->vm_pgoff == vm_pgoff)
61727 @@ -716,6 +744,12 @@ static int
61728 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
61729 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61730 {
61731 +
61732 +#ifdef CONFIG_PAX_SEGMEXEC
61733 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
61734 + return 0;
61735 +#endif
61736 +
61737 if (is_mergeable_vma(vma, file, vm_flags) &&
61738 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61739 pgoff_t vm_pglen;
61740 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
61741 struct vm_area_struct *vma_merge(struct mm_struct *mm,
61742 struct vm_area_struct *prev, unsigned long addr,
61743 unsigned long end, unsigned long vm_flags,
61744 - struct anon_vma *anon_vma, struct file *file,
61745 + struct anon_vma *anon_vma, struct file *file,
61746 pgoff_t pgoff, struct mempolicy *policy)
61747 {
61748 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
61749 struct vm_area_struct *area, *next;
61750 int err;
61751
61752 +#ifdef CONFIG_PAX_SEGMEXEC
61753 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
61754 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
61755 +
61756 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
61757 +#endif
61758 +
61759 /*
61760 * We later require that vma->vm_flags == vm_flags,
61761 * so this tests vma->vm_flags & VM_SPECIAL, too.
61762 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
61763 if (next && next->vm_end == end) /* cases 6, 7, 8 */
61764 next = next->vm_next;
61765
61766 +#ifdef CONFIG_PAX_SEGMEXEC
61767 + if (prev)
61768 + prev_m = pax_find_mirror_vma(prev);
61769 + if (area)
61770 + area_m = pax_find_mirror_vma(area);
61771 + if (next)
61772 + next_m = pax_find_mirror_vma(next);
61773 +#endif
61774 +
61775 /*
61776 * Can it merge with the predecessor?
61777 */
61778 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
61779 /* cases 1, 6 */
61780 err = vma_adjust(prev, prev->vm_start,
61781 next->vm_end, prev->vm_pgoff, NULL);
61782 - } else /* cases 2, 5, 7 */
61783 +
61784 +#ifdef CONFIG_PAX_SEGMEXEC
61785 + if (!err && prev_m)
61786 + err = vma_adjust(prev_m, prev_m->vm_start,
61787 + next_m->vm_end, prev_m->vm_pgoff, NULL);
61788 +#endif
61789 +
61790 + } else { /* cases 2, 5, 7 */
61791 err = vma_adjust(prev, prev->vm_start,
61792 end, prev->vm_pgoff, NULL);
61793 +
61794 +#ifdef CONFIG_PAX_SEGMEXEC
61795 + if (!err && prev_m)
61796 + err = vma_adjust(prev_m, prev_m->vm_start,
61797 + end_m, prev_m->vm_pgoff, NULL);
61798 +#endif
61799 +
61800 + }
61801 if (err)
61802 return NULL;
61803 khugepaged_enter_vma_merge(prev);
61804 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
61805 mpol_equal(policy, vma_policy(next)) &&
61806 can_vma_merge_before(next, vm_flags,
61807 anon_vma, file, pgoff+pglen)) {
61808 - if (prev && addr < prev->vm_end) /* case 4 */
61809 + if (prev && addr < prev->vm_end) { /* case 4 */
61810 err = vma_adjust(prev, prev->vm_start,
61811 addr, prev->vm_pgoff, NULL);
61812 - else /* cases 3, 8 */
61813 +
61814 +#ifdef CONFIG_PAX_SEGMEXEC
61815 + if (!err && prev_m)
61816 + err = vma_adjust(prev_m, prev_m->vm_start,
61817 + addr_m, prev_m->vm_pgoff, NULL);
61818 +#endif
61819 +
61820 + } else { /* cases 3, 8 */
61821 err = vma_adjust(area, addr, next->vm_end,
61822 next->vm_pgoff - pglen, NULL);
61823 +
61824 +#ifdef CONFIG_PAX_SEGMEXEC
61825 + if (!err && area_m)
61826 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
61827 + next_m->vm_pgoff - pglen, NULL);
61828 +#endif
61829 +
61830 + }
61831 if (err)
61832 return NULL;
61833 khugepaged_enter_vma_merge(area);
61834 @@ -929,14 +1009,11 @@ none:
61835 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
61836 struct file *file, long pages)
61837 {
61838 - const unsigned long stack_flags
61839 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
61840 -
61841 if (file) {
61842 mm->shared_vm += pages;
61843 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
61844 mm->exec_vm += pages;
61845 - } else if (flags & stack_flags)
61846 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
61847 mm->stack_vm += pages;
61848 if (flags & (VM_RESERVED|VM_IO))
61849 mm->reserved_vm += pages;
61850 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
61851 * (the exception is when the underlying filesystem is noexec
61852 * mounted, in which case we dont add PROT_EXEC.)
61853 */
61854 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
61855 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
61856 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
61857 prot |= PROT_EXEC;
61858
61859 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
61860 /* Obtain the address to map to. we verify (or select) it and ensure
61861 * that it represents a valid section of the address space.
61862 */
61863 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
61864 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
61865 if (addr & ~PAGE_MASK)
61866 return addr;
61867
61868 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
61869 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
61870 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
61871
61872 +#ifdef CONFIG_PAX_MPROTECT
61873 + if (mm->pax_flags & MF_PAX_MPROTECT) {
61874 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
61875 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
61876 + gr_log_rwxmmap(file);
61877 +
61878 +#ifdef CONFIG_PAX_EMUPLT
61879 + vm_flags &= ~VM_EXEC;
61880 +#else
61881 + return -EPERM;
61882 +#endif
61883 +
61884 + }
61885 +
61886 + if (!(vm_flags & VM_EXEC))
61887 + vm_flags &= ~VM_MAYEXEC;
61888 +#else
61889 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
61890 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61891 +#endif
61892 + else
61893 + vm_flags &= ~VM_MAYWRITE;
61894 + }
61895 +#endif
61896 +
61897 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61898 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
61899 + vm_flags &= ~VM_PAGEEXEC;
61900 +#endif
61901 +
61902 if (flags & MAP_LOCKED)
61903 if (!can_do_mlock())
61904 return -EPERM;
61905 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
61906 locked += mm->locked_vm;
61907 lock_limit = rlimit(RLIMIT_MEMLOCK);
61908 lock_limit >>= PAGE_SHIFT;
61909 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
61910 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61911 return -EAGAIN;
61912 }
61913 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
61914 if (error)
61915 return error;
61916
61917 + if (!gr_acl_handle_mmap(file, prot))
61918 + return -EACCES;
61919 +
61920 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
61921 }
61922 EXPORT_SYMBOL(do_mmap_pgoff);
61923 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
61924 vm_flags_t vm_flags = vma->vm_flags;
61925
61926 /* If it was private or non-writable, the write bit is already clear */
61927 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
61928 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
61929 return 0;
61930
61931 /* The backer wishes to know when pages are first written to? */
61932 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
61933 unsigned long charged = 0;
61934 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
61935
61936 +#ifdef CONFIG_PAX_SEGMEXEC
61937 + struct vm_area_struct *vma_m = NULL;
61938 +#endif
61939 +
61940 + /*
61941 + * mm->mmap_sem is required to protect against another thread
61942 + * changing the mappings in case we sleep.
61943 + */
61944 + verify_mm_writelocked(mm);
61945 +
61946 /* Clear old maps */
61947 error = -ENOMEM;
61948 -munmap_back:
61949 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61950 if (vma && vma->vm_start < addr + len) {
61951 if (do_munmap(mm, addr, len))
61952 return -ENOMEM;
61953 - goto munmap_back;
61954 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61955 + BUG_ON(vma && vma->vm_start < addr + len);
61956 }
61957
61958 /* Check against address space limit. */
61959 @@ -1266,6 +1387,16 @@ munmap_back:
61960 goto unacct_error;
61961 }
61962
61963 +#ifdef CONFIG_PAX_SEGMEXEC
61964 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
61965 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
61966 + if (!vma_m) {
61967 + error = -ENOMEM;
61968 + goto free_vma;
61969 + }
61970 + }
61971 +#endif
61972 +
61973 vma->vm_mm = mm;
61974 vma->vm_start = addr;
61975 vma->vm_end = addr + len;
61976 @@ -1289,6 +1420,19 @@ munmap_back:
61977 error = file->f_op->mmap(file, vma);
61978 if (error)
61979 goto unmap_and_free_vma;
61980 +
61981 +#ifdef CONFIG_PAX_SEGMEXEC
61982 + if (vma_m && (vm_flags & VM_EXECUTABLE))
61983 + added_exe_file_vma(mm);
61984 +#endif
61985 +
61986 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61987 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
61988 + vma->vm_flags |= VM_PAGEEXEC;
61989 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61990 + }
61991 +#endif
61992 +
61993 if (vm_flags & VM_EXECUTABLE)
61994 added_exe_file_vma(mm);
61995
61996 @@ -1324,6 +1468,11 @@ munmap_back:
61997 vma_link(mm, vma, prev, rb_link, rb_parent);
61998 file = vma->vm_file;
61999
62000 +#ifdef CONFIG_PAX_SEGMEXEC
62001 + if (vma_m)
62002 + BUG_ON(pax_mirror_vma(vma_m, vma));
62003 +#endif
62004 +
62005 /* Once vma denies write, undo our temporary denial count */
62006 if (correct_wcount)
62007 atomic_inc(&inode->i_writecount);
62008 @@ -1332,6 +1481,7 @@ out:
62009
62010 mm->total_vm += len >> PAGE_SHIFT;
62011 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62012 + track_exec_limit(mm, addr, addr + len, vm_flags);
62013 if (vm_flags & VM_LOCKED) {
62014 if (!mlock_vma_pages_range(vma, addr, addr + len))
62015 mm->locked_vm += (len >> PAGE_SHIFT);
62016 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
62017 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62018 charged = 0;
62019 free_vma:
62020 +
62021 +#ifdef CONFIG_PAX_SEGMEXEC
62022 + if (vma_m)
62023 + kmem_cache_free(vm_area_cachep, vma_m);
62024 +#endif
62025 +
62026 kmem_cache_free(vm_area_cachep, vma);
62027 unacct_error:
62028 if (charged)
62029 @@ -1356,6 +1512,44 @@ unacct_error:
62030 return error;
62031 }
62032
62033 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62034 +{
62035 + if (!vma) {
62036 +#ifdef CONFIG_STACK_GROWSUP
62037 + if (addr > sysctl_heap_stack_gap)
62038 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62039 + else
62040 + vma = find_vma(current->mm, 0);
62041 + if (vma && (vma->vm_flags & VM_GROWSUP))
62042 + return false;
62043 +#endif
62044 + return true;
62045 + }
62046 +
62047 + if (addr + len > vma->vm_start)
62048 + return false;
62049 +
62050 + if (vma->vm_flags & VM_GROWSDOWN)
62051 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62052 +#ifdef CONFIG_STACK_GROWSUP
62053 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62054 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62055 +#endif
62056 +
62057 + return true;
62058 +}
62059 +
62060 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62061 +{
62062 + if (vma->vm_start < len)
62063 + return -ENOMEM;
62064 + if (!(vma->vm_flags & VM_GROWSDOWN))
62065 + return vma->vm_start - len;
62066 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62067 + return vma->vm_start - len - sysctl_heap_stack_gap;
62068 + return -ENOMEM;
62069 +}
62070 +
62071 /* Get an address range which is currently unmapped.
62072 * For shmat() with addr=0.
62073 *
62074 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
62075 if (flags & MAP_FIXED)
62076 return addr;
62077
62078 +#ifdef CONFIG_PAX_RANDMMAP
62079 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62080 +#endif
62081 +
62082 if (addr) {
62083 addr = PAGE_ALIGN(addr);
62084 - vma = find_vma(mm, addr);
62085 - if (TASK_SIZE - len >= addr &&
62086 - (!vma || addr + len <= vma->vm_start))
62087 - return addr;
62088 + if (TASK_SIZE - len >= addr) {
62089 + vma = find_vma(mm, addr);
62090 + if (check_heap_stack_gap(vma, addr, len))
62091 + return addr;
62092 + }
62093 }
62094 if (len > mm->cached_hole_size) {
62095 - start_addr = addr = mm->free_area_cache;
62096 + start_addr = addr = mm->free_area_cache;
62097 } else {
62098 - start_addr = addr = TASK_UNMAPPED_BASE;
62099 - mm->cached_hole_size = 0;
62100 + start_addr = addr = mm->mmap_base;
62101 + mm->cached_hole_size = 0;
62102 }
62103
62104 full_search:
62105 @@ -1404,34 +1603,40 @@ full_search:
62106 * Start a new search - just in case we missed
62107 * some holes.
62108 */
62109 - if (start_addr != TASK_UNMAPPED_BASE) {
62110 - addr = TASK_UNMAPPED_BASE;
62111 - start_addr = addr;
62112 + if (start_addr != mm->mmap_base) {
62113 + start_addr = addr = mm->mmap_base;
62114 mm->cached_hole_size = 0;
62115 goto full_search;
62116 }
62117 return -ENOMEM;
62118 }
62119 - if (!vma || addr + len <= vma->vm_start) {
62120 - /*
62121 - * Remember the place where we stopped the search:
62122 - */
62123 - mm->free_area_cache = addr + len;
62124 - return addr;
62125 - }
62126 + if (check_heap_stack_gap(vma, addr, len))
62127 + break;
62128 if (addr + mm->cached_hole_size < vma->vm_start)
62129 mm->cached_hole_size = vma->vm_start - addr;
62130 addr = vma->vm_end;
62131 }
62132 +
62133 + /*
62134 + * Remember the place where we stopped the search:
62135 + */
62136 + mm->free_area_cache = addr + len;
62137 + return addr;
62138 }
62139 #endif
62140
62141 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62142 {
62143 +
62144 +#ifdef CONFIG_PAX_SEGMEXEC
62145 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62146 + return;
62147 +#endif
62148 +
62149 /*
62150 * Is this a new hole at the lowest possible address?
62151 */
62152 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62153 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62154 mm->free_area_cache = addr;
62155 mm->cached_hole_size = ~0UL;
62156 }
62157 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
62158 {
62159 struct vm_area_struct *vma;
62160 struct mm_struct *mm = current->mm;
62161 - unsigned long addr = addr0;
62162 + unsigned long base = mm->mmap_base, addr = addr0;
62163
62164 /* requested length too big for entire address space */
62165 if (len > TASK_SIZE)
62166 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
62167 if (flags & MAP_FIXED)
62168 return addr;
62169
62170 +#ifdef CONFIG_PAX_RANDMMAP
62171 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62172 +#endif
62173 +
62174 /* requesting a specific address */
62175 if (addr) {
62176 addr = PAGE_ALIGN(addr);
62177 - vma = find_vma(mm, addr);
62178 - if (TASK_SIZE - len >= addr &&
62179 - (!vma || addr + len <= vma->vm_start))
62180 - return addr;
62181 + if (TASK_SIZE - len >= addr) {
62182 + vma = find_vma(mm, addr);
62183 + if (check_heap_stack_gap(vma, addr, len))
62184 + return addr;
62185 + }
62186 }
62187
62188 /* check if free_area_cache is useful for us */
62189 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
62190 /* make sure it can fit in the remaining address space */
62191 if (addr > len) {
62192 vma = find_vma(mm, addr-len);
62193 - if (!vma || addr <= vma->vm_start)
62194 + if (check_heap_stack_gap(vma, addr - len, len))
62195 /* remember the address as a hint for next time */
62196 return (mm->free_area_cache = addr-len);
62197 }
62198 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
62199 * return with success:
62200 */
62201 vma = find_vma(mm, addr);
62202 - if (!vma || addr+len <= vma->vm_start)
62203 + if (check_heap_stack_gap(vma, addr, len))
62204 /* remember the address as a hint for next time */
62205 return (mm->free_area_cache = addr);
62206
62207 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
62208 mm->cached_hole_size = vma->vm_start - addr;
62209
62210 /* try just below the current vma->vm_start */
62211 - addr = vma->vm_start-len;
62212 - } while (len < vma->vm_start);
62213 + addr = skip_heap_stack_gap(vma, len);
62214 + } while (!IS_ERR_VALUE(addr));
62215
62216 bottomup:
62217 /*
62218 @@ -1515,13 +1725,21 @@ bottomup:
62219 * can happen with large stack limits and large mmap()
62220 * allocations.
62221 */
62222 + mm->mmap_base = TASK_UNMAPPED_BASE;
62223 +
62224 +#ifdef CONFIG_PAX_RANDMMAP
62225 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62226 + mm->mmap_base += mm->delta_mmap;
62227 +#endif
62228 +
62229 + mm->free_area_cache = mm->mmap_base;
62230 mm->cached_hole_size = ~0UL;
62231 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62232 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62233 /*
62234 * Restore the topdown base:
62235 */
62236 - mm->free_area_cache = mm->mmap_base;
62237 + mm->mmap_base = base;
62238 + mm->free_area_cache = base;
62239 mm->cached_hole_size = ~0UL;
62240
62241 return addr;
62242 @@ -1530,6 +1748,12 @@ bottomup:
62243
62244 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62245 {
62246 +
62247 +#ifdef CONFIG_PAX_SEGMEXEC
62248 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62249 + return;
62250 +#endif
62251 +
62252 /*
62253 * Is this a new hole at the highest possible address?
62254 */
62255 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
62256 mm->free_area_cache = addr;
62257
62258 /* dont allow allocations above current base */
62259 - if (mm->free_area_cache > mm->mmap_base)
62260 + if (mm->free_area_cache > mm->mmap_base) {
62261 mm->free_area_cache = mm->mmap_base;
62262 + mm->cached_hole_size = ~0UL;
62263 + }
62264 }
62265
62266 unsigned long
62267 @@ -1646,6 +1872,28 @@ out:
62268 return prev ? prev->vm_next : vma;
62269 }
62270
62271 +#ifdef CONFIG_PAX_SEGMEXEC
62272 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62273 +{
62274 + struct vm_area_struct *vma_m;
62275 +
62276 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62277 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62278 + BUG_ON(vma->vm_mirror);
62279 + return NULL;
62280 + }
62281 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62282 + vma_m = vma->vm_mirror;
62283 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62284 + BUG_ON(vma->vm_file != vma_m->vm_file);
62285 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62286 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62287 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62288 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62289 + return vma_m;
62290 +}
62291 +#endif
62292 +
62293 /*
62294 * Verify that the stack growth is acceptable and
62295 * update accounting. This is shared with both the
62296 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
62297 return -ENOMEM;
62298
62299 /* Stack limit test */
62300 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62301 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62302 return -ENOMEM;
62303
62304 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
62305 locked = mm->locked_vm + grow;
62306 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62307 limit >>= PAGE_SHIFT;
62308 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62309 if (locked > limit && !capable(CAP_IPC_LOCK))
62310 return -ENOMEM;
62311 }
62312 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
62313 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62314 * vma is the last one with address > vma->vm_end. Have to extend vma.
62315 */
62316 +#ifndef CONFIG_IA64
62317 +static
62318 +#endif
62319 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62320 {
62321 int error;
62322 + bool locknext;
62323
62324 if (!(vma->vm_flags & VM_GROWSUP))
62325 return -EFAULT;
62326
62327 + /* Also guard against wrapping around to address 0. */
62328 + if (address < PAGE_ALIGN(address+1))
62329 + address = PAGE_ALIGN(address+1);
62330 + else
62331 + return -ENOMEM;
62332 +
62333 /*
62334 * We must make sure the anon_vma is allocated
62335 * so that the anon_vma locking is not a noop.
62336 */
62337 if (unlikely(anon_vma_prepare(vma)))
62338 return -ENOMEM;
62339 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62340 + if (locknext && anon_vma_prepare(vma->vm_next))
62341 + return -ENOMEM;
62342 vma_lock_anon_vma(vma);
62343 + if (locknext)
62344 + vma_lock_anon_vma(vma->vm_next);
62345
62346 /*
62347 * vma->vm_start/vm_end cannot change under us because the caller
62348 * is required to hold the mmap_sem in read mode. We need the
62349 - * anon_vma lock to serialize against concurrent expand_stacks.
62350 - * Also guard against wrapping around to address 0.
62351 + * anon_vma locks to serialize against concurrent expand_stacks
62352 + * and expand_upwards.
62353 */
62354 - if (address < PAGE_ALIGN(address+4))
62355 - address = PAGE_ALIGN(address+4);
62356 - else {
62357 - vma_unlock_anon_vma(vma);
62358 - return -ENOMEM;
62359 - }
62360 error = 0;
62361
62362 /* Somebody else might have raced and expanded it already */
62363 - if (address > vma->vm_end) {
62364 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62365 + error = -ENOMEM;
62366 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62367 unsigned long size, grow;
62368
62369 size = address - vma->vm_start;
62370 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
62371 }
62372 }
62373 }
62374 + if (locknext)
62375 + vma_unlock_anon_vma(vma->vm_next);
62376 vma_unlock_anon_vma(vma);
62377 khugepaged_enter_vma_merge(vma);
62378 return error;
62379 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
62380 unsigned long address)
62381 {
62382 int error;
62383 + bool lockprev = false;
62384 + struct vm_area_struct *prev;
62385
62386 /*
62387 * We must make sure the anon_vma is allocated
62388 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
62389 if (error)
62390 return error;
62391
62392 + prev = vma->vm_prev;
62393 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62394 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62395 +#endif
62396 + if (lockprev && anon_vma_prepare(prev))
62397 + return -ENOMEM;
62398 + if (lockprev)
62399 + vma_lock_anon_vma(prev);
62400 +
62401 vma_lock_anon_vma(vma);
62402
62403 /*
62404 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
62405 */
62406
62407 /* Somebody else might have raced and expanded it already */
62408 - if (address < vma->vm_start) {
62409 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62410 + error = -ENOMEM;
62411 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62412 unsigned long size, grow;
62413
62414 +#ifdef CONFIG_PAX_SEGMEXEC
62415 + struct vm_area_struct *vma_m;
62416 +
62417 + vma_m = pax_find_mirror_vma(vma);
62418 +#endif
62419 +
62420 size = vma->vm_end - address;
62421 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62422
62423 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
62424 if (!error) {
62425 vma->vm_start = address;
62426 vma->vm_pgoff -= grow;
62427 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62428 +
62429 +#ifdef CONFIG_PAX_SEGMEXEC
62430 + if (vma_m) {
62431 + vma_m->vm_start -= grow << PAGE_SHIFT;
62432 + vma_m->vm_pgoff -= grow;
62433 + }
62434 +#endif
62435 +
62436 perf_event_mmap(vma);
62437 }
62438 }
62439 }
62440 vma_unlock_anon_vma(vma);
62441 + if (lockprev)
62442 + vma_unlock_anon_vma(prev);
62443 khugepaged_enter_vma_merge(vma);
62444 return error;
62445 }
62446 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
62447 do {
62448 long nrpages = vma_pages(vma);
62449
62450 +#ifdef CONFIG_PAX_SEGMEXEC
62451 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62452 + vma = remove_vma(vma);
62453 + continue;
62454 + }
62455 +#endif
62456 +
62457 mm->total_vm -= nrpages;
62458 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62459 vma = remove_vma(vma);
62460 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62461 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62462 vma->vm_prev = NULL;
62463 do {
62464 +
62465 +#ifdef CONFIG_PAX_SEGMEXEC
62466 + if (vma->vm_mirror) {
62467 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62468 + vma->vm_mirror->vm_mirror = NULL;
62469 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62470 + vma->vm_mirror = NULL;
62471 + }
62472 +#endif
62473 +
62474 rb_erase(&vma->vm_rb, &mm->mm_rb);
62475 mm->map_count--;
62476 tail_vma = vma;
62477 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
62478 struct vm_area_struct *new;
62479 int err = -ENOMEM;
62480
62481 +#ifdef CONFIG_PAX_SEGMEXEC
62482 + struct vm_area_struct *vma_m, *new_m = NULL;
62483 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62484 +#endif
62485 +
62486 if (is_vm_hugetlb_page(vma) && (addr &
62487 ~(huge_page_mask(hstate_vma(vma)))))
62488 return -EINVAL;
62489
62490 +#ifdef CONFIG_PAX_SEGMEXEC
62491 + vma_m = pax_find_mirror_vma(vma);
62492 +#endif
62493 +
62494 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62495 if (!new)
62496 goto out_err;
62497
62498 +#ifdef CONFIG_PAX_SEGMEXEC
62499 + if (vma_m) {
62500 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62501 + if (!new_m) {
62502 + kmem_cache_free(vm_area_cachep, new);
62503 + goto out_err;
62504 + }
62505 + }
62506 +#endif
62507 +
62508 /* most fields are the same, copy all, and then fixup */
62509 *new = *vma;
62510
62511 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
62512 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62513 }
62514
62515 +#ifdef CONFIG_PAX_SEGMEXEC
62516 + if (vma_m) {
62517 + *new_m = *vma_m;
62518 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62519 + new_m->vm_mirror = new;
62520 + new->vm_mirror = new_m;
62521 +
62522 + if (new_below)
62523 + new_m->vm_end = addr_m;
62524 + else {
62525 + new_m->vm_start = addr_m;
62526 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62527 + }
62528 + }
62529 +#endif
62530 +
62531 pol = mpol_dup(vma_policy(vma));
62532 if (IS_ERR(pol)) {
62533 err = PTR_ERR(pol);
62534 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
62535 else
62536 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62537
62538 +#ifdef CONFIG_PAX_SEGMEXEC
62539 + if (!err && vma_m) {
62540 + if (anon_vma_clone(new_m, vma_m))
62541 + goto out_free_mpol;
62542 +
62543 + mpol_get(pol);
62544 + vma_set_policy(new_m, pol);
62545 +
62546 + if (new_m->vm_file) {
62547 + get_file(new_m->vm_file);
62548 + if (vma_m->vm_flags & VM_EXECUTABLE)
62549 + added_exe_file_vma(mm);
62550 + }
62551 +
62552 + if (new_m->vm_ops && new_m->vm_ops->open)
62553 + new_m->vm_ops->open(new_m);
62554 +
62555 + if (new_below)
62556 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62557 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62558 + else
62559 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62560 +
62561 + if (err) {
62562 + if (new_m->vm_ops && new_m->vm_ops->close)
62563 + new_m->vm_ops->close(new_m);
62564 + if (new_m->vm_file) {
62565 + if (vma_m->vm_flags & VM_EXECUTABLE)
62566 + removed_exe_file_vma(mm);
62567 + fput(new_m->vm_file);
62568 + }
62569 + mpol_put(pol);
62570 + }
62571 + }
62572 +#endif
62573 +
62574 /* Success. */
62575 if (!err)
62576 return 0;
62577 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
62578 removed_exe_file_vma(mm);
62579 fput(new->vm_file);
62580 }
62581 - unlink_anon_vmas(new);
62582 out_free_mpol:
62583 mpol_put(pol);
62584 out_free_vma:
62585 +
62586 +#ifdef CONFIG_PAX_SEGMEXEC
62587 + if (new_m) {
62588 + unlink_anon_vmas(new_m);
62589 + kmem_cache_free(vm_area_cachep, new_m);
62590 + }
62591 +#endif
62592 +
62593 + unlink_anon_vmas(new);
62594 kmem_cache_free(vm_area_cachep, new);
62595 out_err:
62596 return err;
62597 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
62598 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62599 unsigned long addr, int new_below)
62600 {
62601 +
62602 +#ifdef CONFIG_PAX_SEGMEXEC
62603 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62604 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62605 + if (mm->map_count >= sysctl_max_map_count-1)
62606 + return -ENOMEM;
62607 + } else
62608 +#endif
62609 +
62610 if (mm->map_count >= sysctl_max_map_count)
62611 return -ENOMEM;
62612
62613 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
62614 * work. This now handles partial unmappings.
62615 * Jeremy Fitzhardinge <jeremy@goop.org>
62616 */
62617 +#ifdef CONFIG_PAX_SEGMEXEC
62618 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62619 {
62620 + int ret = __do_munmap(mm, start, len);
62621 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62622 + return ret;
62623 +
62624 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62625 +}
62626 +
62627 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62628 +#else
62629 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62630 +#endif
62631 +{
62632 unsigned long end;
62633 struct vm_area_struct *vma, *prev, *last;
62634
62635 + /*
62636 + * mm->mmap_sem is required to protect against another thread
62637 + * changing the mappings in case we sleep.
62638 + */
62639 + verify_mm_writelocked(mm);
62640 +
62641 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62642 return -EINVAL;
62643
62644 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
62645 /* Fix up all other VM information */
62646 remove_vma_list(mm, vma);
62647
62648 + track_exec_limit(mm, start, end, 0UL);
62649 +
62650 return 0;
62651 }
62652
62653 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62654
62655 profile_munmap(addr);
62656
62657 +#ifdef CONFIG_PAX_SEGMEXEC
62658 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62659 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62660 + return -EINVAL;
62661 +#endif
62662 +
62663 down_write(&mm->mmap_sem);
62664 ret = do_munmap(mm, addr, len);
62665 up_write(&mm->mmap_sem);
62666 return ret;
62667 }
62668
62669 -static inline void verify_mm_writelocked(struct mm_struct *mm)
62670 -{
62671 -#ifdef CONFIG_DEBUG_VM
62672 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62673 - WARN_ON(1);
62674 - up_read(&mm->mmap_sem);
62675 - }
62676 -#endif
62677 -}
62678 -
62679 /*
62680 * this is really a simplified "do_mmap". it only handles
62681 * anonymous maps. eventually we may be able to do some
62682 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
62683 struct rb_node ** rb_link, * rb_parent;
62684 pgoff_t pgoff = addr >> PAGE_SHIFT;
62685 int error;
62686 + unsigned long charged;
62687
62688 len = PAGE_ALIGN(len);
62689 if (!len)
62690 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
62691
62692 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62693
62694 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62695 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62696 + flags &= ~VM_EXEC;
62697 +
62698 +#ifdef CONFIG_PAX_MPROTECT
62699 + if (mm->pax_flags & MF_PAX_MPROTECT)
62700 + flags &= ~VM_MAYEXEC;
62701 +#endif
62702 +
62703 + }
62704 +#endif
62705 +
62706 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62707 if (error & ~PAGE_MASK)
62708 return error;
62709
62710 + charged = len >> PAGE_SHIFT;
62711 +
62712 /*
62713 * mlock MCL_FUTURE?
62714 */
62715 if (mm->def_flags & VM_LOCKED) {
62716 unsigned long locked, lock_limit;
62717 - locked = len >> PAGE_SHIFT;
62718 + locked = charged;
62719 locked += mm->locked_vm;
62720 lock_limit = rlimit(RLIMIT_MEMLOCK);
62721 lock_limit >>= PAGE_SHIFT;
62722 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
62723 /*
62724 * Clear old maps. this also does some error checking for us
62725 */
62726 - munmap_back:
62727 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62728 if (vma && vma->vm_start < addr + len) {
62729 if (do_munmap(mm, addr, len))
62730 return -ENOMEM;
62731 - goto munmap_back;
62732 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62733 + BUG_ON(vma && vma->vm_start < addr + len);
62734 }
62735
62736 /* Check against address space limits *after* clearing old maps... */
62737 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
62738 + if (!may_expand_vm(mm, charged))
62739 return -ENOMEM;
62740
62741 if (mm->map_count > sysctl_max_map_count)
62742 return -ENOMEM;
62743
62744 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
62745 + if (security_vm_enough_memory(charged))
62746 return -ENOMEM;
62747
62748 /* Can we just expand an old private anonymous mapping? */
62749 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
62750 */
62751 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62752 if (!vma) {
62753 - vm_unacct_memory(len >> PAGE_SHIFT);
62754 + vm_unacct_memory(charged);
62755 return -ENOMEM;
62756 }
62757
62758 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
62759 vma_link(mm, vma, prev, rb_link, rb_parent);
62760 out:
62761 perf_event_mmap(vma);
62762 - mm->total_vm += len >> PAGE_SHIFT;
62763 + mm->total_vm += charged;
62764 if (flags & VM_LOCKED) {
62765 if (!mlock_vma_pages_range(vma, addr, addr + len))
62766 - mm->locked_vm += (len >> PAGE_SHIFT);
62767 + mm->locked_vm += charged;
62768 }
62769 + track_exec_limit(mm, addr, addr + len, flags);
62770 return addr;
62771 }
62772
62773 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
62774 * Walk the list again, actually closing and freeing it,
62775 * with preemption enabled, without holding any MM locks.
62776 */
62777 - while (vma)
62778 + while (vma) {
62779 + vma->vm_mirror = NULL;
62780 vma = remove_vma(vma);
62781 + }
62782
62783 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
62784 }
62785 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
62786 struct vm_area_struct * __vma, * prev;
62787 struct rb_node ** rb_link, * rb_parent;
62788
62789 +#ifdef CONFIG_PAX_SEGMEXEC
62790 + struct vm_area_struct *vma_m = NULL;
62791 +#endif
62792 +
62793 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
62794 + return -EPERM;
62795 +
62796 /*
62797 * The vm_pgoff of a purely anonymous vma should be irrelevant
62798 * until its first write fault, when page's anon_vma and index
62799 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
62800 if ((vma->vm_flags & VM_ACCOUNT) &&
62801 security_vm_enough_memory_mm(mm, vma_pages(vma)))
62802 return -ENOMEM;
62803 +
62804 +#ifdef CONFIG_PAX_SEGMEXEC
62805 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
62806 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62807 + if (!vma_m)
62808 + return -ENOMEM;
62809 + }
62810 +#endif
62811 +
62812 vma_link(mm, vma, prev, rb_link, rb_parent);
62813 +
62814 +#ifdef CONFIG_PAX_SEGMEXEC
62815 + if (vma_m)
62816 + BUG_ON(pax_mirror_vma(vma_m, vma));
62817 +#endif
62818 +
62819 return 0;
62820 }
62821
62822 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
62823 struct rb_node **rb_link, *rb_parent;
62824 struct mempolicy *pol;
62825
62826 + BUG_ON(vma->vm_mirror);
62827 +
62828 /*
62829 * If anonymous vma has not yet been faulted, update new pgoff
62830 * to match new location, to increase its chance of merging.
62831 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
62832 return NULL;
62833 }
62834
62835 +#ifdef CONFIG_PAX_SEGMEXEC
62836 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
62837 +{
62838 + struct vm_area_struct *prev_m;
62839 + struct rb_node **rb_link_m, *rb_parent_m;
62840 + struct mempolicy *pol_m;
62841 +
62842 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
62843 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
62844 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
62845 + *vma_m = *vma;
62846 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
62847 + if (anon_vma_clone(vma_m, vma))
62848 + return -ENOMEM;
62849 + pol_m = vma_policy(vma_m);
62850 + mpol_get(pol_m);
62851 + vma_set_policy(vma_m, pol_m);
62852 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
62853 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
62854 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
62855 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
62856 + if (vma_m->vm_file)
62857 + get_file(vma_m->vm_file);
62858 + if (vma_m->vm_ops && vma_m->vm_ops->open)
62859 + vma_m->vm_ops->open(vma_m);
62860 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
62861 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
62862 + vma_m->vm_mirror = vma;
62863 + vma->vm_mirror = vma_m;
62864 + return 0;
62865 +}
62866 +#endif
62867 +
62868 /*
62869 * Return true if the calling process may expand its vm space by the passed
62870 * number of pages
62871 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
62872 unsigned long lim;
62873
62874 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
62875 -
62876 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
62877 if (cur + npages > lim)
62878 return 0;
62879 return 1;
62880 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
62881 vma->vm_start = addr;
62882 vma->vm_end = addr + len;
62883
62884 +#ifdef CONFIG_PAX_MPROTECT
62885 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62886 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62887 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
62888 + return -EPERM;
62889 + if (!(vm_flags & VM_EXEC))
62890 + vm_flags &= ~VM_MAYEXEC;
62891 +#else
62892 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62893 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62894 +#endif
62895 + else
62896 + vm_flags &= ~VM_MAYWRITE;
62897 + }
62898 +#endif
62899 +
62900 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
62901 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62902
62903 diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
62904 --- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
62905 +++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
62906 @@ -23,10 +23,16 @@
62907 #include <linux/mmu_notifier.h>
62908 #include <linux/migrate.h>
62909 #include <linux/perf_event.h>
62910 +
62911 +#ifdef CONFIG_PAX_MPROTECT
62912 +#include <linux/elf.h>
62913 +#endif
62914 +
62915 #include <asm/uaccess.h>
62916 #include <asm/pgtable.h>
62917 #include <asm/cacheflush.h>
62918 #include <asm/tlbflush.h>
62919 +#include <asm/mmu_context.h>
62920
62921 #ifndef pgprot_modify
62922 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
62923 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
62924 flush_tlb_range(vma, start, end);
62925 }
62926
62927 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62928 +/* called while holding the mmap semaphor for writing except stack expansion */
62929 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
62930 +{
62931 + unsigned long oldlimit, newlimit = 0UL;
62932 +
62933 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
62934 + return;
62935 +
62936 + spin_lock(&mm->page_table_lock);
62937 + oldlimit = mm->context.user_cs_limit;
62938 + if ((prot & VM_EXEC) && oldlimit < end)
62939 + /* USER_CS limit moved up */
62940 + newlimit = end;
62941 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
62942 + /* USER_CS limit moved down */
62943 + newlimit = start;
62944 +
62945 + if (newlimit) {
62946 + mm->context.user_cs_limit = newlimit;
62947 +
62948 +#ifdef CONFIG_SMP
62949 + wmb();
62950 + cpus_clear(mm->context.cpu_user_cs_mask);
62951 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
62952 +#endif
62953 +
62954 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
62955 + }
62956 + spin_unlock(&mm->page_table_lock);
62957 + if (newlimit == end) {
62958 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
62959 +
62960 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
62961 + if (is_vm_hugetlb_page(vma))
62962 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
62963 + else
62964 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
62965 + }
62966 +}
62967 +#endif
62968 +
62969 int
62970 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
62971 unsigned long start, unsigned long end, unsigned long newflags)
62972 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
62973 int error;
62974 int dirty_accountable = 0;
62975
62976 +#ifdef CONFIG_PAX_SEGMEXEC
62977 + struct vm_area_struct *vma_m = NULL;
62978 + unsigned long start_m, end_m;
62979 +
62980 + start_m = start + SEGMEXEC_TASK_SIZE;
62981 + end_m = end + SEGMEXEC_TASK_SIZE;
62982 +#endif
62983 +
62984 if (newflags == oldflags) {
62985 *pprev = vma;
62986 return 0;
62987 }
62988
62989 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
62990 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
62991 +
62992 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
62993 + return -ENOMEM;
62994 +
62995 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
62996 + return -ENOMEM;
62997 + }
62998 +
62999 /*
63000 * If we make a private mapping writable we increase our commit;
63001 * but (without finer accounting) cannot reduce our commit if we
63002 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63003 }
63004 }
63005
63006 +#ifdef CONFIG_PAX_SEGMEXEC
63007 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63008 + if (start != vma->vm_start) {
63009 + error = split_vma(mm, vma, start, 1);
63010 + if (error)
63011 + goto fail;
63012 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63013 + *pprev = (*pprev)->vm_next;
63014 + }
63015 +
63016 + if (end != vma->vm_end) {
63017 + error = split_vma(mm, vma, end, 0);
63018 + if (error)
63019 + goto fail;
63020 + }
63021 +
63022 + if (pax_find_mirror_vma(vma)) {
63023 + error = __do_munmap(mm, start_m, end_m - start_m);
63024 + if (error)
63025 + goto fail;
63026 + } else {
63027 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63028 + if (!vma_m) {
63029 + error = -ENOMEM;
63030 + goto fail;
63031 + }
63032 + vma->vm_flags = newflags;
63033 + error = pax_mirror_vma(vma_m, vma);
63034 + if (error) {
63035 + vma->vm_flags = oldflags;
63036 + goto fail;
63037 + }
63038 + }
63039 + }
63040 +#endif
63041 +
63042 /*
63043 * First try to merge with previous and/or next vma.
63044 */
63045 @@ -204,9 +306,21 @@ success:
63046 * vm_flags and vm_page_prot are protected by the mmap_sem
63047 * held in write mode.
63048 */
63049 +
63050 +#ifdef CONFIG_PAX_SEGMEXEC
63051 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63052 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63053 +#endif
63054 +
63055 vma->vm_flags = newflags;
63056 +
63057 +#ifdef CONFIG_PAX_MPROTECT
63058 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63059 + mm->binfmt->handle_mprotect(vma, newflags);
63060 +#endif
63061 +
63062 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63063 - vm_get_page_prot(newflags));
63064 + vm_get_page_prot(vma->vm_flags));
63065
63066 if (vma_wants_writenotify(vma)) {
63067 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63068 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63069 end = start + len;
63070 if (end <= start)
63071 return -ENOMEM;
63072 +
63073 +#ifdef CONFIG_PAX_SEGMEXEC
63074 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63075 + if (end > SEGMEXEC_TASK_SIZE)
63076 + return -EINVAL;
63077 + } else
63078 +#endif
63079 +
63080 + if (end > TASK_SIZE)
63081 + return -EINVAL;
63082 +
63083 if (!arch_validate_prot(prot))
63084 return -EINVAL;
63085
63086 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63087 /*
63088 * Does the application expect PROT_READ to imply PROT_EXEC:
63089 */
63090 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63091 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63092 prot |= PROT_EXEC;
63093
63094 vm_flags = calc_vm_prot_bits(prot);
63095 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63096 if (start > vma->vm_start)
63097 prev = vma;
63098
63099 +#ifdef CONFIG_PAX_MPROTECT
63100 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63101 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63102 +#endif
63103 +
63104 for (nstart = start ; ; ) {
63105 unsigned long newflags;
63106
63107 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63108
63109 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63110 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63111 + if (prot & (PROT_WRITE | PROT_EXEC))
63112 + gr_log_rwxmprotect(vma->vm_file);
63113 +
63114 + error = -EACCES;
63115 + goto out;
63116 + }
63117 +
63118 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63119 error = -EACCES;
63120 goto out;
63121 }
63122 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63123 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63124 if (error)
63125 goto out;
63126 +
63127 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63128 +
63129 nstart = tmp;
63130
63131 if (nstart < prev->vm_end)
63132 diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
63133 --- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
63134 +++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
63135 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
63136 continue;
63137 pte = ptep_clear_flush(vma, old_addr, old_pte);
63138 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63139 +
63140 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63141 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63142 + pte = pte_exprotect(pte);
63143 +#endif
63144 +
63145 set_pte_at(mm, new_addr, new_pte, pte);
63146 }
63147
63148 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
63149 if (is_vm_hugetlb_page(vma))
63150 goto Einval;
63151
63152 +#ifdef CONFIG_PAX_SEGMEXEC
63153 + if (pax_find_mirror_vma(vma))
63154 + goto Einval;
63155 +#endif
63156 +
63157 /* We can't remap across vm area boundaries */
63158 if (old_len > vma->vm_end - addr)
63159 goto Efault;
63160 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
63161 unsigned long ret = -EINVAL;
63162 unsigned long charged = 0;
63163 unsigned long map_flags;
63164 + unsigned long pax_task_size = TASK_SIZE;
63165
63166 if (new_addr & ~PAGE_MASK)
63167 goto out;
63168
63169 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63170 +#ifdef CONFIG_PAX_SEGMEXEC
63171 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63172 + pax_task_size = SEGMEXEC_TASK_SIZE;
63173 +#endif
63174 +
63175 + pax_task_size -= PAGE_SIZE;
63176 +
63177 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63178 goto out;
63179
63180 /* Check if the location we're moving into overlaps the
63181 * old location at all, and fail if it does.
63182 */
63183 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63184 - goto out;
63185 -
63186 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63187 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63188 goto out;
63189
63190 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63191 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
63192 struct vm_area_struct *vma;
63193 unsigned long ret = -EINVAL;
63194 unsigned long charged = 0;
63195 + unsigned long pax_task_size = TASK_SIZE;
63196
63197 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63198 goto out;
63199 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
63200 if (!new_len)
63201 goto out;
63202
63203 +#ifdef CONFIG_PAX_SEGMEXEC
63204 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63205 + pax_task_size = SEGMEXEC_TASK_SIZE;
63206 +#endif
63207 +
63208 + pax_task_size -= PAGE_SIZE;
63209 +
63210 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63211 + old_len > pax_task_size || addr > pax_task_size-old_len)
63212 + goto out;
63213 +
63214 if (flags & MREMAP_FIXED) {
63215 if (flags & MREMAP_MAYMOVE)
63216 ret = mremap_to(addr, old_len, new_addr, new_len);
63217 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
63218 addr + new_len);
63219 }
63220 ret = addr;
63221 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63222 goto out;
63223 }
63224 }
63225 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
63226 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63227 if (ret)
63228 goto out;
63229 +
63230 + map_flags = vma->vm_flags;
63231 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63232 + if (!(ret & ~PAGE_MASK)) {
63233 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63234 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63235 + }
63236 }
63237 out:
63238 if (ret & ~PAGE_MASK)
63239 diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
63240 --- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
63241 +++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
63242 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63243 unsigned long __init free_all_memory_core_early(int nodeid)
63244 {
63245 int i;
63246 - u64 start, end;
63247 + u64 start, end, startrange, endrange;
63248 unsigned long count = 0;
63249 - struct range *range = NULL;
63250 + struct range *range = NULL, rangerange = { 0, 0 };
63251 int nr_range;
63252
63253 nr_range = get_free_all_memory_range(&range, nodeid);
63254 + startrange = __pa(range) >> PAGE_SHIFT;
63255 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63256
63257 for (i = 0; i < nr_range; i++) {
63258 start = range[i].start;
63259 end = range[i].end;
63260 + if (start <= endrange && startrange < end) {
63261 + BUG_ON(rangerange.start | rangerange.end);
63262 + rangerange = range[i];
63263 + continue;
63264 + }
63265 count += end - start;
63266 __free_pages_memory(start, end);
63267 }
63268 + start = rangerange.start;
63269 + end = rangerange.end;
63270 + count += end - start;
63271 + __free_pages_memory(start, end);
63272
63273 return count;
63274 }
63275 diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
63276 --- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
63277 +++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
63278 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63279 int sysctl_overcommit_ratio = 50; /* default is 50% */
63280 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63281 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63282 -int heap_stack_gap = 0;
63283
63284 atomic_long_t mmap_pages_allocated;
63285
63286 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
63287 EXPORT_SYMBOL(find_vma);
63288
63289 /*
63290 - * find a VMA
63291 - * - we don't extend stack VMAs under NOMMU conditions
63292 - */
63293 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63294 -{
63295 - return find_vma(mm, addr);
63296 -}
63297 -
63298 -/*
63299 * expand a stack to a given address
63300 * - not supported under NOMMU conditions
63301 */
63302 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
63303
63304 /* most fields are the same, copy all, and then fixup */
63305 *new = *vma;
63306 + INIT_LIST_HEAD(&new->anon_vma_chain);
63307 *region = *vma->vm_region;
63308 new->vm_region = region;
63309
63310 diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
63311 --- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
63312 +++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
63313 @@ -340,7 +340,7 @@ out:
63314 * This usage means that zero-order pages may not be compound.
63315 */
63316
63317 -static void free_compound_page(struct page *page)
63318 +void free_compound_page(struct page *page)
63319 {
63320 __free_pages_ok(page, compound_order(page));
63321 }
63322 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
63323 int i;
63324 int bad = 0;
63325
63326 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63327 + unsigned long index = 1UL << order;
63328 +#endif
63329 +
63330 trace_mm_page_free_direct(page, order);
63331 kmemcheck_free_shadow(page, order);
63332
63333 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
63334 debug_check_no_obj_freed(page_address(page),
63335 PAGE_SIZE << order);
63336 }
63337 +
63338 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63339 + for (; index; --index)
63340 + sanitize_highpage(page + index - 1);
63341 +#endif
63342 +
63343 arch_free_page(page, order);
63344 kernel_map_pages(page, 1 << order, 0);
63345
63346 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
63347 arch_alloc_page(page, order);
63348 kernel_map_pages(page, 1 << order, 1);
63349
63350 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63351 if (gfp_flags & __GFP_ZERO)
63352 prep_zero_page(page, order, gfp_flags);
63353 +#endif
63354
63355 if (order && (gfp_flags & __GFP_COMP))
63356 prep_compound_page(page, order);
63357 @@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
63358 int cpu;
63359 struct zone *zone;
63360
63361 + pax_track_stack();
63362 +
63363 for_each_populated_zone(zone) {
63364 if (skip_free_areas_node(filter, zone_to_nid(zone)))
63365 continue;
63366 diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
63367 --- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
63368 +++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
63369 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63370 static unsigned int pcpu_last_unit_cpu __read_mostly;
63371
63372 /* the address of the first chunk which starts with the kernel static area */
63373 -void *pcpu_base_addr __read_mostly;
63374 +void *pcpu_base_addr __read_only;
63375 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63376
63377 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63378 diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
63379 --- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
63380 +++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
63381 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
63382 struct anon_vma *anon_vma = vma->anon_vma;
63383 struct anon_vma_chain *avc;
63384
63385 +#ifdef CONFIG_PAX_SEGMEXEC
63386 + struct anon_vma_chain *avc_m = NULL;
63387 +#endif
63388 +
63389 might_sleep();
63390 if (unlikely(!anon_vma)) {
63391 struct mm_struct *mm = vma->vm_mm;
63392 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
63393 if (!avc)
63394 goto out_enomem;
63395
63396 +#ifdef CONFIG_PAX_SEGMEXEC
63397 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
63398 + if (!avc_m)
63399 + goto out_enomem_free_avc;
63400 +#endif
63401 +
63402 anon_vma = find_mergeable_anon_vma(vma);
63403 allocated = NULL;
63404 if (!anon_vma) {
63405 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
63406 /* page_table_lock to protect against threads */
63407 spin_lock(&mm->page_table_lock);
63408 if (likely(!vma->anon_vma)) {
63409 +
63410 +#ifdef CONFIG_PAX_SEGMEXEC
63411 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63412 +
63413 + if (vma_m) {
63414 + BUG_ON(vma_m->anon_vma);
63415 + vma_m->anon_vma = anon_vma;
63416 + avc_m->anon_vma = anon_vma;
63417 + avc_m->vma = vma;
63418 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63419 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63420 + avc_m = NULL;
63421 + }
63422 +#endif
63423 +
63424 vma->anon_vma = anon_vma;
63425 avc->anon_vma = anon_vma;
63426 avc->vma = vma;
63427 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
63428
63429 if (unlikely(allocated))
63430 put_anon_vma(allocated);
63431 +
63432 +#ifdef CONFIG_PAX_SEGMEXEC
63433 + if (unlikely(avc_m))
63434 + anon_vma_chain_free(avc_m);
63435 +#endif
63436 +
63437 if (unlikely(avc))
63438 anon_vma_chain_free(avc);
63439 }
63440 return 0;
63441
63442 out_enomem_free_avc:
63443 +
63444 +#ifdef CONFIG_PAX_SEGMEXEC
63445 + if (avc_m)
63446 + anon_vma_chain_free(avc_m);
63447 +#endif
63448 +
63449 anon_vma_chain_free(avc);
63450 out_enomem:
63451 return -ENOMEM;
63452 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
63453 * Attach the anon_vmas from src to dst.
63454 * Returns 0 on success, -ENOMEM on failure.
63455 */
63456 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63457 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63458 {
63459 struct anon_vma_chain *avc, *pavc;
63460 struct anon_vma *root = NULL;
63461 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
63462 * the corresponding VMA in the parent process is attached to.
63463 * Returns 0 on success, non-zero on failure.
63464 */
63465 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63466 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63467 {
63468 struct anon_vma_chain *avc;
63469 struct anon_vma *anon_vma;
63470 diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
63471 --- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
63472 +++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
63473 @@ -31,7 +31,7 @@
63474 #include <linux/percpu_counter.h>
63475 #include <linux/swap.h>
63476
63477 -static struct vfsmount *shm_mnt;
63478 +struct vfsmount *shm_mnt;
63479
63480 #ifdef CONFIG_SHMEM
63481 /*
63482 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
63483 goto unlock;
63484 }
63485 entry = shmem_swp_entry(info, index, NULL);
63486 + if (!entry)
63487 + goto unlock;
63488 if (entry->val) {
63489 /*
63490 * The more uptodate page coming down from a stacked
63491 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
63492 struct vm_area_struct pvma;
63493 struct page *page;
63494
63495 + pax_track_stack();
63496 +
63497 spol = mpol_cond_copy(&mpol,
63498 mpol_shared_policy_lookup(&info->policy, idx));
63499
63500 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
63501 int err = -ENOMEM;
63502
63503 /* Round up to L1_CACHE_BYTES to resist false sharing */
63504 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63505 - L1_CACHE_BYTES), GFP_KERNEL);
63506 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63507 if (!sbinfo)
63508 return -ENOMEM;
63509
63510 diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
63511 --- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
63512 +++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
63513 @@ -151,7 +151,7 @@
63514
63515 /* Legal flag mask for kmem_cache_create(). */
63516 #if DEBUG
63517 -# define CREATE_MASK (SLAB_RED_ZONE | \
63518 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63519 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63520 SLAB_CACHE_DMA | \
63521 SLAB_STORE_USER | \
63522 @@ -159,7 +159,7 @@
63523 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63524 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63525 #else
63526 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63527 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63528 SLAB_CACHE_DMA | \
63529 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63530 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63531 @@ -288,7 +288,7 @@ struct kmem_list3 {
63532 * Need this for bootstrapping a per node allocator.
63533 */
63534 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63535 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63536 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63537 #define CACHE_CACHE 0
63538 #define SIZE_AC MAX_NUMNODES
63539 #define SIZE_L3 (2 * MAX_NUMNODES)
63540 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
63541 if ((x)->max_freeable < i) \
63542 (x)->max_freeable = i; \
63543 } while (0)
63544 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63545 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63546 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63547 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63548 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63549 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63550 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63551 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63552 #else
63553 #define STATS_INC_ACTIVE(x) do { } while (0)
63554 #define STATS_DEC_ACTIVE(x) do { } while (0)
63555 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
63556 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63557 */
63558 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63559 - const struct slab *slab, void *obj)
63560 + const struct slab *slab, const void *obj)
63561 {
63562 u32 offset = (obj - slab->s_mem);
63563 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63564 @@ -564,7 +564,7 @@ struct cache_names {
63565 static struct cache_names __initdata cache_names[] = {
63566 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63567 #include <linux/kmalloc_sizes.h>
63568 - {NULL,}
63569 + {NULL}
63570 #undef CACHE
63571 };
63572
63573 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
63574 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63575 sizes[INDEX_AC].cs_size,
63576 ARCH_KMALLOC_MINALIGN,
63577 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63578 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63579 NULL);
63580
63581 if (INDEX_AC != INDEX_L3) {
63582 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
63583 kmem_cache_create(names[INDEX_L3].name,
63584 sizes[INDEX_L3].cs_size,
63585 ARCH_KMALLOC_MINALIGN,
63586 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63587 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63588 NULL);
63589 }
63590
63591 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
63592 sizes->cs_cachep = kmem_cache_create(names->name,
63593 sizes->cs_size,
63594 ARCH_KMALLOC_MINALIGN,
63595 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63596 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63597 NULL);
63598 }
63599 #ifdef CONFIG_ZONE_DMA
63600 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
63601 }
63602 /* cpu stats */
63603 {
63604 - unsigned long allochit = atomic_read(&cachep->allochit);
63605 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63606 - unsigned long freehit = atomic_read(&cachep->freehit);
63607 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63608 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63609 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63610 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63611 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63612
63613 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63614 allochit, allocmiss, freehit, freemiss);
63615 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
63616
63617 static int __init slab_proc_init(void)
63618 {
63619 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63620 + mode_t gr_mode = S_IRUGO;
63621 +
63622 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63623 + gr_mode = S_IRUSR;
63624 +#endif
63625 +
63626 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63627 #ifdef CONFIG_DEBUG_SLAB_LEAK
63628 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63629 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63630 #endif
63631 return 0;
63632 }
63633 module_init(slab_proc_init);
63634 #endif
63635
63636 +void check_object_size(const void *ptr, unsigned long n, bool to)
63637 +{
63638 +
63639 +#ifdef CONFIG_PAX_USERCOPY
63640 + struct page *page;
63641 + struct kmem_cache *cachep = NULL;
63642 + struct slab *slabp;
63643 + unsigned int objnr;
63644 + unsigned long offset;
63645 +
63646 + if (!n)
63647 + return;
63648 +
63649 + if (ZERO_OR_NULL_PTR(ptr))
63650 + goto report;
63651 +
63652 + if (!virt_addr_valid(ptr))
63653 + return;
63654 +
63655 + page = virt_to_head_page(ptr);
63656 +
63657 + if (!PageSlab(page)) {
63658 + if (object_is_on_stack(ptr, n) == -1)
63659 + goto report;
63660 + return;
63661 + }
63662 +
63663 + cachep = page_get_cache(page);
63664 + if (!(cachep->flags & SLAB_USERCOPY))
63665 + goto report;
63666 +
63667 + slabp = page_get_slab(page);
63668 + objnr = obj_to_index(cachep, slabp, ptr);
63669 + BUG_ON(objnr >= cachep->num);
63670 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63671 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63672 + return;
63673 +
63674 +report:
63675 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63676 +#endif
63677 +
63678 +}
63679 +EXPORT_SYMBOL(check_object_size);
63680 +
63681 /**
63682 * ksize - get the actual amount of memory allocated for a given object
63683 * @objp: Pointer to the object
63684 diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
63685 --- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
63686 +++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
63687 @@ -29,7 +29,7 @@
63688 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63689 * alloc_pages() directly, allocating compound pages so the page order
63690 * does not have to be separately tracked, and also stores the exact
63691 - * allocation size in page->private so that it can be used to accurately
63692 + * allocation size in slob_page->size so that it can be used to accurately
63693 * provide ksize(). These objects are detected in kfree() because slob_page()
63694 * is false for them.
63695 *
63696 @@ -58,6 +58,7 @@
63697 */
63698
63699 #include <linux/kernel.h>
63700 +#include <linux/sched.h>
63701 #include <linux/slab.h>
63702 #include <linux/mm.h>
63703 #include <linux/swap.h> /* struct reclaim_state */
63704 @@ -102,7 +103,8 @@ struct slob_page {
63705 unsigned long flags; /* mandatory */
63706 atomic_t _count; /* mandatory */
63707 slobidx_t units; /* free units left in page */
63708 - unsigned long pad[2];
63709 + unsigned long pad[1];
63710 + unsigned long size; /* size when >=PAGE_SIZE */
63711 slob_t *free; /* first free slob_t in page */
63712 struct list_head list; /* linked list of free pages */
63713 };
63714 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63715 */
63716 static inline int is_slob_page(struct slob_page *sp)
63717 {
63718 - return PageSlab((struct page *)sp);
63719 + return PageSlab((struct page *)sp) && !sp->size;
63720 }
63721
63722 static inline void set_slob_page(struct slob_page *sp)
63723 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
63724
63725 static inline struct slob_page *slob_page(const void *addr)
63726 {
63727 - return (struct slob_page *)virt_to_page(addr);
63728 + return (struct slob_page *)virt_to_head_page(addr);
63729 }
63730
63731 /*
63732 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
63733 /*
63734 * Return the size of a slob block.
63735 */
63736 -static slobidx_t slob_units(slob_t *s)
63737 +static slobidx_t slob_units(const slob_t *s)
63738 {
63739 if (s->units > 0)
63740 return s->units;
63741 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
63742 /*
63743 * Return the next free slob block pointer after this one.
63744 */
63745 -static slob_t *slob_next(slob_t *s)
63746 +static slob_t *slob_next(const slob_t *s)
63747 {
63748 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
63749 slobidx_t next;
63750 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
63751 /*
63752 * Returns true if s is the last free block in its page.
63753 */
63754 -static int slob_last(slob_t *s)
63755 +static int slob_last(const slob_t *s)
63756 {
63757 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
63758 }
63759 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
63760 if (!page)
63761 return NULL;
63762
63763 + set_slob_page(page);
63764 return page_address(page);
63765 }
63766
63767 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
63768 if (!b)
63769 return NULL;
63770 sp = slob_page(b);
63771 - set_slob_page(sp);
63772
63773 spin_lock_irqsave(&slob_lock, flags);
63774 sp->units = SLOB_UNITS(PAGE_SIZE);
63775 sp->free = b;
63776 + sp->size = 0;
63777 INIT_LIST_HEAD(&sp->list);
63778 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
63779 set_slob_page_free(sp, slob_list);
63780 @@ -476,10 +479,9 @@ out:
63781 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
63782 */
63783
63784 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63785 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
63786 {
63787 - unsigned int *m;
63788 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63789 + slob_t *m;
63790 void *ret;
63791
63792 lockdep_trace_alloc(gfp);
63793 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
63794
63795 if (!m)
63796 return NULL;
63797 - *m = size;
63798 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
63799 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
63800 + m[0].units = size;
63801 + m[1].units = align;
63802 ret = (void *)m + align;
63803
63804 trace_kmalloc_node(_RET_IP_, ret,
63805 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
63806 gfp |= __GFP_COMP;
63807 ret = slob_new_pages(gfp, order, node);
63808 if (ret) {
63809 - struct page *page;
63810 - page = virt_to_page(ret);
63811 - page->private = size;
63812 + struct slob_page *sp;
63813 + sp = slob_page(ret);
63814 + sp->size = size;
63815 }
63816
63817 trace_kmalloc_node(_RET_IP_, ret,
63818 size, PAGE_SIZE << order, gfp, node);
63819 }
63820
63821 - kmemleak_alloc(ret, size, 1, gfp);
63822 + return ret;
63823 +}
63824 +
63825 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63826 +{
63827 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63828 + void *ret = __kmalloc_node_align(size, gfp, node, align);
63829 +
63830 + if (!ZERO_OR_NULL_PTR(ret))
63831 + kmemleak_alloc(ret, size, 1, gfp);
63832 return ret;
63833 }
63834 EXPORT_SYMBOL(__kmalloc_node);
63835 @@ -531,13 +545,88 @@ void kfree(const void *block)
63836 sp = slob_page(block);
63837 if (is_slob_page(sp)) {
63838 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63839 - unsigned int *m = (unsigned int *)(block - align);
63840 - slob_free(m, *m + align);
63841 - } else
63842 + slob_t *m = (slob_t *)(block - align);
63843 + slob_free(m, m[0].units + align);
63844 + } else {
63845 + clear_slob_page(sp);
63846 + free_slob_page(sp);
63847 + sp->size = 0;
63848 put_page(&sp->page);
63849 + }
63850 }
63851 EXPORT_SYMBOL(kfree);
63852
63853 +void check_object_size(const void *ptr, unsigned long n, bool to)
63854 +{
63855 +
63856 +#ifdef CONFIG_PAX_USERCOPY
63857 + struct slob_page *sp;
63858 + const slob_t *free;
63859 + const void *base;
63860 + unsigned long flags;
63861 +
63862 + if (!n)
63863 + return;
63864 +
63865 + if (ZERO_OR_NULL_PTR(ptr))
63866 + goto report;
63867 +
63868 + if (!virt_addr_valid(ptr))
63869 + return;
63870 +
63871 + sp = slob_page(ptr);
63872 + if (!PageSlab((struct page*)sp)) {
63873 + if (object_is_on_stack(ptr, n) == -1)
63874 + goto report;
63875 + return;
63876 + }
63877 +
63878 + if (sp->size) {
63879 + base = page_address(&sp->page);
63880 + if (base <= ptr && n <= sp->size - (ptr - base))
63881 + return;
63882 + goto report;
63883 + }
63884 +
63885 + /* some tricky double walking to find the chunk */
63886 + spin_lock_irqsave(&slob_lock, flags);
63887 + base = (void *)((unsigned long)ptr & PAGE_MASK);
63888 + free = sp->free;
63889 +
63890 + while (!slob_last(free) && (void *)free <= ptr) {
63891 + base = free + slob_units(free);
63892 + free = slob_next(free);
63893 + }
63894 +
63895 + while (base < (void *)free) {
63896 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
63897 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
63898 + int offset;
63899 +
63900 + if (ptr < base + align)
63901 + break;
63902 +
63903 + offset = ptr - base - align;
63904 + if (offset >= m) {
63905 + base += size;
63906 + continue;
63907 + }
63908 +
63909 + if (n > m - offset)
63910 + break;
63911 +
63912 + spin_unlock_irqrestore(&slob_lock, flags);
63913 + return;
63914 + }
63915 +
63916 + spin_unlock_irqrestore(&slob_lock, flags);
63917 +report:
63918 + pax_report_usercopy(ptr, n, to, NULL);
63919 +#endif
63920 +
63921 +}
63922 +EXPORT_SYMBOL(check_object_size);
63923 +
63924 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
63925 size_t ksize(const void *block)
63926 {
63927 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
63928 sp = slob_page(block);
63929 if (is_slob_page(sp)) {
63930 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63931 - unsigned int *m = (unsigned int *)(block - align);
63932 - return SLOB_UNITS(*m) * SLOB_UNIT;
63933 + slob_t *m = (slob_t *)(block - align);
63934 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
63935 } else
63936 - return sp->page.private;
63937 + return sp->size;
63938 }
63939 EXPORT_SYMBOL(ksize);
63940
63941 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
63942 {
63943 struct kmem_cache *c;
63944
63945 +#ifdef CONFIG_PAX_USERCOPY
63946 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
63947 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
63948 +#else
63949 c = slob_alloc(sizeof(struct kmem_cache),
63950 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
63951 +#endif
63952
63953 if (c) {
63954 c->name = name;
63955 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
63956 {
63957 void *b;
63958
63959 +#ifdef CONFIG_PAX_USERCOPY
63960 + b = __kmalloc_node_align(c->size, flags, node, c->align);
63961 +#else
63962 if (c->size < PAGE_SIZE) {
63963 b = slob_alloc(c->size, flags, c->align, node);
63964 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63965 SLOB_UNITS(c->size) * SLOB_UNIT,
63966 flags, node);
63967 } else {
63968 + struct slob_page *sp;
63969 +
63970 b = slob_new_pages(flags, get_order(c->size), node);
63971 + sp = slob_page(b);
63972 + sp->size = c->size;
63973 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63974 PAGE_SIZE << get_order(c->size),
63975 flags, node);
63976 }
63977 +#endif
63978
63979 if (c->ctor)
63980 c->ctor(b);
63981 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
63982
63983 static void __kmem_cache_free(void *b, int size)
63984 {
63985 - if (size < PAGE_SIZE)
63986 + struct slob_page *sp = slob_page(b);
63987 +
63988 + if (is_slob_page(sp))
63989 slob_free(b, size);
63990 - else
63991 + else {
63992 + clear_slob_page(sp);
63993 + free_slob_page(sp);
63994 + sp->size = 0;
63995 slob_free_pages(b, get_order(size));
63996 + }
63997 }
63998
63999 static void kmem_rcu_free(struct rcu_head *head)
64000 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64001
64002 void kmem_cache_free(struct kmem_cache *c, void *b)
64003 {
64004 + int size = c->size;
64005 +
64006 +#ifdef CONFIG_PAX_USERCOPY
64007 + if (size + c->align < PAGE_SIZE) {
64008 + size += c->align;
64009 + b -= c->align;
64010 + }
64011 +#endif
64012 +
64013 kmemleak_free_recursive(b, c->flags);
64014 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64015 struct slob_rcu *slob_rcu;
64016 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64017 - slob_rcu->size = c->size;
64018 + slob_rcu = b + (size - sizeof(struct slob_rcu));
64019 + slob_rcu->size = size;
64020 call_rcu(&slob_rcu->head, kmem_rcu_free);
64021 } else {
64022 - __kmem_cache_free(b, c->size);
64023 + __kmem_cache_free(b, size);
64024 }
64025
64026 +#ifdef CONFIG_PAX_USERCOPY
64027 + trace_kfree(_RET_IP_, b);
64028 +#else
64029 trace_kmem_cache_free(_RET_IP_, b);
64030 +#endif
64031 +
64032 }
64033 EXPORT_SYMBOL(kmem_cache_free);
64034
64035 diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
64036 --- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
64037 +++ linux-3.0.4/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
64038 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
64039 if (!t->addr)
64040 return;
64041
64042 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64043 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64044 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64045 }
64046
64047 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
64048
64049 page = virt_to_head_page(x);
64050
64051 + BUG_ON(!PageSlab(page));
64052 +
64053 slab_free(s, page, x, _RET_IP_);
64054
64055 trace_kmem_cache_free(_RET_IP_, x);
64056 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
64057 * Merge control. If this is set then no merging of slab caches will occur.
64058 * (Could be removed. This was introduced to pacify the merge skeptics.)
64059 */
64060 -static int slub_nomerge;
64061 +static int slub_nomerge = 1;
64062
64063 /*
64064 * Calculate the order of allocation given an slab object size.
64065 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
64066 * list to avoid pounding the page allocator excessively.
64067 */
64068 set_min_partial(s, ilog2(s->size));
64069 - s->refcount = 1;
64070 + atomic_set(&s->refcount, 1);
64071 #ifdef CONFIG_NUMA
64072 s->remote_node_defrag_ratio = 1000;
64073 #endif
64074 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
64075 void kmem_cache_destroy(struct kmem_cache *s)
64076 {
64077 down_write(&slub_lock);
64078 - s->refcount--;
64079 - if (!s->refcount) {
64080 + if (atomic_dec_and_test(&s->refcount)) {
64081 list_del(&s->list);
64082 if (kmem_cache_close(s)) {
64083 printk(KERN_ERR "SLUB %s: %s called for cache that "
64084 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
64085 EXPORT_SYMBOL(__kmalloc_node);
64086 #endif
64087
64088 +void check_object_size(const void *ptr, unsigned long n, bool to)
64089 +{
64090 +
64091 +#ifdef CONFIG_PAX_USERCOPY
64092 + struct page *page;
64093 + struct kmem_cache *s = NULL;
64094 + unsigned long offset;
64095 +
64096 + if (!n)
64097 + return;
64098 +
64099 + if (ZERO_OR_NULL_PTR(ptr))
64100 + goto report;
64101 +
64102 + if (!virt_addr_valid(ptr))
64103 + return;
64104 +
64105 + page = virt_to_head_page(ptr);
64106 +
64107 + if (!PageSlab(page)) {
64108 + if (object_is_on_stack(ptr, n) == -1)
64109 + goto report;
64110 + return;
64111 + }
64112 +
64113 + s = page->slab;
64114 + if (!(s->flags & SLAB_USERCOPY))
64115 + goto report;
64116 +
64117 + offset = (ptr - page_address(page)) % s->size;
64118 + if (offset <= s->objsize && n <= s->objsize - offset)
64119 + return;
64120 +
64121 +report:
64122 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64123 +#endif
64124 +
64125 +}
64126 +EXPORT_SYMBOL(check_object_size);
64127 +
64128 size_t ksize(const void *object)
64129 {
64130 struct page *page;
64131 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
64132 int node;
64133
64134 list_add(&s->list, &slab_caches);
64135 - s->refcount = -1;
64136 + atomic_set(&s->refcount, -1);
64137
64138 for_each_node_state(node, N_NORMAL_MEMORY) {
64139 struct kmem_cache_node *n = get_node(s, node);
64140 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
64141
64142 /* Caches that are not of the two-to-the-power-of size */
64143 if (KMALLOC_MIN_SIZE <= 32) {
64144 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64145 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64146 caches++;
64147 }
64148
64149 if (KMALLOC_MIN_SIZE <= 64) {
64150 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64151 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64152 caches++;
64153 }
64154
64155 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64156 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64157 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64158 caches++;
64159 }
64160
64161 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
64162 /*
64163 * We may have set a slab to be unmergeable during bootstrap.
64164 */
64165 - if (s->refcount < 0)
64166 + if (atomic_read(&s->refcount) < 0)
64167 return 1;
64168
64169 return 0;
64170 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
64171 down_write(&slub_lock);
64172 s = find_mergeable(size, align, flags, name, ctor);
64173 if (s) {
64174 - s->refcount++;
64175 + atomic_inc(&s->refcount);
64176 /*
64177 * Adjust the object sizes so that we clear
64178 * the complete object on kzalloc.
64179 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
64180 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64181
64182 if (sysfs_slab_alias(s, name)) {
64183 - s->refcount--;
64184 + atomic_dec(&s->refcount);
64185 goto err;
64186 }
64187 up_write(&slub_lock);
64188 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
64189
64190 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64191 {
64192 - return sprintf(buf, "%d\n", s->refcount - 1);
64193 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64194 }
64195 SLAB_ATTR_RO(aliases);
64196
64197 @@ -4894,7 +4935,13 @@ static const struct file_operations proc
64198
64199 static int __init slab_proc_init(void)
64200 {
64201 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64202 + mode_t gr_mode = S_IRUGO;
64203 +
64204 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64205 + gr_mode = S_IRUSR;
64206 +#endif
64207 +
64208 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64209 return 0;
64210 }
64211 module_init(slab_proc_init);
64212 diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
64213 --- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
64214 +++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
64215 @@ -31,6 +31,7 @@
64216 #include <linux/backing-dev.h>
64217 #include <linux/memcontrol.h>
64218 #include <linux/gfp.h>
64219 +#include <linux/hugetlb.h>
64220
64221 #include "internal.h"
64222
64223 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64224
64225 __page_cache_release(page);
64226 dtor = get_compound_page_dtor(page);
64227 + if (!PageHuge(page))
64228 + BUG_ON(dtor != free_compound_page);
64229 (*dtor)(page);
64230 }
64231
64232 diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
64233 --- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
64234 +++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
64235 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
64236
64237 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64238 /* Activity counter to indicate that a swapon or swapoff has occurred */
64239 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64240 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64241
64242 static inline unsigned char swap_count(unsigned char ent)
64243 {
64244 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64245 }
64246 filp_close(swap_file, NULL);
64247 err = 0;
64248 - atomic_inc(&proc_poll_event);
64249 + atomic_inc_unchecked(&proc_poll_event);
64250 wake_up_interruptible(&proc_poll_wait);
64251
64252 out_dput:
64253 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
64254
64255 poll_wait(file, &proc_poll_wait, wait);
64256
64257 - if (s->event != atomic_read(&proc_poll_event)) {
64258 - s->event = atomic_read(&proc_poll_event);
64259 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64260 + s->event = atomic_read_unchecked(&proc_poll_event);
64261 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64262 }
64263
64264 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
64265 }
64266
64267 s->seq.private = s;
64268 - s->event = atomic_read(&proc_poll_event);
64269 + s->event = atomic_read_unchecked(&proc_poll_event);
64270 return ret;
64271 }
64272
64273 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64274 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64275
64276 mutex_unlock(&swapon_mutex);
64277 - atomic_inc(&proc_poll_event);
64278 + atomic_inc_unchecked(&proc_poll_event);
64279 wake_up_interruptible(&proc_poll_wait);
64280
64281 if (S_ISREG(inode->i_mode))
64282 diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
64283 --- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
64284 +++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
64285 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
64286 * allocated buffer. Use this if you don't want to free the buffer immediately
64287 * like, for example, with RCU.
64288 */
64289 +#undef __krealloc
64290 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64291 {
64292 void *ret;
64293 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
64294 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64295 * %NULL pointer, the object pointed to is freed.
64296 */
64297 +#undef krealloc
64298 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64299 {
64300 void *ret;
64301 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
64302 void arch_pick_mmap_layout(struct mm_struct *mm)
64303 {
64304 mm->mmap_base = TASK_UNMAPPED_BASE;
64305 +
64306 +#ifdef CONFIG_PAX_RANDMMAP
64307 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64308 + mm->mmap_base += mm->delta_mmap;
64309 +#endif
64310 +
64311 mm->get_unmapped_area = arch_get_unmapped_area;
64312 mm->unmap_area = arch_unmap_area;
64313 }
64314 diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
64315 --- linux-3.0.4/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
64316 +++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
64317 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64318
64319 pte = pte_offset_kernel(pmd, addr);
64320 do {
64321 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64322 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64323 +
64324 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64325 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64326 + BUG_ON(!pte_exec(*pte));
64327 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64328 + continue;
64329 + }
64330 +#endif
64331 +
64332 + {
64333 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64334 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64335 + }
64336 } while (pte++, addr += PAGE_SIZE, addr != end);
64337 }
64338
64339 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64340 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64341 {
64342 pte_t *pte;
64343 + int ret = -ENOMEM;
64344
64345 /*
64346 * nr is a running index into the array which helps higher level
64347 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64348 pte = pte_alloc_kernel(pmd, addr);
64349 if (!pte)
64350 return -ENOMEM;
64351 +
64352 + pax_open_kernel();
64353 do {
64354 struct page *page = pages[*nr];
64355
64356 - if (WARN_ON(!pte_none(*pte)))
64357 - return -EBUSY;
64358 - if (WARN_ON(!page))
64359 - return -ENOMEM;
64360 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64361 + if (pgprot_val(prot) & _PAGE_NX)
64362 +#endif
64363 +
64364 + if (WARN_ON(!pte_none(*pte))) {
64365 + ret = -EBUSY;
64366 + goto out;
64367 + }
64368 + if (WARN_ON(!page)) {
64369 + ret = -ENOMEM;
64370 + goto out;
64371 + }
64372 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64373 (*nr)++;
64374 } while (pte++, addr += PAGE_SIZE, addr != end);
64375 - return 0;
64376 + ret = 0;
64377 +out:
64378 + pax_close_kernel();
64379 + return ret;
64380 }
64381
64382 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64383 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64384 * and fall back on vmalloc() if that fails. Others
64385 * just put it in the vmalloc space.
64386 */
64387 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64388 +#ifdef CONFIG_MODULES
64389 +#ifdef MODULES_VADDR
64390 unsigned long addr = (unsigned long)x;
64391 if (addr >= MODULES_VADDR && addr < MODULES_END)
64392 return 1;
64393 #endif
64394 +
64395 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64396 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64397 + return 1;
64398 +#endif
64399 +
64400 +#endif
64401 +
64402 return is_vmalloc_addr(x);
64403 }
64404
64405 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64406
64407 if (!pgd_none(*pgd)) {
64408 pud_t *pud = pud_offset(pgd, addr);
64409 +#ifdef CONFIG_X86
64410 + if (!pud_large(*pud))
64411 +#endif
64412 if (!pud_none(*pud)) {
64413 pmd_t *pmd = pmd_offset(pud, addr);
64414 +#ifdef CONFIG_X86
64415 + if (!pmd_large(*pmd))
64416 +#endif
64417 if (!pmd_none(*pmd)) {
64418 pte_t *ptep, pte;
64419
64420 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
64421 struct vm_struct *area;
64422
64423 BUG_ON(in_interrupt());
64424 +
64425 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64426 + if (flags & VM_KERNEXEC) {
64427 + if (start != VMALLOC_START || end != VMALLOC_END)
64428 + return NULL;
64429 + start = (unsigned long)MODULES_EXEC_VADDR;
64430 + end = (unsigned long)MODULES_EXEC_END;
64431 + }
64432 +#endif
64433 +
64434 if (flags & VM_IOREMAP) {
64435 int bit = fls(size);
64436
64437 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
64438 if (count > totalram_pages)
64439 return NULL;
64440
64441 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64442 + if (!(pgprot_val(prot) & _PAGE_NX))
64443 + flags |= VM_KERNEXEC;
64444 +#endif
64445 +
64446 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64447 __builtin_return_address(0));
64448 if (!area)
64449 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
64450 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64451 return NULL;
64452
64453 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64454 + if (!(pgprot_val(prot) & _PAGE_NX))
64455 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64456 + node, gfp_mask, caller);
64457 + else
64458 +#endif
64459 +
64460 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64461 gfp_mask, caller);
64462
64463 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
64464 gfp_mask, prot, node, caller);
64465 }
64466
64467 +#undef __vmalloc
64468 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64469 {
64470 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64471 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
64472 * For tight control over page level allocator and protection flags
64473 * use __vmalloc() instead.
64474 */
64475 +#undef vmalloc
64476 void *vmalloc(unsigned long size)
64477 {
64478 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64479 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
64480 * For tight control over page level allocator and protection flags
64481 * use __vmalloc() instead.
64482 */
64483 +#undef vzalloc
64484 void *vzalloc(unsigned long size)
64485 {
64486 return __vmalloc_node_flags(size, -1,
64487 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
64488 * The resulting memory area is zeroed so it can be mapped to userspace
64489 * without leaking data.
64490 */
64491 +#undef vmalloc_user
64492 void *vmalloc_user(unsigned long size)
64493 {
64494 struct vm_struct *area;
64495 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
64496 * For tight control over page level allocator and protection flags
64497 * use __vmalloc() instead.
64498 */
64499 +#undef vmalloc_node
64500 void *vmalloc_node(unsigned long size, int node)
64501 {
64502 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64503 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
64504 * For tight control over page level allocator and protection flags
64505 * use __vmalloc_node() instead.
64506 */
64507 +#undef vzalloc_node
64508 void *vzalloc_node(unsigned long size, int node)
64509 {
64510 return __vmalloc_node_flags(size, node,
64511 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
64512 * For tight control over page level allocator and protection flags
64513 * use __vmalloc() instead.
64514 */
64515 -
64516 +#undef vmalloc_exec
64517 void *vmalloc_exec(unsigned long size)
64518 {
64519 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64520 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64521 -1, __builtin_return_address(0));
64522 }
64523
64524 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
64525 * Allocate enough 32bit PA addressable pages to cover @size from the
64526 * page level allocator and map them into contiguous kernel virtual space.
64527 */
64528 +#undef vmalloc_32
64529 void *vmalloc_32(unsigned long size)
64530 {
64531 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64532 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
64533 * The resulting memory area is 32bit addressable and zeroed so it can be
64534 * mapped to userspace without leaking data.
64535 */
64536 +#undef vmalloc_32_user
64537 void *vmalloc_32_user(unsigned long size)
64538 {
64539 struct vm_struct *area;
64540 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
64541 unsigned long uaddr = vma->vm_start;
64542 unsigned long usize = vma->vm_end - vma->vm_start;
64543
64544 + BUG_ON(vma->vm_mirror);
64545 +
64546 if ((PAGE_SIZE-1) & (unsigned long)addr)
64547 return -EINVAL;
64548
64549 diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
64550 --- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
64551 +++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
64552 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64553 *
64554 * vm_stat contains the global counters
64555 */
64556 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64557 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64558 EXPORT_SYMBOL(vm_stat);
64559
64560 #ifdef CONFIG_SMP
64561 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64562 v = p->vm_stat_diff[i];
64563 p->vm_stat_diff[i] = 0;
64564 local_irq_restore(flags);
64565 - atomic_long_add(v, &zone->vm_stat[i]);
64566 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64567 global_diff[i] += v;
64568 #ifdef CONFIG_NUMA
64569 /* 3 seconds idle till flush */
64570 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64571
64572 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64573 if (global_diff[i])
64574 - atomic_long_add(global_diff[i], &vm_stat[i]);
64575 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64576 }
64577
64578 #endif
64579 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
64580 start_cpu_timer(cpu);
64581 #endif
64582 #ifdef CONFIG_PROC_FS
64583 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64584 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64585 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64586 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64587 + {
64588 + mode_t gr_mode = S_IRUGO;
64589 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64590 + gr_mode = S_IRUSR;
64591 +#endif
64592 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64593 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64594 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64595 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64596 +#else
64597 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64598 +#endif
64599 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64600 + }
64601 #endif
64602 return 0;
64603 }
64604 diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
64605 --- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
64606 +++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
64607 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
64608 err = -EPERM;
64609 if (!capable(CAP_NET_ADMIN))
64610 break;
64611 - if ((args.u.name_type >= 0) &&
64612 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64613 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64614 struct vlan_net *vn;
64615
64616 vn = net_generic(net, vlan_net_id);
64617 diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
64618 --- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
64619 +++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
64620 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64621 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64622 return 1;
64623 atm_return(vcc, truesize);
64624 - atomic_inc(&vcc->stats->rx_drop);
64625 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64626 return 0;
64627 }
64628 EXPORT_SYMBOL(atm_charge);
64629 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64630 }
64631 }
64632 atm_return(vcc, guess);
64633 - atomic_inc(&vcc->stats->rx_drop);
64634 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64635 return NULL;
64636 }
64637 EXPORT_SYMBOL(atm_alloc_charge);
64638 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64639
64640 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64641 {
64642 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64643 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64644 __SONET_ITEMS
64645 #undef __HANDLE_ITEM
64646 }
64647 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64648
64649 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64650 {
64651 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64652 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64653 __SONET_ITEMS
64654 #undef __HANDLE_ITEM
64655 }
64656 diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
64657 --- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
64658 +++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
64659 @@ -48,7 +48,7 @@ struct lane2_ops {
64660 const u8 *tlvs, u32 sizeoftlvs);
64661 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64662 const u8 *tlvs, u32 sizeoftlvs);
64663 -};
64664 +} __no_const;
64665
64666 /*
64667 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64668 diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
64669 --- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
64670 +++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
64671 @@ -33,7 +33,7 @@ struct mpoa_client {
64672 struct mpc_parameters parameters; /* parameters for this client */
64673
64674 const struct net_device_ops *old_ops;
64675 - struct net_device_ops new_ops;
64676 + net_device_ops_no_const new_ops;
64677 };
64678
64679
64680 diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
64681 --- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
64682 +++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
64683 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64684 struct timeval now;
64685 struct k_message msg;
64686
64687 + pax_track_stack();
64688 +
64689 do_gettimeofday(&now);
64690
64691 read_lock_bh(&client->ingress_lock);
64692 diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
64693 --- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
64694 +++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
64695 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64696 const struct k_atm_aal_stats *stats)
64697 {
64698 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64699 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64700 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64701 - atomic_read(&stats->rx_drop));
64702 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64703 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64704 + atomic_read_unchecked(&stats->rx_drop));
64705 }
64706
64707 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64708 diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
64709 --- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
64710 +++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
64711 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
64712 static void copy_aal_stats(struct k_atm_aal_stats *from,
64713 struct atm_aal_stats *to)
64714 {
64715 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64716 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64717 __AAL_STAT_ITEMS
64718 #undef __HANDLE_ITEM
64719 }
64720 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
64721 static void subtract_aal_stats(struct k_atm_aal_stats *from,
64722 struct atm_aal_stats *to)
64723 {
64724 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64725 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
64726 __AAL_STAT_ITEMS
64727 #undef __HANDLE_ITEM
64728 }
64729 diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
64730 --- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
64731 +++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
64732 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
64733 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
64734 dev_add_pack(&hard_iface->batman_adv_ptype);
64735
64736 - atomic_set(&hard_iface->seqno, 1);
64737 - atomic_set(&hard_iface->frag_seqno, 1);
64738 + atomic_set_unchecked(&hard_iface->seqno, 1);
64739 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
64740 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
64741 hard_iface->net_dev->name);
64742
64743 diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
64744 --- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
64745 +++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
64746 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
64747 return;
64748
64749 /* could be changed by schedule_own_packet() */
64750 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
64751 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
64752
64753 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
64754
64755 diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
64756 --- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
64757 +++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
64758 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
64759
64760 /* change sequence number to network order */
64761 batman_packet->seqno =
64762 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
64763 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
64764
64765 if (vis_server == VIS_TYPE_SERVER_SYNC)
64766 batman_packet->flags |= VIS_SERVER;
64767 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
64768 else
64769 batman_packet->gw_flags = 0;
64770
64771 - atomic_inc(&hard_iface->seqno);
64772 + atomic_inc_unchecked(&hard_iface->seqno);
64773
64774 slide_own_bcast_window(hard_iface);
64775 send_time = own_send_time(bat_priv);
64776 diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
64777 --- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
64778 +++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
64779 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
64780
64781 /* set broadcast sequence number */
64782 bcast_packet->seqno =
64783 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
64784 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
64785
64786 add_bcast_packet_to_list(bat_priv, skb);
64787
64788 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
64789 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
64790
64791 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
64792 - atomic_set(&bat_priv->bcast_seqno, 1);
64793 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
64794 atomic_set(&bat_priv->tt_local_changed, 0);
64795
64796 bat_priv->primary_if = NULL;
64797 diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
64798 --- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
64799 +++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
64800 @@ -38,8 +38,8 @@ struct hard_iface {
64801 int16_t if_num;
64802 char if_status;
64803 struct net_device *net_dev;
64804 - atomic_t seqno;
64805 - atomic_t frag_seqno;
64806 + atomic_unchecked_t seqno;
64807 + atomic_unchecked_t frag_seqno;
64808 unsigned char *packet_buff;
64809 int packet_len;
64810 struct kobject *hardif_obj;
64811 @@ -142,7 +142,7 @@ struct bat_priv {
64812 atomic_t orig_interval; /* uint */
64813 atomic_t hop_penalty; /* uint */
64814 atomic_t log_level; /* uint */
64815 - atomic_t bcast_seqno;
64816 + atomic_unchecked_t bcast_seqno;
64817 atomic_t bcast_queue_left;
64818 atomic_t batman_queue_left;
64819 char num_ifaces;
64820 diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
64821 --- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
64822 +++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
64823 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
64824 frag1->flags = UNI_FRAG_HEAD | large_tail;
64825 frag2->flags = large_tail;
64826
64827 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
64828 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
64829 frag1->seqno = htons(seqno - 1);
64830 frag2->seqno = htons(seqno);
64831
64832 diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
64833 --- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
64834 +++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
64835 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
64836 nexthdr = ip6h->nexthdr;
64837 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
64838
64839 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
64840 + if (nexthdr != IPPROTO_ICMPV6)
64841 return 0;
64842
64843 /* Okay, we found ICMPv6 header */
64844 diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
64845 --- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
64846 +++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
64847 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
64848 tmp.valid_hooks = t->table->valid_hooks;
64849 }
64850 mutex_unlock(&ebt_mutex);
64851 - if (copy_to_user(user, &tmp, *len) != 0){
64852 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
64853 BUGPRINT("c2u Didn't work\n");
64854 ret = -EFAULT;
64855 break;
64856 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
64857 int ret;
64858 void __user *pos;
64859
64860 + pax_track_stack();
64861 +
64862 memset(&tinfo, 0, sizeof(tinfo));
64863
64864 if (cmd == EBT_SO_GET_ENTRIES) {
64865 diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
64866 --- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
64867 +++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
64868 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
64869 #ifdef CONFIG_DEBUG_FS
64870 struct debug_fs_counter {
64871 atomic_t caif_nr_socks;
64872 - atomic_t caif_sock_create;
64873 - atomic_t num_connect_req;
64874 - atomic_t num_connect_resp;
64875 - atomic_t num_connect_fail_resp;
64876 - atomic_t num_disconnect;
64877 - atomic_t num_remote_shutdown_ind;
64878 - atomic_t num_tx_flow_off_ind;
64879 - atomic_t num_tx_flow_on_ind;
64880 - atomic_t num_rx_flow_off;
64881 - atomic_t num_rx_flow_on;
64882 + atomic_unchecked_t caif_sock_create;
64883 + atomic_unchecked_t num_connect_req;
64884 + atomic_unchecked_t num_connect_resp;
64885 + atomic_unchecked_t num_connect_fail_resp;
64886 + atomic_unchecked_t num_disconnect;
64887 + atomic_unchecked_t num_remote_shutdown_ind;
64888 + atomic_unchecked_t num_tx_flow_off_ind;
64889 + atomic_unchecked_t num_tx_flow_on_ind;
64890 + atomic_unchecked_t num_rx_flow_off;
64891 + atomic_unchecked_t num_rx_flow_on;
64892 };
64893 static struct debug_fs_counter cnt;
64894 #define dbfs_atomic_inc(v) atomic_inc_return(v)
64895 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
64896 #define dbfs_atomic_dec(v) atomic_dec_return(v)
64897 #else
64898 #define dbfs_atomic_inc(v) 0
64899 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
64900 atomic_read(&cf_sk->sk.sk_rmem_alloc),
64901 sk_rcvbuf_lowwater(cf_sk));
64902 set_rx_flow_off(cf_sk);
64903 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64904 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64905 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64906 }
64907
64908 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
64909 set_rx_flow_off(cf_sk);
64910 if (net_ratelimit())
64911 pr_debug("sending flow OFF due to rmem_schedule\n");
64912 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64913 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64914 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64915 }
64916 skb->dev = NULL;
64917 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
64918 switch (flow) {
64919 case CAIF_CTRLCMD_FLOW_ON_IND:
64920 /* OK from modem to start sending again */
64921 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
64922 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
64923 set_tx_flow_on(cf_sk);
64924 cf_sk->sk.sk_state_change(&cf_sk->sk);
64925 break;
64926
64927 case CAIF_CTRLCMD_FLOW_OFF_IND:
64928 /* Modem asks us to shut up */
64929 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
64930 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
64931 set_tx_flow_off(cf_sk);
64932 cf_sk->sk.sk_state_change(&cf_sk->sk);
64933 break;
64934 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
64935 /* We're now connected */
64936 caif_client_register_refcnt(&cf_sk->layer,
64937 cfsk_hold, cfsk_put);
64938 - dbfs_atomic_inc(&cnt.num_connect_resp);
64939 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
64940 cf_sk->sk.sk_state = CAIF_CONNECTED;
64941 set_tx_flow_on(cf_sk);
64942 cf_sk->sk.sk_state_change(&cf_sk->sk);
64943 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
64944
64945 case CAIF_CTRLCMD_INIT_FAIL_RSP:
64946 /* Connect request failed */
64947 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
64948 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
64949 cf_sk->sk.sk_err = ECONNREFUSED;
64950 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
64951 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64952 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
64953
64954 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
64955 /* Modem has closed this connection, or device is down. */
64956 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
64957 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
64958 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64959 cf_sk->sk.sk_err = ECONNRESET;
64960 set_rx_flow_on(cf_sk);
64961 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
64962 return;
64963
64964 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
64965 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
64966 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
64967 set_rx_flow_on(cf_sk);
64968 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
64969 }
64970 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
64971 /*ifindex = id of the interface.*/
64972 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
64973
64974 - dbfs_atomic_inc(&cnt.num_connect_req);
64975 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
64976 cf_sk->layer.receive = caif_sktrecv_cb;
64977
64978 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
64979 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
64980 spin_unlock_bh(&sk->sk_receive_queue.lock);
64981 sock->sk = NULL;
64982
64983 - dbfs_atomic_inc(&cnt.num_disconnect);
64984 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
64985
64986 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
64987 if (cf_sk->debugfs_socket_dir != NULL)
64988 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
64989 cf_sk->conn_req.protocol = protocol;
64990 /* Increase the number of sockets created. */
64991 dbfs_atomic_inc(&cnt.caif_nr_socks);
64992 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
64993 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
64994 #ifdef CONFIG_DEBUG_FS
64995 if (!IS_ERR(debugfsdir)) {
64996
64997 diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
64998 --- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
64999 +++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
65000 @@ -9,6 +9,7 @@
65001 #include <linux/stddef.h>
65002 #include <linux/spinlock.h>
65003 #include <linux/slab.h>
65004 +#include <linux/sched.h>
65005 #include <net/caif/caif_layer.h>
65006 #include <net/caif/cfpkt.h>
65007 #include <net/caif/cfctrl.h>
65008 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
65009 dev_info.id = 0xff;
65010 memset(this, 0, sizeof(*this));
65011 cfsrvl_init(&this->serv, 0, &dev_info, false);
65012 - atomic_set(&this->req_seq_no, 1);
65013 - atomic_set(&this->rsp_seq_no, 1);
65014 + atomic_set_unchecked(&this->req_seq_no, 1);
65015 + atomic_set_unchecked(&this->rsp_seq_no, 1);
65016 this->serv.layer.receive = cfctrl_recv;
65017 sprintf(this->serv.layer.name, "ctrl");
65018 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65019 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
65020 struct cfctrl_request_info *req)
65021 {
65022 spin_lock_bh(&ctrl->info_list_lock);
65023 - atomic_inc(&ctrl->req_seq_no);
65024 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
65025 + atomic_inc_unchecked(&ctrl->req_seq_no);
65026 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65027 list_add_tail(&req->list, &ctrl->list);
65028 spin_unlock_bh(&ctrl->info_list_lock);
65029 }
65030 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
65031 if (p != first)
65032 pr_warn("Requests are not received in order\n");
65033
65034 - atomic_set(&ctrl->rsp_seq_no,
65035 + atomic_set_unchecked(&ctrl->rsp_seq_no,
65036 p->sequence_no);
65037 list_del(&p->list);
65038 goto out;
65039 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
65040 struct cfctrl *cfctrl = container_obj(layer);
65041 struct cfctrl_request_info rsp, *req;
65042
65043 + pax_track_stack();
65044
65045 cfpkt_extr_head(pkt, &cmdrsp, 1);
65046 cmd = cmdrsp & CFCTRL_CMD_MASK;
65047 diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
65048 --- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
65049 +++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
65050 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65051 }
65052
65053 kfree_skb(skb);
65054 - atomic_inc(&sk->sk_drops);
65055 + atomic_inc_unchecked(&sk->sk_drops);
65056 sk_mem_reclaim_partial(sk);
65057
65058 return err;
65059 diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
65060 --- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
65061 +++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
65062 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65063 if (no_module && capable(CAP_NET_ADMIN))
65064 no_module = request_module("netdev-%s", name);
65065 if (no_module && capable(CAP_SYS_MODULE)) {
65066 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65067 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65068 +#else
65069 if (!request_module("%s", name))
65070 pr_err("Loading kernel module for a network device "
65071 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65072 "instead\n", name);
65073 +#endif
65074 }
65075 }
65076 EXPORT_SYMBOL(dev_load);
65077 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
65078
65079 struct dev_gso_cb {
65080 void (*destructor)(struct sk_buff *skb);
65081 -};
65082 +} __no_const;
65083
65084 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65085
65086 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
65087 }
65088 EXPORT_SYMBOL(netif_rx_ni);
65089
65090 -static void net_tx_action(struct softirq_action *h)
65091 +static void net_tx_action(void)
65092 {
65093 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65094
65095 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
65096 }
65097 EXPORT_SYMBOL(netif_napi_del);
65098
65099 -static void net_rx_action(struct softirq_action *h)
65100 +static void net_rx_action(void)
65101 {
65102 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65103 unsigned long time_limit = jiffies + 2;
65104 diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
65105 --- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
65106 +++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
65107 @@ -60,7 +60,7 @@ struct flow_cache {
65108 struct timer_list rnd_timer;
65109 };
65110
65111 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65112 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65113 EXPORT_SYMBOL(flow_cache_genid);
65114 static struct flow_cache flow_cache_global;
65115 static struct kmem_cache *flow_cachep __read_mostly;
65116 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65117
65118 static int flow_entry_valid(struct flow_cache_entry *fle)
65119 {
65120 - if (atomic_read(&flow_cache_genid) != fle->genid)
65121 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65122 return 0;
65123 if (fle->object && !fle->object->ops->check(fle->object))
65124 return 0;
65125 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65126 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65127 fcp->hash_count++;
65128 }
65129 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65130 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65131 flo = fle->object;
65132 if (!flo)
65133 goto ret_object;
65134 @@ -274,7 +274,7 @@ nocache:
65135 }
65136 flo = resolver(net, key, family, dir, flo, ctx);
65137 if (fle) {
65138 - fle->genid = atomic_read(&flow_cache_genid);
65139 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65140 if (!IS_ERR(flo))
65141 fle->object = flo;
65142 else
65143 diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
65144 --- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
65145 +++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
65146 @@ -56,7 +56,7 @@
65147 struct rtnl_link {
65148 rtnl_doit_func doit;
65149 rtnl_dumpit_func dumpit;
65150 -};
65151 +} __no_const;
65152
65153 static DEFINE_MUTEX(rtnl_mutex);
65154
65155 diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
65156 --- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
65157 +++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
65158 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
65159 struct sock *sk = skb->sk;
65160 int ret = 0;
65161
65162 + pax_track_stack();
65163 +
65164 if (splice_grow_spd(pipe, &spd))
65165 return -ENOMEM;
65166
65167 diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
65168 --- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
65169 +++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
65170 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65171 */
65172 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65173 (unsigned)sk->sk_rcvbuf) {
65174 - atomic_inc(&sk->sk_drops);
65175 + atomic_inc_unchecked(&sk->sk_drops);
65176 return -ENOMEM;
65177 }
65178
65179 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65180 return err;
65181
65182 if (!sk_rmem_schedule(sk, skb->truesize)) {
65183 - atomic_inc(&sk->sk_drops);
65184 + atomic_inc_unchecked(&sk->sk_drops);
65185 return -ENOBUFS;
65186 }
65187
65188 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65189 skb_dst_force(skb);
65190
65191 spin_lock_irqsave(&list->lock, flags);
65192 - skb->dropcount = atomic_read(&sk->sk_drops);
65193 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65194 __skb_queue_tail(list, skb);
65195 spin_unlock_irqrestore(&list->lock, flags);
65196
65197 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65198 skb->dev = NULL;
65199
65200 if (sk_rcvqueues_full(sk, skb)) {
65201 - atomic_inc(&sk->sk_drops);
65202 + atomic_inc_unchecked(&sk->sk_drops);
65203 goto discard_and_relse;
65204 }
65205 if (nested)
65206 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65207 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65208 } else if (sk_add_backlog(sk, skb)) {
65209 bh_unlock_sock(sk);
65210 - atomic_inc(&sk->sk_drops);
65211 + atomic_inc_unchecked(&sk->sk_drops);
65212 goto discard_and_relse;
65213 }
65214
65215 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
65216 if (len > sizeof(peercred))
65217 len = sizeof(peercred);
65218 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
65219 - if (copy_to_user(optval, &peercred, len))
65220 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
65221 return -EFAULT;
65222 goto lenout;
65223 }
65224 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65225 return -ENOTCONN;
65226 if (lv < len)
65227 return -EINVAL;
65228 - if (copy_to_user(optval, address, len))
65229 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65230 return -EFAULT;
65231 goto lenout;
65232 }
65233 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65234
65235 if (len > lv)
65236 len = lv;
65237 - if (copy_to_user(optval, &v, len))
65238 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65239 return -EFAULT;
65240 lenout:
65241 if (put_user(len, optlen))
65242 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65243 */
65244 smp_wmb();
65245 atomic_set(&sk->sk_refcnt, 1);
65246 - atomic_set(&sk->sk_drops, 0);
65247 + atomic_set_unchecked(&sk->sk_drops, 0);
65248 }
65249 EXPORT_SYMBOL(sock_init_data);
65250
65251 diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
65252 --- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
65253 +++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
65254 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65255
65256 if (len > *lenp) len = *lenp;
65257
65258 - if (copy_to_user(buffer, addr, len))
65259 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65260 return -EFAULT;
65261
65262 *lenp = len;
65263 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65264
65265 if (len > *lenp) len = *lenp;
65266
65267 - if (copy_to_user(buffer, devname, len))
65268 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65269 return -EFAULT;
65270
65271 *lenp = len;
65272 diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
65273 --- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
65274 +++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
65275 @@ -4,7 +4,7 @@
65276
65277 config ECONET
65278 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65279 - depends on EXPERIMENTAL && INET
65280 + depends on EXPERIMENTAL && INET && BROKEN
65281 ---help---
65282 Econet is a fairly old and slow networking protocol mainly used by
65283 Acorn computers to access file and print servers. It uses native
65284 diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
65285 --- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
65286 +++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
65287 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
65288 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65289 fib_sync_up(dev);
65290 #endif
65291 - atomic_inc(&net->ipv4.dev_addr_genid);
65292 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65293 rt_cache_flush(dev_net(dev), -1);
65294 break;
65295 case NETDEV_DOWN:
65296 fib_del_ifaddr(ifa, NULL);
65297 - atomic_inc(&net->ipv4.dev_addr_genid);
65298 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65299 if (ifa->ifa_dev->ifa_list == NULL) {
65300 /* Last address was deleted from this interface.
65301 * Disable IP.
65302 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
65303 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65304 fib_sync_up(dev);
65305 #endif
65306 - atomic_inc(&net->ipv4.dev_addr_genid);
65307 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65308 rt_cache_flush(dev_net(dev), -1);
65309 break;
65310 case NETDEV_DOWN:
65311 diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
65312 --- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
65313 +++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
65314 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
65315 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65316 nh->nh_gw,
65317 nh->nh_parent->fib_scope);
65318 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65319 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65320
65321 return nh->nh_saddr;
65322 }
65323 diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
65324 --- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
65325 +++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
65326 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65327 r->idiag_retrans = 0;
65328
65329 r->id.idiag_if = sk->sk_bound_dev_if;
65330 +
65331 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65332 + r->id.idiag_cookie[0] = 0;
65333 + r->id.idiag_cookie[1] = 0;
65334 +#else
65335 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65336 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65337 +#endif
65338
65339 r->id.idiag_sport = inet->inet_sport;
65340 r->id.idiag_dport = inet->inet_dport;
65341 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65342 r->idiag_family = tw->tw_family;
65343 r->idiag_retrans = 0;
65344 r->id.idiag_if = tw->tw_bound_dev_if;
65345 +
65346 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65347 + r->id.idiag_cookie[0] = 0;
65348 + r->id.idiag_cookie[1] = 0;
65349 +#else
65350 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65351 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65352 +#endif
65353 +
65354 r->id.idiag_sport = tw->tw_sport;
65355 r->id.idiag_dport = tw->tw_dport;
65356 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65357 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65358 if (sk == NULL)
65359 goto unlock;
65360
65361 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65362 err = -ESTALE;
65363 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65364 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65365 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65366 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65367 goto out;
65368 +#endif
65369
65370 err = -ENOMEM;
65371 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65372 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65373 r->idiag_retrans = req->retrans;
65374
65375 r->id.idiag_if = sk->sk_bound_dev_if;
65376 +
65377 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65378 + r->id.idiag_cookie[0] = 0;
65379 + r->id.idiag_cookie[1] = 0;
65380 +#else
65381 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65382 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65383 +#endif
65384
65385 tmo = req->expires - jiffies;
65386 if (tmo < 0)
65387 diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
65388 --- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
65389 +++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
65390 @@ -18,12 +18,15 @@
65391 #include <linux/sched.h>
65392 #include <linux/slab.h>
65393 #include <linux/wait.h>
65394 +#include <linux/security.h>
65395
65396 #include <net/inet_connection_sock.h>
65397 #include <net/inet_hashtables.h>
65398 #include <net/secure_seq.h>
65399 #include <net/ip.h>
65400
65401 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65402 +
65403 /*
65404 * Allocate and initialize a new local port bind bucket.
65405 * The bindhash mutex for snum's hash chain must be held here.
65406 @@ -530,6 +533,8 @@ ok:
65407 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65408 spin_unlock(&head->lock);
65409
65410 + gr_update_task_in_ip_table(current, inet_sk(sk));
65411 +
65412 if (tw) {
65413 inet_twsk_deschedule(tw, death_row);
65414 while (twrefcnt) {
65415 diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
65416 --- linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
65417 +++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
65418 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
65419 unsigned int sequence;
65420 int invalidated, newrefcnt = 0;
65421
65422 + pax_track_stack();
65423 +
65424 /* Look up for the address quickly, lockless.
65425 * Because of a concurrent writer, we might not find an existing entry.
65426 */
65427 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
65428 if (p) {
65429 p->daddr = *daddr;
65430 atomic_set(&p->refcnt, 1);
65431 - atomic_set(&p->rid, 0);
65432 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65433 + atomic_set_unchecked(&p->rid, 0);
65434 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65435 p->tcp_ts_stamp = 0;
65436 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65437 p->rate_tokens = 0;
65438 diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
65439 --- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
65440 +++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
65441 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
65442 return 0;
65443
65444 start = qp->rid;
65445 - end = atomic_inc_return(&peer->rid);
65446 + end = atomic_inc_return_unchecked(&peer->rid);
65447 qp->rid = end;
65448
65449 rc = qp->q.fragments && (end - start) > max;
65450 diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
65451 --- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65452 +++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65453 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
65454 int val;
65455 int len;
65456
65457 + pax_track_stack();
65458 +
65459 if (level != SOL_IP)
65460 return -EOPNOTSUPP;
65461
65462 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
65463 len = min_t(unsigned int, len, opt->optlen);
65464 if (put_user(len, optlen))
65465 return -EFAULT;
65466 - if (copy_to_user(optval, opt->__data, len))
65467 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
65468 + copy_to_user(optval, opt->__data, len))
65469 return -EFAULT;
65470 return 0;
65471 }
65472 diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65473 --- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
65474 +++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
65475 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65476
65477 *len = 0;
65478
65479 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65480 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65481 if (*octets == NULL) {
65482 if (net_ratelimit())
65483 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65484 diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
65485 --- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
65486 +++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
65487 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
65488 sk_rmem_alloc_get(sp),
65489 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65490 atomic_read(&sp->sk_refcnt), sp,
65491 - atomic_read(&sp->sk_drops), len);
65492 + atomic_read_unchecked(&sp->sk_drops), len);
65493 }
65494
65495 static int ping_seq_show(struct seq_file *seq, void *v)
65496 diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
65497 --- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
65498 +++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
65499 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65500 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65501 {
65502 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65503 - atomic_inc(&sk->sk_drops);
65504 + atomic_inc_unchecked(&sk->sk_drops);
65505 kfree_skb(skb);
65506 return NET_RX_DROP;
65507 }
65508 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
65509
65510 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65511 {
65512 + struct icmp_filter filter;
65513 +
65514 if (optlen > sizeof(struct icmp_filter))
65515 optlen = sizeof(struct icmp_filter);
65516 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65517 + if (copy_from_user(&filter, optval, optlen))
65518 return -EFAULT;
65519 + raw_sk(sk)->filter = filter;
65520 return 0;
65521 }
65522
65523 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65524 {
65525 int len, ret = -EFAULT;
65526 + struct icmp_filter filter;
65527
65528 if (get_user(len, optlen))
65529 goto out;
65530 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
65531 if (len > sizeof(struct icmp_filter))
65532 len = sizeof(struct icmp_filter);
65533 ret = -EFAULT;
65534 - if (put_user(len, optlen) ||
65535 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65536 + filter = raw_sk(sk)->filter;
65537 + if (put_user(len, optlen) || len > sizeof filter ||
65538 + copy_to_user(optval, &filter, len))
65539 goto out;
65540 ret = 0;
65541 out: return ret;
65542 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
65543 sk_wmem_alloc_get(sp),
65544 sk_rmem_alloc_get(sp),
65545 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65546 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65547 + atomic_read(&sp->sk_refcnt),
65548 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65549 + NULL,
65550 +#else
65551 + sp,
65552 +#endif
65553 + atomic_read_unchecked(&sp->sk_drops));
65554 }
65555
65556 static int raw_seq_show(struct seq_file *seq, void *v)
65557 diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
65558 --- linux-3.0.4/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
65559 +++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
65560 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
65561
65562 static inline int rt_genid(struct net *net)
65563 {
65564 - return atomic_read(&net->ipv4.rt_genid);
65565 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65566 }
65567
65568 #ifdef CONFIG_PROC_FS
65569 @@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
65570 unsigned char shuffle;
65571
65572 get_random_bytes(&shuffle, sizeof(shuffle));
65573 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65574 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65575 }
65576
65577 /*
65578 @@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
65579 error = rt->dst.error;
65580 if (peer) {
65581 inet_peer_refcheck(rt->peer);
65582 - id = atomic_read(&peer->ip_id_count) & 0xffff;
65583 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
65584 if (peer->tcp_ts_stamp) {
65585 ts = peer->tcp_ts;
65586 tsage = get_seconds() - peer->tcp_ts_stamp;
65587 diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
65588 --- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
65589 +++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
65590 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
65591 int val;
65592 int err = 0;
65593
65594 + pax_track_stack();
65595 +
65596 /* These are data/string values, all the others are ints */
65597 switch (optname) {
65598 case TCP_CONGESTION: {
65599 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
65600 struct tcp_sock *tp = tcp_sk(sk);
65601 int val, len;
65602
65603 + pax_track_stack();
65604 +
65605 if (get_user(len, optlen))
65606 return -EFAULT;
65607
65608 diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
65609 --- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
65610 +++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
65611 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65612 int sysctl_tcp_low_latency __read_mostly;
65613 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65614
65615 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65616 +extern int grsec_enable_blackhole;
65617 +#endif
65618
65619 #ifdef CONFIG_TCP_MD5SIG
65620 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65621 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65622 return 0;
65623
65624 reset:
65625 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65626 + if (!grsec_enable_blackhole)
65627 +#endif
65628 tcp_v4_send_reset(rsk, skb);
65629 discard:
65630 kfree_skb(skb);
65631 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65632 TCP_SKB_CB(skb)->sacked = 0;
65633
65634 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65635 - if (!sk)
65636 + if (!sk) {
65637 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65638 + ret = 1;
65639 +#endif
65640 goto no_tcp_socket;
65641 -
65642 + }
65643 process:
65644 - if (sk->sk_state == TCP_TIME_WAIT)
65645 + if (sk->sk_state == TCP_TIME_WAIT) {
65646 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65647 + ret = 2;
65648 +#endif
65649 goto do_time_wait;
65650 + }
65651
65652 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65653 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65654 @@ -1724,6 +1737,10 @@ no_tcp_socket:
65655 bad_packet:
65656 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65657 } else {
65658 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65659 + if (!grsec_enable_blackhole || (ret == 1 &&
65660 + (skb->dev->flags & IFF_LOOPBACK)))
65661 +#endif
65662 tcp_v4_send_reset(NULL, skb);
65663 }
65664
65665 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
65666 0, /* non standard timer */
65667 0, /* open_requests have no inode */
65668 atomic_read(&sk->sk_refcnt),
65669 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65670 + NULL,
65671 +#else
65672 req,
65673 +#endif
65674 len);
65675 }
65676
65677 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
65678 sock_i_uid(sk),
65679 icsk->icsk_probes_out,
65680 sock_i_ino(sk),
65681 - atomic_read(&sk->sk_refcnt), sk,
65682 + atomic_read(&sk->sk_refcnt),
65683 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65684 + NULL,
65685 +#else
65686 + sk,
65687 +#endif
65688 jiffies_to_clock_t(icsk->icsk_rto),
65689 jiffies_to_clock_t(icsk->icsk_ack.ato),
65690 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
65691 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
65692 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
65693 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
65694 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65695 - atomic_read(&tw->tw_refcnt), tw, len);
65696 + atomic_read(&tw->tw_refcnt),
65697 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65698 + NULL,
65699 +#else
65700 + tw,
65701 +#endif
65702 + len);
65703 }
65704
65705 #define TMPSZ 150
65706 diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
65707 --- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
65708 +++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
65709 @@ -27,6 +27,10 @@
65710 #include <net/inet_common.h>
65711 #include <net/xfrm.h>
65712
65713 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65714 +extern int grsec_enable_blackhole;
65715 +#endif
65716 +
65717 int sysctl_tcp_syncookies __read_mostly = 1;
65718 EXPORT_SYMBOL(sysctl_tcp_syncookies);
65719
65720 @@ -745,6 +749,10 @@ listen_overflow:
65721
65722 embryonic_reset:
65723 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
65724 +
65725 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65726 + if (!grsec_enable_blackhole)
65727 +#endif
65728 if (!(flg & TCP_FLAG_RST))
65729 req->rsk_ops->send_reset(sk, skb);
65730
65731 diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
65732 --- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
65733 +++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
65734 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
65735 int mss;
65736 int s_data_desired = 0;
65737
65738 + pax_track_stack();
65739 +
65740 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
65741 s_data_desired = cvp->s_data_desired;
65742 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
65743 diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
65744 --- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
65745 +++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
65746 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
65747 if (cnt + width >= len)
65748 break;
65749
65750 - if (copy_to_user(buf + cnt, tbuf, width))
65751 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
65752 return -EFAULT;
65753 cnt += width;
65754 }
65755 diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
65756 --- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
65757 +++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
65758 @@ -22,6 +22,10 @@
65759 #include <linux/gfp.h>
65760 #include <net/tcp.h>
65761
65762 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65763 +extern int grsec_lastack_retries;
65764 +#endif
65765 +
65766 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
65767 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
65768 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
65769 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
65770 }
65771 }
65772
65773 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65774 + if ((sk->sk_state == TCP_LAST_ACK) &&
65775 + (grsec_lastack_retries > 0) &&
65776 + (grsec_lastack_retries < retry_until))
65777 + retry_until = grsec_lastack_retries;
65778 +#endif
65779 +
65780 if (retransmits_timed_out(sk, retry_until,
65781 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
65782 /* Has it gone just too far? */
65783 diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
65784 --- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
65785 +++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
65786 @@ -86,6 +86,7 @@
65787 #include <linux/types.h>
65788 #include <linux/fcntl.h>
65789 #include <linux/module.h>
65790 +#include <linux/security.h>
65791 #include <linux/socket.h>
65792 #include <linux/sockios.h>
65793 #include <linux/igmp.h>
65794 @@ -107,6 +108,10 @@
65795 #include <net/xfrm.h>
65796 #include "udp_impl.h"
65797
65798 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65799 +extern int grsec_enable_blackhole;
65800 +#endif
65801 +
65802 struct udp_table udp_table __read_mostly;
65803 EXPORT_SYMBOL(udp_table);
65804
65805 @@ -564,6 +569,9 @@ found:
65806 return s;
65807 }
65808
65809 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
65810 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
65811 +
65812 /*
65813 * This routine is called by the ICMP module when it gets some
65814 * sort of error condition. If err < 0 then the socket should
65815 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
65816 dport = usin->sin_port;
65817 if (dport == 0)
65818 return -EINVAL;
65819 +
65820 + err = gr_search_udp_sendmsg(sk, usin);
65821 + if (err)
65822 + return err;
65823 } else {
65824 if (sk->sk_state != TCP_ESTABLISHED)
65825 return -EDESTADDRREQ;
65826 +
65827 + err = gr_search_udp_sendmsg(sk, NULL);
65828 + if (err)
65829 + return err;
65830 +
65831 daddr = inet->inet_daddr;
65832 dport = inet->inet_dport;
65833 /* Open fast path for connected socket.
65834 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
65835 udp_lib_checksum_complete(skb)) {
65836 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65837 IS_UDPLITE(sk));
65838 - atomic_inc(&sk->sk_drops);
65839 + atomic_inc_unchecked(&sk->sk_drops);
65840 __skb_unlink(skb, rcvq);
65841 __skb_queue_tail(&list_kill, skb);
65842 }
65843 @@ -1184,6 +1201,10 @@ try_again:
65844 if (!skb)
65845 goto out;
65846
65847 + err = gr_search_udp_recvmsg(sk, skb);
65848 + if (err)
65849 + goto out_free;
65850 +
65851 ulen = skb->len - sizeof(struct udphdr);
65852 if (len > ulen)
65853 len = ulen;
65854 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
65855
65856 drop:
65857 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
65858 - atomic_inc(&sk->sk_drops);
65859 + atomic_inc_unchecked(&sk->sk_drops);
65860 kfree_skb(skb);
65861 return -1;
65862 }
65863 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
65864 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
65865
65866 if (!skb1) {
65867 - atomic_inc(&sk->sk_drops);
65868 + atomic_inc_unchecked(&sk->sk_drops);
65869 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
65870 IS_UDPLITE(sk));
65871 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65872 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
65873 goto csum_error;
65874
65875 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
65876 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65877 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
65878 +#endif
65879 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
65880
65881 /*
65882 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
65883 sk_wmem_alloc_get(sp),
65884 sk_rmem_alloc_get(sp),
65885 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65886 - atomic_read(&sp->sk_refcnt), sp,
65887 - atomic_read(&sp->sk_drops), len);
65888 + atomic_read(&sp->sk_refcnt),
65889 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65890 + NULL,
65891 +#else
65892 + sp,
65893 +#endif
65894 + atomic_read_unchecked(&sp->sk_drops), len);
65895 }
65896
65897 int udp4_seq_show(struct seq_file *seq, void *v)
65898 diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
65899 --- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
65900 +++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
65901 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
65902 #ifdef CONFIG_XFRM
65903 {
65904 struct rt6_info *rt = (struct rt6_info *)dst;
65905 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
65906 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
65907 }
65908 #endif
65909 }
65910 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
65911 #ifdef CONFIG_XFRM
65912 if (dst) {
65913 struct rt6_info *rt = (struct rt6_info *)dst;
65914 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
65915 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
65916 __sk_dst_reset(sk);
65917 dst = NULL;
65918 }
65919 diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
65920 --- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65921 +++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65922 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
65923 int val, valbool;
65924 int retv = -ENOPROTOOPT;
65925
65926 + pax_track_stack();
65927 +
65928 if (optval == NULL)
65929 val=0;
65930 else {
65931 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
65932 int len;
65933 int val;
65934
65935 + pax_track_stack();
65936 +
65937 if (ip6_mroute_opt(optname))
65938 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
65939
65940 diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
65941 --- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
65942 +++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
65943 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
65944 {
65945 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
65946 skb_checksum_complete(skb)) {
65947 - atomic_inc(&sk->sk_drops);
65948 + atomic_inc_unchecked(&sk->sk_drops);
65949 kfree_skb(skb);
65950 return NET_RX_DROP;
65951 }
65952 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65953 struct raw6_sock *rp = raw6_sk(sk);
65954
65955 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
65956 - atomic_inc(&sk->sk_drops);
65957 + atomic_inc_unchecked(&sk->sk_drops);
65958 kfree_skb(skb);
65959 return NET_RX_DROP;
65960 }
65961 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65962
65963 if (inet->hdrincl) {
65964 if (skb_checksum_complete(skb)) {
65965 - atomic_inc(&sk->sk_drops);
65966 + atomic_inc_unchecked(&sk->sk_drops);
65967 kfree_skb(skb);
65968 return NET_RX_DROP;
65969 }
65970 @@ -601,7 +601,7 @@ out:
65971 return err;
65972 }
65973
65974 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
65975 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
65976 struct flowi6 *fl6, struct dst_entry **dstp,
65977 unsigned int flags)
65978 {
65979 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
65980 u16 proto;
65981 int err;
65982
65983 + pax_track_stack();
65984 +
65985 /* Rough check on arithmetic overflow,
65986 better check is made in ip6_append_data().
65987 */
65988 @@ -909,12 +911,15 @@ do_confirm:
65989 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
65990 char __user *optval, int optlen)
65991 {
65992 + struct icmp6_filter filter;
65993 +
65994 switch (optname) {
65995 case ICMPV6_FILTER:
65996 if (optlen > sizeof(struct icmp6_filter))
65997 optlen = sizeof(struct icmp6_filter);
65998 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
65999 + if (copy_from_user(&filter, optval, optlen))
66000 return -EFAULT;
66001 + raw6_sk(sk)->filter = filter;
66002 return 0;
66003 default:
66004 return -ENOPROTOOPT;
66005 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66006 char __user *optval, int __user *optlen)
66007 {
66008 int len;
66009 + struct icmp6_filter filter;
66010
66011 switch (optname) {
66012 case ICMPV6_FILTER:
66013 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66014 len = sizeof(struct icmp6_filter);
66015 if (put_user(len, optlen))
66016 return -EFAULT;
66017 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66018 + filter = raw6_sk(sk)->filter;
66019 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
66020 return -EFAULT;
66021 return 0;
66022 default:
66023 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66024 0, 0L, 0,
66025 sock_i_uid(sp), 0,
66026 sock_i_ino(sp),
66027 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66028 + atomic_read(&sp->sk_refcnt),
66029 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66030 + NULL,
66031 +#else
66032 + sp,
66033 +#endif
66034 + atomic_read_unchecked(&sp->sk_drops));
66035 }
66036
66037 static int raw6_seq_show(struct seq_file *seq, void *v)
66038 diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
66039 --- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
66040 +++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
66041 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66042 }
66043 #endif
66044
66045 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66046 +extern int grsec_enable_blackhole;
66047 +#endif
66048 +
66049 static void tcp_v6_hash(struct sock *sk)
66050 {
66051 if (sk->sk_state != TCP_CLOSE) {
66052 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66053 return 0;
66054
66055 reset:
66056 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66057 + if (!grsec_enable_blackhole)
66058 +#endif
66059 tcp_v6_send_reset(sk, skb);
66060 discard:
66061 if (opt_skb)
66062 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66063 TCP_SKB_CB(skb)->sacked = 0;
66064
66065 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66066 - if (!sk)
66067 + if (!sk) {
66068 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66069 + ret = 1;
66070 +#endif
66071 goto no_tcp_socket;
66072 + }
66073
66074 process:
66075 - if (sk->sk_state == TCP_TIME_WAIT)
66076 + if (sk->sk_state == TCP_TIME_WAIT) {
66077 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66078 + ret = 2;
66079 +#endif
66080 goto do_time_wait;
66081 + }
66082
66083 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66084 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66085 @@ -1794,6 +1809,10 @@ no_tcp_socket:
66086 bad_packet:
66087 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66088 } else {
66089 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66090 + if (!grsec_enable_blackhole || (ret == 1 &&
66091 + (skb->dev->flags & IFF_LOOPBACK)))
66092 +#endif
66093 tcp_v6_send_reset(NULL, skb);
66094 }
66095
66096 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
66097 uid,
66098 0, /* non standard timer */
66099 0, /* open_requests have no inode */
66100 - 0, req);
66101 + 0,
66102 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66103 + NULL
66104 +#else
66105 + req
66106 +#endif
66107 + );
66108 }
66109
66110 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66111 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
66112 sock_i_uid(sp),
66113 icsk->icsk_probes_out,
66114 sock_i_ino(sp),
66115 - atomic_read(&sp->sk_refcnt), sp,
66116 + atomic_read(&sp->sk_refcnt),
66117 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66118 + NULL,
66119 +#else
66120 + sp,
66121 +#endif
66122 jiffies_to_clock_t(icsk->icsk_rto),
66123 jiffies_to_clock_t(icsk->icsk_ack.ato),
66124 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66125 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
66126 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66127 tw->tw_substate, 0, 0,
66128 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66129 - atomic_read(&tw->tw_refcnt), tw);
66130 + atomic_read(&tw->tw_refcnt),
66131 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66132 + NULL
66133 +#else
66134 + tw
66135 +#endif
66136 + );
66137 }
66138
66139 static int tcp6_seq_show(struct seq_file *seq, void *v)
66140 diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
66141 --- linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
66142 +++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
66143 @@ -50,6 +50,10 @@
66144 #include <linux/seq_file.h>
66145 #include "udp_impl.h"
66146
66147 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66148 +extern int grsec_enable_blackhole;
66149 +#endif
66150 +
66151 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66152 {
66153 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66154 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66155
66156 return 0;
66157 drop:
66158 - atomic_inc(&sk->sk_drops);
66159 + atomic_inc_unchecked(&sk->sk_drops);
66160 drop_no_sk_drops_inc:
66161 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66162 kfree_skb(skb);
66163 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66164 continue;
66165 }
66166 drop:
66167 - atomic_inc(&sk->sk_drops);
66168 + atomic_inc_unchecked(&sk->sk_drops);
66169 UDP6_INC_STATS_BH(sock_net(sk),
66170 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66171 UDP6_INC_STATS_BH(sock_net(sk),
66172 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66173 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66174 proto == IPPROTO_UDPLITE);
66175
66176 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66177 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66178 +#endif
66179 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66180
66181 kfree_skb(skb);
66182 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66183 if (!sock_owned_by_user(sk))
66184 udpv6_queue_rcv_skb(sk, skb);
66185 else if (sk_add_backlog(sk, skb)) {
66186 - atomic_inc(&sk->sk_drops);
66187 + atomic_inc_unchecked(&sk->sk_drops);
66188 bh_unlock_sock(sk);
66189 sock_put(sk);
66190 goto discard;
66191 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66192 0, 0L, 0,
66193 sock_i_uid(sp), 0,
66194 sock_i_ino(sp),
66195 - atomic_read(&sp->sk_refcnt), sp,
66196 - atomic_read(&sp->sk_drops));
66197 + atomic_read(&sp->sk_refcnt),
66198 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66199 + NULL,
66200 +#else
66201 + sp,
66202 +#endif
66203 + atomic_read_unchecked(&sp->sk_drops));
66204 }
66205
66206 int udp6_seq_show(struct seq_file *seq, void *v)
66207 diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
66208 --- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
66209 +++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
66210 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
66211 add_wait_queue(&self->open_wait, &wait);
66212
66213 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66214 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66215 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66216
66217 /* As far as I can see, we protect open_count - Jean II */
66218 spin_lock_irqsave(&self->spinlock, flags);
66219 if (!tty_hung_up_p(filp)) {
66220 extra_count = 1;
66221 - self->open_count--;
66222 + local_dec(&self->open_count);
66223 }
66224 spin_unlock_irqrestore(&self->spinlock, flags);
66225 - self->blocked_open++;
66226 + local_inc(&self->blocked_open);
66227
66228 while (1) {
66229 if (tty->termios->c_cflag & CBAUD) {
66230 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
66231 }
66232
66233 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66234 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66235 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66236
66237 schedule();
66238 }
66239 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
66240 if (extra_count) {
66241 /* ++ is not atomic, so this should be protected - Jean II */
66242 spin_lock_irqsave(&self->spinlock, flags);
66243 - self->open_count++;
66244 + local_inc(&self->open_count);
66245 spin_unlock_irqrestore(&self->spinlock, flags);
66246 }
66247 - self->blocked_open--;
66248 + local_dec(&self->blocked_open);
66249
66250 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66251 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66252 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66253
66254 if (!retval)
66255 self->flags |= ASYNC_NORMAL_ACTIVE;
66256 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
66257 }
66258 /* ++ is not atomic, so this should be protected - Jean II */
66259 spin_lock_irqsave(&self->spinlock, flags);
66260 - self->open_count++;
66261 + local_inc(&self->open_count);
66262
66263 tty->driver_data = self;
66264 self->tty = tty;
66265 spin_unlock_irqrestore(&self->spinlock, flags);
66266
66267 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66268 - self->line, self->open_count);
66269 + self->line, local_read(&self->open_count));
66270
66271 /* Not really used by us, but lets do it anyway */
66272 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66273 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
66274 return;
66275 }
66276
66277 - if ((tty->count == 1) && (self->open_count != 1)) {
66278 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66279 /*
66280 * Uh, oh. tty->count is 1, which means that the tty
66281 * structure will be freed. state->count should always
66282 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
66283 */
66284 IRDA_DEBUG(0, "%s(), bad serial port count; "
66285 "tty->count is 1, state->count is %d\n", __func__ ,
66286 - self->open_count);
66287 - self->open_count = 1;
66288 + local_read(&self->open_count));
66289 + local_set(&self->open_count, 1);
66290 }
66291
66292 - if (--self->open_count < 0) {
66293 + if (local_dec_return(&self->open_count) < 0) {
66294 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66295 - __func__, self->line, self->open_count);
66296 - self->open_count = 0;
66297 + __func__, self->line, local_read(&self->open_count));
66298 + local_set(&self->open_count, 0);
66299 }
66300 - if (self->open_count) {
66301 + if (local_read(&self->open_count)) {
66302 spin_unlock_irqrestore(&self->spinlock, flags);
66303
66304 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66305 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
66306 tty->closing = 0;
66307 self->tty = NULL;
66308
66309 - if (self->blocked_open) {
66310 + if (local_read(&self->blocked_open)) {
66311 if (self->close_delay)
66312 schedule_timeout_interruptible(self->close_delay);
66313 wake_up_interruptible(&self->open_wait);
66314 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
66315 spin_lock_irqsave(&self->spinlock, flags);
66316 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66317 self->tty = NULL;
66318 - self->open_count = 0;
66319 + local_set(&self->open_count, 0);
66320 spin_unlock_irqrestore(&self->spinlock, flags);
66321
66322 wake_up_interruptible(&self->open_wait);
66323 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
66324 seq_putc(m, '\n');
66325
66326 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66327 - seq_printf(m, "Open count: %d\n", self->open_count);
66328 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66329 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66330 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66331
66332 diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
66333 --- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
66334 +++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
66335 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
66336
66337 write_lock_bh(&iucv_sk_list.lock);
66338
66339 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66340 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66341 while (__iucv_get_sock_by_name(name)) {
66342 sprintf(name, "%08x",
66343 - atomic_inc_return(&iucv_sk_list.autobind_name));
66344 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66345 }
66346
66347 write_unlock_bh(&iucv_sk_list.lock);
66348 diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
66349 --- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
66350 +++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
66351 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66352 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66353 struct xfrm_kmaddress k;
66354
66355 + pax_track_stack();
66356 +
66357 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66358 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66359 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66360 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66361 static u32 get_acqseq(void)
66362 {
66363 u32 res;
66364 - static atomic_t acqseq;
66365 + static atomic_unchecked_t acqseq;
66366
66367 do {
66368 - res = atomic_inc_return(&acqseq);
66369 + res = atomic_inc_return_unchecked(&acqseq);
66370 } while (!res);
66371 return res;
66372 }
66373 diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
66374 --- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
66375 +++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
66376 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66377 goto out;
66378
66379 lapb->dev = dev;
66380 - lapb->callbacks = *callbacks;
66381 + lapb->callbacks = callbacks;
66382
66383 __lapb_insert_cb(lapb);
66384
66385 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66386
66387 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66388 {
66389 - if (lapb->callbacks.connect_confirmation)
66390 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66391 + if (lapb->callbacks->connect_confirmation)
66392 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66393 }
66394
66395 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66396 {
66397 - if (lapb->callbacks.connect_indication)
66398 - lapb->callbacks.connect_indication(lapb->dev, reason);
66399 + if (lapb->callbacks->connect_indication)
66400 + lapb->callbacks->connect_indication(lapb->dev, reason);
66401 }
66402
66403 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66404 {
66405 - if (lapb->callbacks.disconnect_confirmation)
66406 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66407 + if (lapb->callbacks->disconnect_confirmation)
66408 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66409 }
66410
66411 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66412 {
66413 - if (lapb->callbacks.disconnect_indication)
66414 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66415 + if (lapb->callbacks->disconnect_indication)
66416 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66417 }
66418
66419 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66420 {
66421 - if (lapb->callbacks.data_indication)
66422 - return lapb->callbacks.data_indication(lapb->dev, skb);
66423 + if (lapb->callbacks->data_indication)
66424 + return lapb->callbacks->data_indication(lapb->dev, skb);
66425
66426 kfree_skb(skb);
66427 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66428 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66429 {
66430 int used = 0;
66431
66432 - if (lapb->callbacks.data_transmit) {
66433 - lapb->callbacks.data_transmit(lapb->dev, skb);
66434 + if (lapb->callbacks->data_transmit) {
66435 + lapb->callbacks->data_transmit(lapb->dev, skb);
66436 used = 1;
66437 }
66438
66439 diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
66440 --- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
66441 +++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
66442 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
66443 struct tid_ampdu_rx *tid_rx;
66444 struct tid_ampdu_tx *tid_tx;
66445
66446 + pax_track_stack();
66447 +
66448 rcu_read_lock();
66449
66450 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66451 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
66452 struct sta_info *sta = file->private_data;
66453 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66454
66455 + pax_track_stack();
66456 +
66457 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66458 htc->ht_supported ? "" : "not ");
66459 if (htc->ht_supported) {
66460 diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
66461 --- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
66462 +++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
66463 @@ -27,6 +27,7 @@
66464 #include <net/ieee80211_radiotap.h>
66465 #include <net/cfg80211.h>
66466 #include <net/mac80211.h>
66467 +#include <asm/local.h>
66468 #include "key.h"
66469 #include "sta_info.h"
66470
66471 @@ -721,7 +722,7 @@ struct ieee80211_local {
66472 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66473 spinlock_t queue_stop_reason_lock;
66474
66475 - int open_count;
66476 + local_t open_count;
66477 int monitors, cooked_mntrs;
66478 /* number of interfaces with corresponding FIF_ flags */
66479 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66480 diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
66481 --- linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
66482 +++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
66483 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66484 break;
66485 }
66486
66487 - if (local->open_count == 0) {
66488 + if (local_read(&local->open_count) == 0) {
66489 res = drv_start(local);
66490 if (res)
66491 goto err_del_bss;
66492 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66493 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66494
66495 if (!is_valid_ether_addr(dev->dev_addr)) {
66496 - if (!local->open_count)
66497 + if (!local_read(&local->open_count))
66498 drv_stop(local);
66499 return -EADDRNOTAVAIL;
66500 }
66501 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66502 mutex_unlock(&local->mtx);
66503
66504 if (coming_up)
66505 - local->open_count++;
66506 + local_inc(&local->open_count);
66507
66508 if (hw_reconf_flags) {
66509 ieee80211_hw_config(local, hw_reconf_flags);
66510 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66511 err_del_interface:
66512 drv_remove_interface(local, &sdata->vif);
66513 err_stop:
66514 - if (!local->open_count)
66515 + if (!local_read(&local->open_count))
66516 drv_stop(local);
66517 err_del_bss:
66518 sdata->bss = NULL;
66519 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
66520 }
66521
66522 if (going_down)
66523 - local->open_count--;
66524 + local_dec(&local->open_count);
66525
66526 switch (sdata->vif.type) {
66527 case NL80211_IFTYPE_AP_VLAN:
66528 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
66529
66530 ieee80211_recalc_ps(local, -1);
66531
66532 - if (local->open_count == 0) {
66533 + if (local_read(&local->open_count) == 0) {
66534 if (local->ops->napi_poll)
66535 napi_disable(&local->napi);
66536 ieee80211_clear_tx_pending(local);
66537 diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
66538 --- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
66539 +++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
66540 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
66541 local->hw.conf.power_level = power;
66542 }
66543
66544 - if (changed && local->open_count) {
66545 + if (changed && local_read(&local->open_count)) {
66546 ret = drv_config(local, changed);
66547 /*
66548 * Goal:
66549 diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
66550 --- linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
66551 +++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
66552 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
66553 bool have_higher_than_11mbit = false;
66554 u16 ap_ht_cap_flags;
66555
66556 + pax_track_stack();
66557 +
66558 /* AssocResp and ReassocResp have identical structure */
66559
66560 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66561 diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
66562 --- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
66563 +++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
66564 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
66565 cancel_work_sync(&local->dynamic_ps_enable_work);
66566 del_timer_sync(&local->dynamic_ps_timer);
66567
66568 - local->wowlan = wowlan && local->open_count;
66569 + local->wowlan = wowlan && local_read(&local->open_count);
66570 if (local->wowlan) {
66571 int err = drv_suspend(local, wowlan);
66572 if (err) {
66573 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
66574 }
66575
66576 /* stop hardware - this must stop RX */
66577 - if (local->open_count)
66578 + if (local_read(&local->open_count))
66579 ieee80211_stop_device(local);
66580
66581 suspend:
66582 diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
66583 --- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
66584 +++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
66585 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66586
66587 ASSERT_RTNL();
66588
66589 - if (local->open_count)
66590 + if (local_read(&local->open_count))
66591 return -EBUSY;
66592
66593 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66594 diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
66595 --- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
66596 +++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
66597 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66598
66599 spin_unlock_irqrestore(&events->lock, status);
66600
66601 - if (copy_to_user(buf, pb, p))
66602 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66603 return -EFAULT;
66604
66605 return p;
66606 diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
66607 --- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
66608 +++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
66609 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
66610 #endif
66611
66612 /* restart hardware */
66613 - if (local->open_count) {
66614 + if (local_read(&local->open_count)) {
66615 /*
66616 * Upon resume hardware can sometimes be goofy due to
66617 * various platform / driver / bus issues, so restarting
66618 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
66619 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
66620 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
66621 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66622 /* Increase the refcnt counter of the dest */
66623 atomic_inc(&dest->refcnt);
66624
66625 - conn_flags = atomic_read(&dest->conn_flags);
66626 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
66627 if (cp->protocol != IPPROTO_UDP)
66628 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66629 /* Bind with the destination and its corresponding transmitter */
66630 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66631 atomic_set(&cp->refcnt, 1);
66632
66633 atomic_set(&cp->n_control, 0);
66634 - atomic_set(&cp->in_pkts, 0);
66635 + atomic_set_unchecked(&cp->in_pkts, 0);
66636
66637 atomic_inc(&ipvs->conn_count);
66638 if (flags & IP_VS_CONN_F_NO_CPORT)
66639 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66640
66641 /* Don't drop the entry if its number of incoming packets is not
66642 located in [0, 8] */
66643 - i = atomic_read(&cp->in_pkts);
66644 + i = atomic_read_unchecked(&cp->in_pkts);
66645 if (i > 8 || i < 0) return 0;
66646
66647 if (!todrop_rate[i]) return 0;
66648 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
66649 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
66650 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
66651 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66652 ret = cp->packet_xmit(skb, cp, pd->pp);
66653 /* do not touch skb anymore */
66654
66655 - atomic_inc(&cp->in_pkts);
66656 + atomic_inc_unchecked(&cp->in_pkts);
66657 ip_vs_conn_put(cp);
66658 return ret;
66659 }
66660 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66661 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66662 pkts = sysctl_sync_threshold(ipvs);
66663 else
66664 - pkts = atomic_add_return(1, &cp->in_pkts);
66665 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66666
66667 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66668 cp->protocol == IPPROTO_SCTP) {
66669 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
66670 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
66671 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
66672 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66673 ip_vs_rs_hash(ipvs, dest);
66674 write_unlock_bh(&ipvs->rs_lock);
66675 }
66676 - atomic_set(&dest->conn_flags, conn_flags);
66677 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
66678
66679 /* bind the service */
66680 if (!dest->svc) {
66681 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
66682 " %-7s %-6d %-10d %-10d\n",
66683 &dest->addr.in6,
66684 ntohs(dest->port),
66685 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66686 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66687 atomic_read(&dest->weight),
66688 atomic_read(&dest->activeconns),
66689 atomic_read(&dest->inactconns));
66690 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
66691 "%-7s %-6d %-10d %-10d\n",
66692 ntohl(dest->addr.ip),
66693 ntohs(dest->port),
66694 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66695 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66696 atomic_read(&dest->weight),
66697 atomic_read(&dest->activeconns),
66698 atomic_read(&dest->inactconns));
66699 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
66700 struct ip_vs_dest_user *udest_compat;
66701 struct ip_vs_dest_user_kern udest;
66702
66703 + pax_track_stack();
66704 +
66705 if (!capable(CAP_NET_ADMIN))
66706 return -EPERM;
66707
66708 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
66709
66710 entry.addr = dest->addr.ip;
66711 entry.port = dest->port;
66712 - entry.conn_flags = atomic_read(&dest->conn_flags);
66713 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
66714 entry.weight = atomic_read(&dest->weight);
66715 entry.u_threshold = dest->u_threshold;
66716 entry.l_threshold = dest->l_threshold;
66717 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
66718 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
66719
66720 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
66721 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66722 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66723 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
66724 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
66725 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
66726 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
66727 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
66728 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
66729 @@ -648,7 +648,7 @@ control:
66730 * i.e only increment in_pkts for Templates.
66731 */
66732 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
66733 - int pkts = atomic_add_return(1, &cp->in_pkts);
66734 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66735
66736 if (pkts % sysctl_sync_period(ipvs) != 1)
66737 return;
66738 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
66739
66740 if (opt)
66741 memcpy(&cp->in_seq, opt, sizeof(*opt));
66742 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66743 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66744 cp->state = state;
66745 cp->old_state = cp->state;
66746 /*
66747 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
66748 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
66749 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
66750 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
66751 else
66752 rc = NF_ACCEPT;
66753 /* do not touch skb anymore */
66754 - atomic_inc(&cp->in_pkts);
66755 + atomic_inc_unchecked(&cp->in_pkts);
66756 goto out;
66757 }
66758
66759 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
66760 else
66761 rc = NF_ACCEPT;
66762 /* do not touch skb anymore */
66763 - atomic_inc(&cp->in_pkts);
66764 + atomic_inc_unchecked(&cp->in_pkts);
66765 goto out;
66766 }
66767
66768 diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
66769 --- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
66770 +++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
66771 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
66772
66773 To compile it as a module, choose M here. If unsure, say N.
66774
66775 +config NETFILTER_XT_MATCH_GRADM
66776 + tristate '"gradm" match support'
66777 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
66778 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
66779 + ---help---
66780 + The gradm match allows to match on grsecurity RBAC being enabled.
66781 + It is useful when iptables rules are applied early on bootup to
66782 + prevent connections to the machine (except from a trusted host)
66783 + while the RBAC system is disabled.
66784 +
66785 config NETFILTER_XT_MATCH_HASHLIMIT
66786 tristate '"hashlimit" match support'
66787 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
66788 diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
66789 --- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
66790 +++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
66791 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
66792 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
66793 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
66794 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
66795 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
66796 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
66797 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
66798 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
66799 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
66800 --- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
66801 +++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
66802 @@ -70,7 +70,7 @@ struct nfulnl_instance {
66803 };
66804
66805 static DEFINE_SPINLOCK(instances_lock);
66806 -static atomic_t global_seq;
66807 +static atomic_unchecked_t global_seq;
66808
66809 #define INSTANCE_BUCKETS 16
66810 static struct hlist_head instance_table[INSTANCE_BUCKETS];
66811 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
66812 /* global sequence number */
66813 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
66814 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
66815 - htonl(atomic_inc_return(&global_seq)));
66816 + htonl(atomic_inc_return_unchecked(&global_seq)));
66817
66818 if (data_len) {
66819 struct nlattr *nla;
66820 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
66821 --- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
66822 +++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
66823 @@ -58,7 +58,7 @@ struct nfqnl_instance {
66824 */
66825 spinlock_t lock;
66826 unsigned int queue_total;
66827 - atomic_t id_sequence; /* 'sequence' of pkt ids */
66828 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
66829 struct list_head queue_list; /* packets in queue */
66830 };
66831
66832 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
66833 nfmsg->version = NFNETLINK_V0;
66834 nfmsg->res_id = htons(queue->queue_num);
66835
66836 - entry->id = atomic_inc_return(&queue->id_sequence);
66837 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
66838 pmsg.packet_id = htonl(entry->id);
66839 pmsg.hw_protocol = entskb->protocol;
66840 pmsg.hook = entry->hook;
66841 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
66842 inst->peer_pid, inst->queue_total,
66843 inst->copy_mode, inst->copy_range,
66844 inst->queue_dropped, inst->queue_user_dropped,
66845 - atomic_read(&inst->id_sequence), 1);
66846 + atomic_read_unchecked(&inst->id_sequence), 1);
66847 }
66848
66849 static const struct seq_operations nfqnl_seq_ops = {
66850 diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
66851 --- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
66852 +++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
66853 @@ -0,0 +1,51 @@
66854 +/*
66855 + * gradm match for netfilter
66856 + * Copyright © Zbigniew Krzystolik, 2010
66857 + *
66858 + * This program is free software; you can redistribute it and/or modify
66859 + * it under the terms of the GNU General Public License; either version
66860 + * 2 or 3 as published by the Free Software Foundation.
66861 + */
66862 +#include <linux/module.h>
66863 +#include <linux/moduleparam.h>
66864 +#include <linux/skbuff.h>
66865 +#include <linux/netfilter/x_tables.h>
66866 +#include <linux/grsecurity.h>
66867 +#include <linux/netfilter/xt_gradm.h>
66868 +
66869 +static bool
66870 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
66871 +{
66872 + const struct xt_gradm_mtinfo *info = par->matchinfo;
66873 + bool retval = false;
66874 + if (gr_acl_is_enabled())
66875 + retval = true;
66876 + return retval ^ info->invflags;
66877 +}
66878 +
66879 +static struct xt_match gradm_mt_reg __read_mostly = {
66880 + .name = "gradm",
66881 + .revision = 0,
66882 + .family = NFPROTO_UNSPEC,
66883 + .match = gradm_mt,
66884 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
66885 + .me = THIS_MODULE,
66886 +};
66887 +
66888 +static int __init gradm_mt_init(void)
66889 +{
66890 + return xt_register_match(&gradm_mt_reg);
66891 +}
66892 +
66893 +static void __exit gradm_mt_exit(void)
66894 +{
66895 + xt_unregister_match(&gradm_mt_reg);
66896 +}
66897 +
66898 +module_init(gradm_mt_init);
66899 +module_exit(gradm_mt_exit);
66900 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
66901 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
66902 +MODULE_LICENSE("GPL");
66903 +MODULE_ALIAS("ipt_gradm");
66904 +MODULE_ALIAS("ip6t_gradm");
66905 diff -urNp linux-3.0.4/net/netfilter/xt_statistic.c linux-3.0.4/net/netfilter/xt_statistic.c
66906 --- linux-3.0.4/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
66907 +++ linux-3.0.4/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
66908 @@ -18,7 +18,7 @@
66909 #include <linux/netfilter/x_tables.h>
66910
66911 struct xt_statistic_priv {
66912 - atomic_t count;
66913 + atomic_unchecked_t count;
66914 } ____cacheline_aligned_in_smp;
66915
66916 MODULE_LICENSE("GPL");
66917 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
66918 break;
66919 case XT_STATISTIC_MODE_NTH:
66920 do {
66921 - oval = atomic_read(&info->master->count);
66922 + oval = atomic_read_unchecked(&info->master->count);
66923 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
66924 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
66925 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
66926 if (nval == 0)
66927 ret = !ret;
66928 break;
66929 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
66930 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
66931 if (info->master == NULL)
66932 return -ENOMEM;
66933 - atomic_set(&info->master->count, info->u.nth.count);
66934 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
66935
66936 return 0;
66937 }
66938 diff -urNp linux-3.0.4/net/netlink/af_netlink.c linux-3.0.4/net/netlink/af_netlink.c
66939 --- linux-3.0.4/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
66940 +++ linux-3.0.4/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
66941 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
66942 sk->sk_error_report(sk);
66943 }
66944 }
66945 - atomic_inc(&sk->sk_drops);
66946 + atomic_inc_unchecked(&sk->sk_drops);
66947 }
66948
66949 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
66950 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
66951 sk_wmem_alloc_get(s),
66952 nlk->cb,
66953 atomic_read(&s->sk_refcnt),
66954 - atomic_read(&s->sk_drops),
66955 + atomic_read_unchecked(&s->sk_drops),
66956 sock_i_ino(s)
66957 );
66958
66959 diff -urNp linux-3.0.4/net/netrom/af_netrom.c linux-3.0.4/net/netrom/af_netrom.c
66960 --- linux-3.0.4/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
66961 +++ linux-3.0.4/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
66962 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
66963 struct sock *sk = sock->sk;
66964 struct nr_sock *nr = nr_sk(sk);
66965
66966 + memset(sax, 0, sizeof(*sax));
66967 lock_sock(sk);
66968 if (peer != 0) {
66969 if (sk->sk_state != TCP_ESTABLISHED) {
66970 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
66971 *uaddr_len = sizeof(struct full_sockaddr_ax25);
66972 } else {
66973 sax->fsa_ax25.sax25_family = AF_NETROM;
66974 - sax->fsa_ax25.sax25_ndigis = 0;
66975 sax->fsa_ax25.sax25_call = nr->source_addr;
66976 *uaddr_len = sizeof(struct sockaddr_ax25);
66977 }
66978 diff -urNp linux-3.0.4/net/packet/af_packet.c linux-3.0.4/net/packet/af_packet.c
66979 --- linux-3.0.4/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
66980 +++ linux-3.0.4/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
66981 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
66982
66983 spin_lock(&sk->sk_receive_queue.lock);
66984 po->stats.tp_packets++;
66985 - skb->dropcount = atomic_read(&sk->sk_drops);
66986 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
66987 __skb_queue_tail(&sk->sk_receive_queue, skb);
66988 spin_unlock(&sk->sk_receive_queue.lock);
66989 sk->sk_data_ready(sk, skb->len);
66990 return 0;
66991
66992 drop_n_acct:
66993 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
66994 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
66995
66996 drop_n_restore:
66997 if (skb_head != skb->data && skb_shared(skb)) {
66998 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
66999 case PACKET_HDRLEN:
67000 if (len > sizeof(int))
67001 len = sizeof(int);
67002 - if (copy_from_user(&val, optval, len))
67003 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
67004 return -EFAULT;
67005 switch (val) {
67006 case TPACKET_V1:
67007 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
67008
67009 if (put_user(len, optlen))
67010 return -EFAULT;
67011 - if (copy_to_user(optval, data, len))
67012 + if (len > sizeof(st) || copy_to_user(optval, data, len))
67013 return -EFAULT;
67014 return 0;
67015 }
67016 diff -urNp linux-3.0.4/net/phonet/af_phonet.c linux-3.0.4/net/phonet/af_phonet.c
67017 --- linux-3.0.4/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
67018 +++ linux-3.0.4/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
67019 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
67020 {
67021 struct phonet_protocol *pp;
67022
67023 - if (protocol >= PHONET_NPROTO)
67024 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67025 return NULL;
67026
67027 rcu_read_lock();
67028 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
67029 {
67030 int err = 0;
67031
67032 - if (protocol >= PHONET_NPROTO)
67033 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67034 return -EINVAL;
67035
67036 err = proto_register(pp->prot, 1);
67037 diff -urNp linux-3.0.4/net/phonet/pep.c linux-3.0.4/net/phonet/pep.c
67038 --- linux-3.0.4/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
67039 +++ linux-3.0.4/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
67040 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67041
67042 case PNS_PEP_CTRL_REQ:
67043 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67044 - atomic_inc(&sk->sk_drops);
67045 + atomic_inc_unchecked(&sk->sk_drops);
67046 break;
67047 }
67048 __skb_pull(skb, 4);
67049 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67050 }
67051
67052 if (pn->rx_credits == 0) {
67053 - atomic_inc(&sk->sk_drops);
67054 + atomic_inc_unchecked(&sk->sk_drops);
67055 err = -ENOBUFS;
67056 break;
67057 }
67058 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67059 }
67060
67061 if (pn->rx_credits == 0) {
67062 - atomic_inc(&sk->sk_drops);
67063 + atomic_inc_unchecked(&sk->sk_drops);
67064 err = NET_RX_DROP;
67065 break;
67066 }
67067 diff -urNp linux-3.0.4/net/phonet/socket.c linux-3.0.4/net/phonet/socket.c
67068 --- linux-3.0.4/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
67069 +++ linux-3.0.4/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
67070 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
67071 pn->resource, sk->sk_state,
67072 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67073 sock_i_uid(sk), sock_i_ino(sk),
67074 - atomic_read(&sk->sk_refcnt), sk,
67075 - atomic_read(&sk->sk_drops), &len);
67076 + atomic_read(&sk->sk_refcnt),
67077 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67078 + NULL,
67079 +#else
67080 + sk,
67081 +#endif
67082 + atomic_read_unchecked(&sk->sk_drops), &len);
67083 }
67084 seq_printf(seq, "%*s\n", 127 - len, "");
67085 return 0;
67086 diff -urNp linux-3.0.4/net/rds/cong.c linux-3.0.4/net/rds/cong.c
67087 --- linux-3.0.4/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
67088 +++ linux-3.0.4/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
67089 @@ -77,7 +77,7 @@
67090 * finds that the saved generation number is smaller than the global generation
67091 * number, it wakes up the process.
67092 */
67093 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67094 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67095
67096 /*
67097 * Congestion monitoring
67098 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67099 rdsdebug("waking map %p for %pI4\n",
67100 map, &map->m_addr);
67101 rds_stats_inc(s_cong_update_received);
67102 - atomic_inc(&rds_cong_generation);
67103 + atomic_inc_unchecked(&rds_cong_generation);
67104 if (waitqueue_active(&map->m_waitq))
67105 wake_up(&map->m_waitq);
67106 if (waitqueue_active(&rds_poll_waitq))
67107 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67108
67109 int rds_cong_updated_since(unsigned long *recent)
67110 {
67111 - unsigned long gen = atomic_read(&rds_cong_generation);
67112 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67113
67114 if (likely(*recent == gen))
67115 return 0;
67116 diff -urNp linux-3.0.4/net/rds/ib_cm.c linux-3.0.4/net/rds/ib_cm.c
67117 --- linux-3.0.4/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
67118 +++ linux-3.0.4/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
67119 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67120 /* Clear the ACK state */
67121 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67122 #ifdef KERNEL_HAS_ATOMIC64
67123 - atomic64_set(&ic->i_ack_next, 0);
67124 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67125 #else
67126 ic->i_ack_next = 0;
67127 #endif
67128 diff -urNp linux-3.0.4/net/rds/ib.h linux-3.0.4/net/rds/ib.h
67129 --- linux-3.0.4/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
67130 +++ linux-3.0.4/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
67131 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67132 /* sending acks */
67133 unsigned long i_ack_flags;
67134 #ifdef KERNEL_HAS_ATOMIC64
67135 - atomic64_t i_ack_next; /* next ACK to send */
67136 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67137 #else
67138 spinlock_t i_ack_lock; /* protect i_ack_next */
67139 u64 i_ack_next; /* next ACK to send */
67140 diff -urNp linux-3.0.4/net/rds/ib_recv.c linux-3.0.4/net/rds/ib_recv.c
67141 --- linux-3.0.4/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
67142 +++ linux-3.0.4/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
67143 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67144 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67145 int ack_required)
67146 {
67147 - atomic64_set(&ic->i_ack_next, seq);
67148 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67149 if (ack_required) {
67150 smp_mb__before_clear_bit();
67151 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67152 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67153 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67154 smp_mb__after_clear_bit();
67155
67156 - return atomic64_read(&ic->i_ack_next);
67157 + return atomic64_read_unchecked(&ic->i_ack_next);
67158 }
67159 #endif
67160
67161 diff -urNp linux-3.0.4/net/rds/iw_cm.c linux-3.0.4/net/rds/iw_cm.c
67162 --- linux-3.0.4/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
67163 +++ linux-3.0.4/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
67164 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67165 /* Clear the ACK state */
67166 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67167 #ifdef KERNEL_HAS_ATOMIC64
67168 - atomic64_set(&ic->i_ack_next, 0);
67169 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67170 #else
67171 ic->i_ack_next = 0;
67172 #endif
67173 diff -urNp linux-3.0.4/net/rds/iw.h linux-3.0.4/net/rds/iw.h
67174 --- linux-3.0.4/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
67175 +++ linux-3.0.4/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
67176 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67177 /* sending acks */
67178 unsigned long i_ack_flags;
67179 #ifdef KERNEL_HAS_ATOMIC64
67180 - atomic64_t i_ack_next; /* next ACK to send */
67181 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67182 #else
67183 spinlock_t i_ack_lock; /* protect i_ack_next */
67184 u64 i_ack_next; /* next ACK to send */
67185 diff -urNp linux-3.0.4/net/rds/iw_rdma.c linux-3.0.4/net/rds/iw_rdma.c
67186 --- linux-3.0.4/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
67187 +++ linux-3.0.4/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
67188 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67189 struct rdma_cm_id *pcm_id;
67190 int rc;
67191
67192 + pax_track_stack();
67193 +
67194 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67195 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67196
67197 diff -urNp linux-3.0.4/net/rds/iw_recv.c linux-3.0.4/net/rds/iw_recv.c
67198 --- linux-3.0.4/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
67199 +++ linux-3.0.4/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
67200 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67201 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67202 int ack_required)
67203 {
67204 - atomic64_set(&ic->i_ack_next, seq);
67205 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67206 if (ack_required) {
67207 smp_mb__before_clear_bit();
67208 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67209 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67210 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67211 smp_mb__after_clear_bit();
67212
67213 - return atomic64_read(&ic->i_ack_next);
67214 + return atomic64_read_unchecked(&ic->i_ack_next);
67215 }
67216 #endif
67217
67218 diff -urNp linux-3.0.4/net/rxrpc/af_rxrpc.c linux-3.0.4/net/rxrpc/af_rxrpc.c
67219 --- linux-3.0.4/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
67220 +++ linux-3.0.4/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
67221 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67222 __be32 rxrpc_epoch;
67223
67224 /* current debugging ID */
67225 -atomic_t rxrpc_debug_id;
67226 +atomic_unchecked_t rxrpc_debug_id;
67227
67228 /* count of skbs currently in use */
67229 atomic_t rxrpc_n_skbs;
67230 diff -urNp linux-3.0.4/net/rxrpc/ar-ack.c linux-3.0.4/net/rxrpc/ar-ack.c
67231 --- linux-3.0.4/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
67232 +++ linux-3.0.4/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
67233 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67234
67235 _enter("{%d,%d,%d,%d},",
67236 call->acks_hard, call->acks_unacked,
67237 - atomic_read(&call->sequence),
67238 + atomic_read_unchecked(&call->sequence),
67239 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67240
67241 stop = 0;
67242 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67243
67244 /* each Tx packet has a new serial number */
67245 sp->hdr.serial =
67246 - htonl(atomic_inc_return(&call->conn->serial));
67247 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67248
67249 hdr = (struct rxrpc_header *) txb->head;
67250 hdr->serial = sp->hdr.serial;
67251 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
67252 */
67253 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67254 {
67255 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67256 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67257 }
67258
67259 /*
67260 @@ -629,7 +629,7 @@ process_further:
67261
67262 latest = ntohl(sp->hdr.serial);
67263 hard = ntohl(ack.firstPacket);
67264 - tx = atomic_read(&call->sequence);
67265 + tx = atomic_read_unchecked(&call->sequence);
67266
67267 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67268 latest,
67269 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
67270 u32 abort_code = RX_PROTOCOL_ERROR;
67271 u8 *acks = NULL;
67272
67273 + pax_track_stack();
67274 +
67275 //printk("\n--------------------\n");
67276 _enter("{%d,%s,%lx} [%lu]",
67277 call->debug_id, rxrpc_call_states[call->state], call->events,
67278 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
67279 goto maybe_reschedule;
67280
67281 send_ACK_with_skew:
67282 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67283 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67284 ntohl(ack.serial));
67285 send_ACK:
67286 mtu = call->conn->trans->peer->if_mtu;
67287 @@ -1173,7 +1175,7 @@ send_ACK:
67288 ackinfo.rxMTU = htonl(5692);
67289 ackinfo.jumbo_max = htonl(4);
67290
67291 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67292 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67293 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67294 ntohl(hdr.serial),
67295 ntohs(ack.maxSkew),
67296 @@ -1191,7 +1193,7 @@ send_ACK:
67297 send_message:
67298 _debug("send message");
67299
67300 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67301 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67302 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67303 send_message_2:
67304
67305 diff -urNp linux-3.0.4/net/rxrpc/ar-call.c linux-3.0.4/net/rxrpc/ar-call.c
67306 --- linux-3.0.4/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
67307 +++ linux-3.0.4/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
67308 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67309 spin_lock_init(&call->lock);
67310 rwlock_init(&call->state_lock);
67311 atomic_set(&call->usage, 1);
67312 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67313 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67314 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67315
67316 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67317 diff -urNp linux-3.0.4/net/rxrpc/ar-connection.c linux-3.0.4/net/rxrpc/ar-connection.c
67318 --- linux-3.0.4/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
67319 +++ linux-3.0.4/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
67320 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67321 rwlock_init(&conn->lock);
67322 spin_lock_init(&conn->state_lock);
67323 atomic_set(&conn->usage, 1);
67324 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67325 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67326 conn->avail_calls = RXRPC_MAXCALLS;
67327 conn->size_align = 4;
67328 conn->header_size = sizeof(struct rxrpc_header);
67329 diff -urNp linux-3.0.4/net/rxrpc/ar-connevent.c linux-3.0.4/net/rxrpc/ar-connevent.c
67330 --- linux-3.0.4/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
67331 +++ linux-3.0.4/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
67332 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67333
67334 len = iov[0].iov_len + iov[1].iov_len;
67335
67336 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67337 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67338 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67339
67340 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67341 diff -urNp linux-3.0.4/net/rxrpc/ar-input.c linux-3.0.4/net/rxrpc/ar-input.c
67342 --- linux-3.0.4/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
67343 +++ linux-3.0.4/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
67344 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67345 /* track the latest serial number on this connection for ACK packet
67346 * information */
67347 serial = ntohl(sp->hdr.serial);
67348 - hi_serial = atomic_read(&call->conn->hi_serial);
67349 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67350 while (serial > hi_serial)
67351 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67352 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67353 serial);
67354
67355 /* request ACK generation for any ACK or DATA packet that requests
67356 diff -urNp linux-3.0.4/net/rxrpc/ar-internal.h linux-3.0.4/net/rxrpc/ar-internal.h
67357 --- linux-3.0.4/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
67358 +++ linux-3.0.4/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
67359 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67360 int error; /* error code for local abort */
67361 int debug_id; /* debug ID for printks */
67362 unsigned call_counter; /* call ID counter */
67363 - atomic_t serial; /* packet serial number counter */
67364 - atomic_t hi_serial; /* highest serial number received */
67365 + atomic_unchecked_t serial; /* packet serial number counter */
67366 + atomic_unchecked_t hi_serial; /* highest serial number received */
67367 u8 avail_calls; /* number of calls available */
67368 u8 size_align; /* data size alignment (for security) */
67369 u8 header_size; /* rxrpc + security header size */
67370 @@ -346,7 +346,7 @@ struct rxrpc_call {
67371 spinlock_t lock;
67372 rwlock_t state_lock; /* lock for state transition */
67373 atomic_t usage;
67374 - atomic_t sequence; /* Tx data packet sequence counter */
67375 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67376 u32 abort_code; /* local/remote abort code */
67377 enum { /* current state of call */
67378 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67379 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67380 */
67381 extern atomic_t rxrpc_n_skbs;
67382 extern __be32 rxrpc_epoch;
67383 -extern atomic_t rxrpc_debug_id;
67384 +extern atomic_unchecked_t rxrpc_debug_id;
67385 extern struct workqueue_struct *rxrpc_workqueue;
67386
67387 /*
67388 diff -urNp linux-3.0.4/net/rxrpc/ar-local.c linux-3.0.4/net/rxrpc/ar-local.c
67389 --- linux-3.0.4/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
67390 +++ linux-3.0.4/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
67391 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67392 spin_lock_init(&local->lock);
67393 rwlock_init(&local->services_lock);
67394 atomic_set(&local->usage, 1);
67395 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67396 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67397 memcpy(&local->srx, srx, sizeof(*srx));
67398 }
67399
67400 diff -urNp linux-3.0.4/net/rxrpc/ar-output.c linux-3.0.4/net/rxrpc/ar-output.c
67401 --- linux-3.0.4/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
67402 +++ linux-3.0.4/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
67403 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67404 sp->hdr.cid = call->cid;
67405 sp->hdr.callNumber = call->call_id;
67406 sp->hdr.seq =
67407 - htonl(atomic_inc_return(&call->sequence));
67408 + htonl(atomic_inc_return_unchecked(&call->sequence));
67409 sp->hdr.serial =
67410 - htonl(atomic_inc_return(&conn->serial));
67411 + htonl(atomic_inc_return_unchecked(&conn->serial));
67412 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67413 sp->hdr.userStatus = 0;
67414 sp->hdr.securityIndex = conn->security_ix;
67415 diff -urNp linux-3.0.4/net/rxrpc/ar-peer.c linux-3.0.4/net/rxrpc/ar-peer.c
67416 --- linux-3.0.4/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
67417 +++ linux-3.0.4/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
67418 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67419 INIT_LIST_HEAD(&peer->error_targets);
67420 spin_lock_init(&peer->lock);
67421 atomic_set(&peer->usage, 1);
67422 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67423 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67424 memcpy(&peer->srx, srx, sizeof(*srx));
67425
67426 rxrpc_assess_MTU_size(peer);
67427 diff -urNp linux-3.0.4/net/rxrpc/ar-proc.c linux-3.0.4/net/rxrpc/ar-proc.c
67428 --- linux-3.0.4/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
67429 +++ linux-3.0.4/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
67430 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67431 atomic_read(&conn->usage),
67432 rxrpc_conn_states[conn->state],
67433 key_serial(conn->key),
67434 - atomic_read(&conn->serial),
67435 - atomic_read(&conn->hi_serial));
67436 + atomic_read_unchecked(&conn->serial),
67437 + atomic_read_unchecked(&conn->hi_serial));
67438
67439 return 0;
67440 }
67441 diff -urNp linux-3.0.4/net/rxrpc/ar-transport.c linux-3.0.4/net/rxrpc/ar-transport.c
67442 --- linux-3.0.4/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
67443 +++ linux-3.0.4/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
67444 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67445 spin_lock_init(&trans->client_lock);
67446 rwlock_init(&trans->conn_lock);
67447 atomic_set(&trans->usage, 1);
67448 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67449 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67450
67451 if (peer->srx.transport.family == AF_INET) {
67452 switch (peer->srx.transport_type) {
67453 diff -urNp linux-3.0.4/net/rxrpc/rxkad.c linux-3.0.4/net/rxrpc/rxkad.c
67454 --- linux-3.0.4/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
67455 +++ linux-3.0.4/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
67456 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67457 u16 check;
67458 int nsg;
67459
67460 + pax_track_stack();
67461 +
67462 sp = rxrpc_skb(skb);
67463
67464 _enter("");
67465 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67466 u16 check;
67467 int nsg;
67468
67469 + pax_track_stack();
67470 +
67471 _enter("");
67472
67473 sp = rxrpc_skb(skb);
67474 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67475
67476 len = iov[0].iov_len + iov[1].iov_len;
67477
67478 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67479 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67480 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67481
67482 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67483 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67484
67485 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67486
67487 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67488 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67489 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67490
67491 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67492 diff -urNp linux-3.0.4/net/sctp/proc.c linux-3.0.4/net/sctp/proc.c
67493 --- linux-3.0.4/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
67494 +++ linux-3.0.4/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
67495 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
67496 seq_printf(seq,
67497 "%8pK %8pK %-3d %-3d %-2d %-4d "
67498 "%4d %8d %8d %7d %5lu %-5d %5d ",
67499 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67500 + assoc, sk,
67501 + sctp_sk(sk)->type, sk->sk_state,
67502 assoc->state, hash,
67503 assoc->assoc_id,
67504 assoc->sndbuf_used,
67505 diff -urNp linux-3.0.4/net/sctp/socket.c linux-3.0.4/net/sctp/socket.c
67506 --- linux-3.0.4/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
67507 +++ linux-3.0.4/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
67508 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
67509 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67510 if (space_left < addrlen)
67511 return -ENOMEM;
67512 - if (copy_to_user(to, &temp, addrlen))
67513 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67514 return -EFAULT;
67515 to += addrlen;
67516 cnt++;
67517 diff -urNp linux-3.0.4/net/socket.c linux-3.0.4/net/socket.c
67518 --- linux-3.0.4/net/socket.c 2011-08-23 21:44:40.000000000 -0400
67519 +++ linux-3.0.4/net/socket.c 2011-08-23 21:48:14.000000000 -0400
67520 @@ -88,6 +88,7 @@
67521 #include <linux/nsproxy.h>
67522 #include <linux/magic.h>
67523 #include <linux/slab.h>
67524 +#include <linux/in.h>
67525
67526 #include <asm/uaccess.h>
67527 #include <asm/unistd.h>
67528 @@ -105,6 +106,8 @@
67529 #include <linux/sockios.h>
67530 #include <linux/atalk.h>
67531
67532 +#include <linux/grsock.h>
67533 +
67534 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67535 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67536 unsigned long nr_segs, loff_t pos);
67537 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
67538 &sockfs_dentry_operations, SOCKFS_MAGIC);
67539 }
67540
67541 -static struct vfsmount *sock_mnt __read_mostly;
67542 +struct vfsmount *sock_mnt __read_mostly;
67543
67544 static struct file_system_type sock_fs_type = {
67545 .name = "sockfs",
67546 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
67547 return -EAFNOSUPPORT;
67548 if (type < 0 || type >= SOCK_MAX)
67549 return -EINVAL;
67550 + if (protocol < 0)
67551 + return -EINVAL;
67552
67553 /* Compatibility.
67554
67555 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
67556 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
67557 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
67558
67559 + if(!gr_search_socket(family, type, protocol)) {
67560 + retval = -EACCES;
67561 + goto out;
67562 + }
67563 +
67564 + if (gr_handle_sock_all(family, type, protocol)) {
67565 + retval = -EACCES;
67566 + goto out;
67567 + }
67568 +
67569 retval = sock_create(family, type, protocol, &sock);
67570 if (retval < 0)
67571 goto out;
67572 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67573 if (sock) {
67574 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
67575 if (err >= 0) {
67576 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
67577 + err = -EACCES;
67578 + goto error;
67579 + }
67580 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
67581 + if (err)
67582 + goto error;
67583 +
67584 err = security_socket_bind(sock,
67585 (struct sockaddr *)&address,
67586 addrlen);
67587 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67588 (struct sockaddr *)
67589 &address, addrlen);
67590 }
67591 +error:
67592 fput_light(sock->file, fput_needed);
67593 }
67594 return err;
67595 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
67596 if ((unsigned)backlog > somaxconn)
67597 backlog = somaxconn;
67598
67599 + if (gr_handle_sock_server_other(sock->sk)) {
67600 + err = -EPERM;
67601 + goto error;
67602 + }
67603 +
67604 + err = gr_search_listen(sock);
67605 + if (err)
67606 + goto error;
67607 +
67608 err = security_socket_listen(sock, backlog);
67609 if (!err)
67610 err = sock->ops->listen(sock, backlog);
67611
67612 +error:
67613 fput_light(sock->file, fput_needed);
67614 }
67615 return err;
67616 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67617 newsock->type = sock->type;
67618 newsock->ops = sock->ops;
67619
67620 + if (gr_handle_sock_server_other(sock->sk)) {
67621 + err = -EPERM;
67622 + sock_release(newsock);
67623 + goto out_put;
67624 + }
67625 +
67626 + err = gr_search_accept(sock);
67627 + if (err) {
67628 + sock_release(newsock);
67629 + goto out_put;
67630 + }
67631 +
67632 /*
67633 * We don't need try_module_get here, as the listening socket (sock)
67634 * has the protocol module (sock->ops->owner) held.
67635 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67636 fd_install(newfd, newfile);
67637 err = newfd;
67638
67639 + gr_attach_curr_ip(newsock->sk);
67640 +
67641 out_put:
67642 fput_light(sock->file, fput_needed);
67643 out:
67644 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67645 int, addrlen)
67646 {
67647 struct socket *sock;
67648 + struct sockaddr *sck;
67649 struct sockaddr_storage address;
67650 int err, fput_needed;
67651
67652 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67653 if (err < 0)
67654 goto out_put;
67655
67656 + sck = (struct sockaddr *)&address;
67657 +
67658 + if (gr_handle_sock_client(sck)) {
67659 + err = -EACCES;
67660 + goto out_put;
67661 + }
67662 +
67663 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
67664 + if (err)
67665 + goto out_put;
67666 +
67667 err =
67668 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
67669 if (err)
67670 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
67671 unsigned char *ctl_buf = ctl;
67672 int err, ctl_len, iov_size, total_len;
67673
67674 + pax_track_stack();
67675 +
67676 err = -EFAULT;
67677 if (MSG_CMSG_COMPAT & flags) {
67678 if (get_compat_msghdr(msg_sys, msg_compat))
67679 diff -urNp linux-3.0.4/net/sunrpc/sched.c linux-3.0.4/net/sunrpc/sched.c
67680 --- linux-3.0.4/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
67681 +++ linux-3.0.4/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
67682 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
67683 #ifdef RPC_DEBUG
67684 static void rpc_task_set_debuginfo(struct rpc_task *task)
67685 {
67686 - static atomic_t rpc_pid;
67687 + static atomic_unchecked_t rpc_pid;
67688
67689 - task->tk_pid = atomic_inc_return(&rpc_pid);
67690 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
67691 }
67692 #else
67693 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
67694 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c
67695 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
67696 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
67697 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
67698 static unsigned int min_max_inline = 4096;
67699 static unsigned int max_max_inline = 65536;
67700
67701 -atomic_t rdma_stat_recv;
67702 -atomic_t rdma_stat_read;
67703 -atomic_t rdma_stat_write;
67704 -atomic_t rdma_stat_sq_starve;
67705 -atomic_t rdma_stat_rq_starve;
67706 -atomic_t rdma_stat_rq_poll;
67707 -atomic_t rdma_stat_rq_prod;
67708 -atomic_t rdma_stat_sq_poll;
67709 -atomic_t rdma_stat_sq_prod;
67710 +atomic_unchecked_t rdma_stat_recv;
67711 +atomic_unchecked_t rdma_stat_read;
67712 +atomic_unchecked_t rdma_stat_write;
67713 +atomic_unchecked_t rdma_stat_sq_starve;
67714 +atomic_unchecked_t rdma_stat_rq_starve;
67715 +atomic_unchecked_t rdma_stat_rq_poll;
67716 +atomic_unchecked_t rdma_stat_rq_prod;
67717 +atomic_unchecked_t rdma_stat_sq_poll;
67718 +atomic_unchecked_t rdma_stat_sq_prod;
67719
67720 /* Temporary NFS request map and context caches */
67721 struct kmem_cache *svc_rdma_map_cachep;
67722 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
67723 len -= *ppos;
67724 if (len > *lenp)
67725 len = *lenp;
67726 - if (len && copy_to_user(buffer, str_buf, len))
67727 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
67728 return -EFAULT;
67729 *lenp = len;
67730 *ppos += len;
67731 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
67732 {
67733 .procname = "rdma_stat_read",
67734 .data = &rdma_stat_read,
67735 - .maxlen = sizeof(atomic_t),
67736 + .maxlen = sizeof(atomic_unchecked_t),
67737 .mode = 0644,
67738 .proc_handler = read_reset_stat,
67739 },
67740 {
67741 .procname = "rdma_stat_recv",
67742 .data = &rdma_stat_recv,
67743 - .maxlen = sizeof(atomic_t),
67744 + .maxlen = sizeof(atomic_unchecked_t),
67745 .mode = 0644,
67746 .proc_handler = read_reset_stat,
67747 },
67748 {
67749 .procname = "rdma_stat_write",
67750 .data = &rdma_stat_write,
67751 - .maxlen = sizeof(atomic_t),
67752 + .maxlen = sizeof(atomic_unchecked_t),
67753 .mode = 0644,
67754 .proc_handler = read_reset_stat,
67755 },
67756 {
67757 .procname = "rdma_stat_sq_starve",
67758 .data = &rdma_stat_sq_starve,
67759 - .maxlen = sizeof(atomic_t),
67760 + .maxlen = sizeof(atomic_unchecked_t),
67761 .mode = 0644,
67762 .proc_handler = read_reset_stat,
67763 },
67764 {
67765 .procname = "rdma_stat_rq_starve",
67766 .data = &rdma_stat_rq_starve,
67767 - .maxlen = sizeof(atomic_t),
67768 + .maxlen = sizeof(atomic_unchecked_t),
67769 .mode = 0644,
67770 .proc_handler = read_reset_stat,
67771 },
67772 {
67773 .procname = "rdma_stat_rq_poll",
67774 .data = &rdma_stat_rq_poll,
67775 - .maxlen = sizeof(atomic_t),
67776 + .maxlen = sizeof(atomic_unchecked_t),
67777 .mode = 0644,
67778 .proc_handler = read_reset_stat,
67779 },
67780 {
67781 .procname = "rdma_stat_rq_prod",
67782 .data = &rdma_stat_rq_prod,
67783 - .maxlen = sizeof(atomic_t),
67784 + .maxlen = sizeof(atomic_unchecked_t),
67785 .mode = 0644,
67786 .proc_handler = read_reset_stat,
67787 },
67788 {
67789 .procname = "rdma_stat_sq_poll",
67790 .data = &rdma_stat_sq_poll,
67791 - .maxlen = sizeof(atomic_t),
67792 + .maxlen = sizeof(atomic_unchecked_t),
67793 .mode = 0644,
67794 .proc_handler = read_reset_stat,
67795 },
67796 {
67797 .procname = "rdma_stat_sq_prod",
67798 .data = &rdma_stat_sq_prod,
67799 - .maxlen = sizeof(atomic_t),
67800 + .maxlen = sizeof(atomic_unchecked_t),
67801 .mode = 0644,
67802 .proc_handler = read_reset_stat,
67803 },
67804 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
67805 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
67806 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
67807 @@ -499,7 +499,7 @@ next_sge:
67808 svc_rdma_put_context(ctxt, 0);
67809 goto out;
67810 }
67811 - atomic_inc(&rdma_stat_read);
67812 + atomic_inc_unchecked(&rdma_stat_read);
67813
67814 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
67815 chl_map->ch[ch_no].count -= read_wr.num_sge;
67816 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67817 dto_q);
67818 list_del_init(&ctxt->dto_q);
67819 } else {
67820 - atomic_inc(&rdma_stat_rq_starve);
67821 + atomic_inc_unchecked(&rdma_stat_rq_starve);
67822 clear_bit(XPT_DATA, &xprt->xpt_flags);
67823 ctxt = NULL;
67824 }
67825 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67826 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
67827 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
67828 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
67829 - atomic_inc(&rdma_stat_recv);
67830 + atomic_inc_unchecked(&rdma_stat_recv);
67831
67832 /* Build up the XDR from the receive buffers. */
67833 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
67834 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
67835 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
67836 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
67837 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
67838 write_wr.wr.rdma.remote_addr = to;
67839
67840 /* Post It */
67841 - atomic_inc(&rdma_stat_write);
67842 + atomic_inc_unchecked(&rdma_stat_write);
67843 if (svc_rdma_send(xprt, &write_wr))
67844 goto err;
67845 return 0;
67846 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
67847 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
67848 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
67849 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
67850 return;
67851
67852 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
67853 - atomic_inc(&rdma_stat_rq_poll);
67854 + atomic_inc_unchecked(&rdma_stat_rq_poll);
67855
67856 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
67857 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
67858 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
67859 }
67860
67861 if (ctxt)
67862 - atomic_inc(&rdma_stat_rq_prod);
67863 + atomic_inc_unchecked(&rdma_stat_rq_prod);
67864
67865 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
67866 /*
67867 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
67868 return;
67869
67870 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
67871 - atomic_inc(&rdma_stat_sq_poll);
67872 + atomic_inc_unchecked(&rdma_stat_sq_poll);
67873 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
67874 if (wc.status != IB_WC_SUCCESS)
67875 /* Close the transport */
67876 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
67877 }
67878
67879 if (ctxt)
67880 - atomic_inc(&rdma_stat_sq_prod);
67881 + atomic_inc_unchecked(&rdma_stat_sq_prod);
67882 }
67883
67884 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
67885 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
67886 spin_lock_bh(&xprt->sc_lock);
67887 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
67888 spin_unlock_bh(&xprt->sc_lock);
67889 - atomic_inc(&rdma_stat_sq_starve);
67890 + atomic_inc_unchecked(&rdma_stat_sq_starve);
67891
67892 /* See if we can opportunistically reap SQ WR to make room */
67893 sq_cq_reap(xprt);
67894 diff -urNp linux-3.0.4/net/sysctl_net.c linux-3.0.4/net/sysctl_net.c
67895 --- linux-3.0.4/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
67896 +++ linux-3.0.4/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
67897 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
67898 struct ctl_table *table)
67899 {
67900 /* Allow network administrator to have same access as root. */
67901 - if (capable(CAP_NET_ADMIN)) {
67902 + if (capable_nolog(CAP_NET_ADMIN)) {
67903 int mode = (table->mode >> 6) & 7;
67904 return (mode << 6) | (mode << 3) | mode;
67905 }
67906 diff -urNp linux-3.0.4/net/unix/af_unix.c linux-3.0.4/net/unix/af_unix.c
67907 --- linux-3.0.4/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
67908 +++ linux-3.0.4/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
67909 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
67910 err = -ECONNREFUSED;
67911 if (!S_ISSOCK(inode->i_mode))
67912 goto put_fail;
67913 +
67914 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
67915 + err = -EACCES;
67916 + goto put_fail;
67917 + }
67918 +
67919 u = unix_find_socket_byinode(inode);
67920 if (!u)
67921 goto put_fail;
67922 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
67923 if (u) {
67924 struct dentry *dentry;
67925 dentry = unix_sk(u)->dentry;
67926 +
67927 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
67928 + err = -EPERM;
67929 + sock_put(u);
67930 + goto fail;
67931 + }
67932 +
67933 if (dentry)
67934 touch_atime(unix_sk(u)->mnt, dentry);
67935 } else
67936 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
67937 err = security_path_mknod(&nd.path, dentry, mode, 0);
67938 if (err)
67939 goto out_mknod_drop_write;
67940 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
67941 + err = -EACCES;
67942 + goto out_mknod_drop_write;
67943 + }
67944 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
67945 out_mknod_drop_write:
67946 mnt_drop_write(nd.path.mnt);
67947 if (err)
67948 goto out_mknod_dput;
67949 +
67950 + gr_handle_create(dentry, nd.path.mnt);
67951 +
67952 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
67953 dput(nd.path.dentry);
67954 nd.path.dentry = dentry;
67955 diff -urNp linux-3.0.4/net/wireless/core.h linux-3.0.4/net/wireless/core.h
67956 --- linux-3.0.4/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
67957 +++ linux-3.0.4/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
67958 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
67959 struct mutex mtx;
67960
67961 /* rfkill support */
67962 - struct rfkill_ops rfkill_ops;
67963 + rfkill_ops_no_const rfkill_ops;
67964 struct rfkill *rfkill;
67965 struct work_struct rfkill_sync;
67966
67967 diff -urNp linux-3.0.4/net/wireless/wext-core.c linux-3.0.4/net/wireless/wext-core.c
67968 --- linux-3.0.4/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
67969 +++ linux-3.0.4/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
67970 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
67971 */
67972
67973 /* Support for very large requests */
67974 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
67975 - (user_length > descr->max_tokens)) {
67976 + if (user_length > descr->max_tokens) {
67977 /* Allow userspace to GET more than max so
67978 * we can support any size GET requests.
67979 * There is still a limit : -ENOMEM.
67980 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
67981 }
67982 }
67983
67984 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
67985 - /*
67986 - * If this is a GET, but not NOMAX, it means that the extra
67987 - * data is not bounded by userspace, but by max_tokens. Thus
67988 - * set the length to max_tokens. This matches the extra data
67989 - * allocation.
67990 - * The driver should fill it with the number of tokens it
67991 - * provided, and it may check iwp->length rather than having
67992 - * knowledge of max_tokens. If the driver doesn't change the
67993 - * iwp->length, this ioctl just copies back max_token tokens
67994 - * filled with zeroes. Hopefully the driver isn't claiming
67995 - * them to be valid data.
67996 - */
67997 - iwp->length = descr->max_tokens;
67998 - }
67999 -
68000 err = handler(dev, info, (union iwreq_data *) iwp, extra);
68001
68002 iwp->length += essid_compat;
68003 diff -urNp linux-3.0.4/net/xfrm/xfrm_policy.c linux-3.0.4/net/xfrm/xfrm_policy.c
68004 --- linux-3.0.4/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
68005 +++ linux-3.0.4/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
68006 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
68007 {
68008 policy->walk.dead = 1;
68009
68010 - atomic_inc(&policy->genid);
68011 + atomic_inc_unchecked(&policy->genid);
68012
68013 if (del_timer(&policy->timer))
68014 xfrm_pol_put(policy);
68015 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
68016 hlist_add_head(&policy->bydst, chain);
68017 xfrm_pol_hold(policy);
68018 net->xfrm.policy_count[dir]++;
68019 - atomic_inc(&flow_cache_genid);
68020 + atomic_inc_unchecked(&flow_cache_genid);
68021 if (delpol)
68022 __xfrm_policy_unlink(delpol, dir);
68023 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
68024 @@ -1528,7 +1528,7 @@ free_dst:
68025 goto out;
68026 }
68027
68028 -static int inline
68029 +static inline int
68030 xfrm_dst_alloc_copy(void **target, const void *src, int size)
68031 {
68032 if (!*target) {
68033 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
68034 return 0;
68035 }
68036
68037 -static int inline
68038 +static inline int
68039 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68040 {
68041 #ifdef CONFIG_XFRM_SUB_POLICY
68042 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
68043 #endif
68044 }
68045
68046 -static int inline
68047 +static inline int
68048 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68049 {
68050 #ifdef CONFIG_XFRM_SUB_POLICY
68051 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
68052
68053 xdst->num_pols = num_pols;
68054 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68055 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68056 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68057
68058 return xdst;
68059 }
68060 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68061 if (xdst->xfrm_genid != dst->xfrm->genid)
68062 return 0;
68063 if (xdst->num_pols > 0 &&
68064 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68065 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68066 return 0;
68067
68068 mtu = dst_mtu(dst->child);
68069 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
68070 sizeof(pol->xfrm_vec[i].saddr));
68071 pol->xfrm_vec[i].encap_family = mp->new_family;
68072 /* flush bundles */
68073 - atomic_inc(&pol->genid);
68074 + atomic_inc_unchecked(&pol->genid);
68075 }
68076 }
68077
68078 diff -urNp linux-3.0.4/net/xfrm/xfrm_user.c linux-3.0.4/net/xfrm/xfrm_user.c
68079 --- linux-3.0.4/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
68080 +++ linux-3.0.4/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
68081 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68082 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68083 int i;
68084
68085 + pax_track_stack();
68086 +
68087 if (xp->xfrm_nr == 0)
68088 return 0;
68089
68090 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68091 int err;
68092 int n = 0;
68093
68094 + pax_track_stack();
68095 +
68096 if (attrs[XFRMA_MIGRATE] == NULL)
68097 return -EINVAL;
68098
68099 diff -urNp linux-3.0.4/scripts/basic/fixdep.c linux-3.0.4/scripts/basic/fixdep.c
68100 --- linux-3.0.4/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
68101 +++ linux-3.0.4/scripts/basic/fixdep.c 2011-08-23 21:47:56.000000000 -0400
68102 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68103
68104 static void parse_config_file(const char *map, size_t len)
68105 {
68106 - const int *end = (const int *) (map + len);
68107 + const unsigned int *end = (const unsigned int *) (map + len);
68108 /* start at +1, so that p can never be < map */
68109 - const int *m = (const int *) map + 1;
68110 + const unsigned int *m = (const unsigned int *) map + 1;
68111 const char *p, *q;
68112
68113 for (; m < end; m++) {
68114 @@ -405,7 +405,7 @@ static void print_deps(void)
68115 static void traps(void)
68116 {
68117 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68118 - int *p = (int *)test;
68119 + unsigned int *p = (unsigned int *)test;
68120
68121 if (*p != INT_CONF) {
68122 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68123 diff -urNp linux-3.0.4/scripts/gcc-plugin.sh linux-3.0.4/scripts/gcc-plugin.sh
68124 --- linux-3.0.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68125 +++ linux-3.0.4/scripts/gcc-plugin.sh 2011-08-23 21:47:56.000000000 -0400
68126 @@ -0,0 +1,2 @@
68127 +#!/bin/sh
68128 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
68129 diff -urNp linux-3.0.4/scripts/Makefile.build linux-3.0.4/scripts/Makefile.build
68130 --- linux-3.0.4/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
68131 +++ linux-3.0.4/scripts/Makefile.build 2011-08-23 21:47:56.000000000 -0400
68132 @@ -109,7 +109,7 @@ endif
68133 endif
68134
68135 # Do not include host rules unless needed
68136 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68137 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68138 include scripts/Makefile.host
68139 endif
68140
68141 diff -urNp linux-3.0.4/scripts/Makefile.clean linux-3.0.4/scripts/Makefile.clean
68142 --- linux-3.0.4/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
68143 +++ linux-3.0.4/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
68144 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68145 __clean-files := $(extra-y) $(always) \
68146 $(targets) $(clean-files) \
68147 $(host-progs) \
68148 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68149 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68150 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68151
68152 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68153
68154 diff -urNp linux-3.0.4/scripts/Makefile.host linux-3.0.4/scripts/Makefile.host
68155 --- linux-3.0.4/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
68156 +++ linux-3.0.4/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
68157 @@ -31,6 +31,7 @@
68158 # Note: Shared libraries consisting of C++ files are not supported
68159
68160 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68161 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68162
68163 # C code
68164 # Executables compiled from a single .c file
68165 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68166 # Shared libaries (only .c supported)
68167 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68168 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68169 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68170 # Remove .so files from "xxx-objs"
68171 host-cobjs := $(filter-out %.so,$(host-cobjs))
68172
68173 diff -urNp linux-3.0.4/scripts/mod/file2alias.c linux-3.0.4/scripts/mod/file2alias.c
68174 --- linux-3.0.4/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
68175 +++ linux-3.0.4/scripts/mod/file2alias.c 2011-08-23 21:47:56.000000000 -0400
68176 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68177 unsigned long size, unsigned long id_size,
68178 void *symval)
68179 {
68180 - int i;
68181 + unsigned int i;
68182
68183 if (size % id_size || size < id_size) {
68184 if (cross_build != 0)
68185 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68186 /* USB is special because the bcdDevice can be matched against a numeric range */
68187 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68188 static void do_usb_entry(struct usb_device_id *id,
68189 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68190 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68191 unsigned char range_lo, unsigned char range_hi,
68192 unsigned char max, struct module *mod)
68193 {
68194 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68195 for (i = 0; i < count; i++) {
68196 const char *id = (char *)devs[i].id;
68197 char acpi_id[sizeof(devs[0].id)];
68198 - int j;
68199 + unsigned int j;
68200
68201 buf_printf(&mod->dev_table_buf,
68202 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68203 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68204
68205 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68206 const char *id = (char *)card->devs[j].id;
68207 - int i2, j2;
68208 + unsigned int i2, j2;
68209 int dup = 0;
68210
68211 if (!id[0])
68212 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68213 /* add an individual alias for every device entry */
68214 if (!dup) {
68215 char acpi_id[sizeof(card->devs[0].id)];
68216 - int k;
68217 + unsigned int k;
68218
68219 buf_printf(&mod->dev_table_buf,
68220 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68221 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
68222 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68223 char *alias)
68224 {
68225 - int i, j;
68226 + unsigned int i, j;
68227
68228 sprintf(alias, "dmi*");
68229
68230 diff -urNp linux-3.0.4/scripts/mod/modpost.c linux-3.0.4/scripts/mod/modpost.c
68231 --- linux-3.0.4/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
68232 +++ linux-3.0.4/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
68233 @@ -892,6 +892,7 @@ enum mismatch {
68234 ANY_INIT_TO_ANY_EXIT,
68235 ANY_EXIT_TO_ANY_INIT,
68236 EXPORT_TO_INIT_EXIT,
68237 + DATA_TO_TEXT
68238 };
68239
68240 struct sectioncheck {
68241 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
68242 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68243 .mismatch = EXPORT_TO_INIT_EXIT,
68244 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68245 +},
68246 +/* Do not reference code from writable data */
68247 +{
68248 + .fromsec = { DATA_SECTIONS, NULL },
68249 + .tosec = { TEXT_SECTIONS, NULL },
68250 + .mismatch = DATA_TO_TEXT
68251 }
68252 };
68253
68254 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
68255 continue;
68256 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68257 continue;
68258 - if (sym->st_value == addr)
68259 - return sym;
68260 /* Find a symbol nearby - addr are maybe negative */
68261 d = sym->st_value - addr;
68262 + if (d == 0)
68263 + return sym;
68264 if (d < 0)
68265 d = addr - sym->st_value;
68266 if (d < distance) {
68267 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
68268 tosym, prl_to, prl_to, tosym);
68269 free(prl_to);
68270 break;
68271 + case DATA_TO_TEXT:
68272 +/*
68273 + fprintf(stderr,
68274 + "The variable %s references\n"
68275 + "the %s %s%s%s\n",
68276 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68277 +*/
68278 + break;
68279 }
68280 fprintf(stderr, "\n");
68281 }
68282 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
68283 static void check_sec_ref(struct module *mod, const char *modname,
68284 struct elf_info *elf)
68285 {
68286 - int i;
68287 + unsigned int i;
68288 Elf_Shdr *sechdrs = elf->sechdrs;
68289
68290 /* Walk through all sections */
68291 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
68292 va_end(ap);
68293 }
68294
68295 -void buf_write(struct buffer *buf, const char *s, int len)
68296 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68297 {
68298 if (buf->size - buf->pos < len) {
68299 buf->size += len + SZ;
68300 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
68301 if (fstat(fileno(file), &st) < 0)
68302 goto close_write;
68303
68304 - if (st.st_size != b->pos)
68305 + if (st.st_size != (off_t)b->pos)
68306 goto close_write;
68307
68308 tmp = NOFAIL(malloc(b->pos));
68309 diff -urNp linux-3.0.4/scripts/mod/modpost.h linux-3.0.4/scripts/mod/modpost.h
68310 --- linux-3.0.4/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
68311 +++ linux-3.0.4/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
68312 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68313
68314 struct buffer {
68315 char *p;
68316 - int pos;
68317 - int size;
68318 + unsigned int pos;
68319 + unsigned int size;
68320 };
68321
68322 void __attribute__((format(printf, 2, 3)))
68323 buf_printf(struct buffer *buf, const char *fmt, ...);
68324
68325 void
68326 -buf_write(struct buffer *buf, const char *s, int len);
68327 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68328
68329 struct module {
68330 struct module *next;
68331 diff -urNp linux-3.0.4/scripts/mod/sumversion.c linux-3.0.4/scripts/mod/sumversion.c
68332 --- linux-3.0.4/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
68333 +++ linux-3.0.4/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
68334 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68335 goto out;
68336 }
68337
68338 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68339 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68340 warn("writing sum in %s failed: %s\n",
68341 filename, strerror(errno));
68342 goto out;
68343 diff -urNp linux-3.0.4/scripts/pnmtologo.c linux-3.0.4/scripts/pnmtologo.c
68344 --- linux-3.0.4/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
68345 +++ linux-3.0.4/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
68346 @@ -237,14 +237,14 @@ static void write_header(void)
68347 fprintf(out, " * Linux logo %s\n", logoname);
68348 fputs(" */\n\n", out);
68349 fputs("#include <linux/linux_logo.h>\n\n", out);
68350 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68351 + fprintf(out, "static unsigned char %s_data[] = {\n",
68352 logoname);
68353 }
68354
68355 static void write_footer(void)
68356 {
68357 fputs("\n};\n\n", out);
68358 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68359 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68360 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68361 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68362 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68363 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68364 fputs("\n};\n\n", out);
68365
68366 /* write logo clut */
68367 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68368 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68369 logoname);
68370 write_hex_cnt = 0;
68371 for (i = 0; i < logo_clutsize; i++) {
68372 diff -urNp linux-3.0.4/security/apparmor/lsm.c linux-3.0.4/security/apparmor/lsm.c
68373 --- linux-3.0.4/security/apparmor/lsm.c 2011-08-23 21:44:40.000000000 -0400
68374 +++ linux-3.0.4/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
68375 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68376 return error;
68377 }
68378
68379 -static struct security_operations apparmor_ops = {
68380 +static struct security_operations apparmor_ops __read_only = {
68381 .name = "apparmor",
68382
68383 .ptrace_access_check = apparmor_ptrace_access_check,
68384 diff -urNp linux-3.0.4/security/commoncap.c linux-3.0.4/security/commoncap.c
68385 --- linux-3.0.4/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
68386 +++ linux-3.0.4/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
68387 @@ -28,6 +28,7 @@
68388 #include <linux/prctl.h>
68389 #include <linux/securebits.h>
68390 #include <linux/user_namespace.h>
68391 +#include <net/sock.h>
68392
68393 /*
68394 * If a non-root user executes a setuid-root binary in
68395 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68396
68397 int cap_netlink_recv(struct sk_buff *skb, int cap)
68398 {
68399 - if (!cap_raised(current_cap(), cap))
68400 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68401 return -EPERM;
68402 return 0;
68403 }
68404 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
68405 {
68406 const struct cred *cred = current_cred();
68407
68408 + if (gr_acl_enable_at_secure())
68409 + return 1;
68410 +
68411 if (cred->uid != 0) {
68412 if (bprm->cap_effective)
68413 return 1;
68414 diff -urNp linux-3.0.4/security/integrity/ima/ima_api.c linux-3.0.4/security/integrity/ima/ima_api.c
68415 --- linux-3.0.4/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
68416 +++ linux-3.0.4/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
68417 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68418 int result;
68419
68420 /* can overflow, only indicator */
68421 - atomic_long_inc(&ima_htable.violations);
68422 + atomic_long_inc_unchecked(&ima_htable.violations);
68423
68424 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68425 if (!entry) {
68426 diff -urNp linux-3.0.4/security/integrity/ima/ima_fs.c linux-3.0.4/security/integrity/ima/ima_fs.c
68427 --- linux-3.0.4/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
68428 +++ linux-3.0.4/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
68429 @@ -28,12 +28,12 @@
68430 static int valid_policy = 1;
68431 #define TMPBUFLEN 12
68432 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68433 - loff_t *ppos, atomic_long_t *val)
68434 + loff_t *ppos, atomic_long_unchecked_t *val)
68435 {
68436 char tmpbuf[TMPBUFLEN];
68437 ssize_t len;
68438
68439 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68440 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68441 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68442 }
68443
68444 diff -urNp linux-3.0.4/security/integrity/ima/ima.h linux-3.0.4/security/integrity/ima/ima.h
68445 --- linux-3.0.4/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
68446 +++ linux-3.0.4/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
68447 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68448 extern spinlock_t ima_queue_lock;
68449
68450 struct ima_h_table {
68451 - atomic_long_t len; /* number of stored measurements in the list */
68452 - atomic_long_t violations;
68453 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68454 + atomic_long_unchecked_t violations;
68455 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68456 };
68457 extern struct ima_h_table ima_htable;
68458 diff -urNp linux-3.0.4/security/integrity/ima/ima_queue.c linux-3.0.4/security/integrity/ima/ima_queue.c
68459 --- linux-3.0.4/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
68460 +++ linux-3.0.4/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
68461 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68462 INIT_LIST_HEAD(&qe->later);
68463 list_add_tail_rcu(&qe->later, &ima_measurements);
68464
68465 - atomic_long_inc(&ima_htable.len);
68466 + atomic_long_inc_unchecked(&ima_htable.len);
68467 key = ima_hash_key(entry->digest);
68468 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68469 return 0;
68470 diff -urNp linux-3.0.4/security/Kconfig linux-3.0.4/security/Kconfig
68471 --- linux-3.0.4/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
68472 +++ linux-3.0.4/security/Kconfig 2011-08-23 21:48:14.000000000 -0400
68473 @@ -4,6 +4,554 @@
68474
68475 menu "Security options"
68476
68477 +source grsecurity/Kconfig
68478 +
68479 +menu "PaX"
68480 +
68481 + config ARCH_TRACK_EXEC_LIMIT
68482 + bool
68483 +
68484 + config PAX_PER_CPU_PGD
68485 + bool
68486 +
68487 + config TASK_SIZE_MAX_SHIFT
68488 + int
68489 + depends on X86_64
68490 + default 47 if !PAX_PER_CPU_PGD
68491 + default 42 if PAX_PER_CPU_PGD
68492 +
68493 + config PAX_ENABLE_PAE
68494 + bool
68495 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68496 +
68497 +config PAX
68498 + bool "Enable various PaX features"
68499 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68500 + help
68501 + This allows you to enable various PaX features. PaX adds
68502 + intrusion prevention mechanisms to the kernel that reduce
68503 + the risks posed by exploitable memory corruption bugs.
68504 +
68505 +menu "PaX Control"
68506 + depends on PAX
68507 +
68508 +config PAX_SOFTMODE
68509 + bool 'Support soft mode'
68510 + select PAX_PT_PAX_FLAGS
68511 + help
68512 + Enabling this option will allow you to run PaX in soft mode, that
68513 + is, PaX features will not be enforced by default, only on executables
68514 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68515 + is the only way to mark executables for soft mode use.
68516 +
68517 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68518 + line option on boot. Furthermore you can control various PaX features
68519 + at runtime via the entries in /proc/sys/kernel/pax.
68520 +
68521 +config PAX_EI_PAX
68522 + bool 'Use legacy ELF header marking'
68523 + help
68524 + Enabling this option will allow you to control PaX features on
68525 + a per executable basis via the 'chpax' utility available at
68526 + http://pax.grsecurity.net/. The control flags will be read from
68527 + an otherwise reserved part of the ELF header. This marking has
68528 + numerous drawbacks (no support for soft-mode, toolchain does not
68529 + know about the non-standard use of the ELF header) therefore it
68530 + has been deprecated in favour of PT_PAX_FLAGS support.
68531 +
68532 + Note that if you enable PT_PAX_FLAGS marking support as well,
68533 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
68534 +
68535 +config PAX_PT_PAX_FLAGS
68536 + bool 'Use ELF program header marking'
68537 + help
68538 + Enabling this option will allow you to control PaX features on
68539 + a per executable basis via the 'paxctl' utility available at
68540 + http://pax.grsecurity.net/. The control flags will be read from
68541 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
68542 + has the benefits of supporting both soft mode and being fully
68543 + integrated into the toolchain (the binutils patch is available
68544 + from http://pax.grsecurity.net).
68545 +
68546 + If your toolchain does not support PT_PAX_FLAGS markings,
68547 + you can create one in most cases with 'paxctl -C'.
68548 +
68549 + Note that if you enable the legacy EI_PAX marking support as well,
68550 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
68551 +
68552 +choice
68553 + prompt 'MAC system integration'
68554 + default PAX_HAVE_ACL_FLAGS
68555 + help
68556 + Mandatory Access Control systems have the option of controlling
68557 + PaX flags on a per executable basis, choose the method supported
68558 + by your particular system.
68559 +
68560 + - "none": if your MAC system does not interact with PaX,
68561 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
68562 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
68563 +
68564 + NOTE: this option is for developers/integrators only.
68565 +
68566 + config PAX_NO_ACL_FLAGS
68567 + bool 'none'
68568 +
68569 + config PAX_HAVE_ACL_FLAGS
68570 + bool 'direct'
68571 +
68572 + config PAX_HOOK_ACL_FLAGS
68573 + bool 'hook'
68574 +endchoice
68575 +
68576 +endmenu
68577 +
68578 +menu "Non-executable pages"
68579 + depends on PAX
68580 +
68581 +config PAX_NOEXEC
68582 + bool "Enforce non-executable pages"
68583 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
68584 + help
68585 + By design some architectures do not allow for protecting memory
68586 + pages against execution or even if they do, Linux does not make
68587 + use of this feature. In practice this means that if a page is
68588 + readable (such as the stack or heap) it is also executable.
68589 +
68590 + There is a well known exploit technique that makes use of this
68591 + fact and a common programming mistake where an attacker can
68592 + introduce code of his choice somewhere in the attacked program's
68593 + memory (typically the stack or the heap) and then execute it.
68594 +
68595 + If the attacked program was running with different (typically
68596 + higher) privileges than that of the attacker, then he can elevate
68597 + his own privilege level (e.g. get a root shell, write to files for
68598 + which he does not have write access to, etc).
68599 +
68600 + Enabling this option will let you choose from various features
68601 + that prevent the injection and execution of 'foreign' code in
68602 + a program.
68603 +
68604 + This will also break programs that rely on the old behaviour and
68605 + expect that dynamically allocated memory via the malloc() family
68606 + of functions is executable (which it is not). Notable examples
68607 + are the XFree86 4.x server, the java runtime and wine.
68608 +
68609 +config PAX_PAGEEXEC
68610 + bool "Paging based non-executable pages"
68611 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
68612 + select S390_SWITCH_AMODE if S390
68613 + select S390_EXEC_PROTECT if S390
68614 + select ARCH_TRACK_EXEC_LIMIT if X86_32
68615 + help
68616 + This implementation is based on the paging feature of the CPU.
68617 + On i386 without hardware non-executable bit support there is a
68618 + variable but usually low performance impact, however on Intel's
68619 + P4 core based CPUs it is very high so you should not enable this
68620 + for kernels meant to be used on such CPUs.
68621 +
68622 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
68623 + with hardware non-executable bit support there is no performance
68624 + impact, on ppc the impact is negligible.
68625 +
68626 + Note that several architectures require various emulations due to
68627 + badly designed userland ABIs, this will cause a performance impact
68628 + but will disappear as soon as userland is fixed. For example, ppc
68629 + userland MUST have been built with secure-plt by a recent toolchain.
68630 +
68631 +config PAX_SEGMEXEC
68632 + bool "Segmentation based non-executable pages"
68633 + depends on PAX_NOEXEC && X86_32
68634 + help
68635 + This implementation is based on the segmentation feature of the
68636 + CPU and has a very small performance impact, however applications
68637 + will be limited to a 1.5 GB address space instead of the normal
68638 + 3 GB.
68639 +
68640 +config PAX_EMUTRAMP
68641 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
68642 + default y if PARISC
68643 + help
68644 + There are some programs and libraries that for one reason or
68645 + another attempt to execute special small code snippets from
68646 + non-executable memory pages. Most notable examples are the
68647 + signal handler return code generated by the kernel itself and
68648 + the GCC trampolines.
68649 +
68650 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
68651 + such programs will no longer work under your kernel.
68652 +
68653 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
68654 + utilities to enable trampoline emulation for the affected programs
68655 + yet still have the protection provided by the non-executable pages.
68656 +
68657 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
68658 + your system will not even boot.
68659 +
68660 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
68661 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
68662 + for the affected files.
68663 +
68664 + NOTE: enabling this feature *may* open up a loophole in the
68665 + protection provided by non-executable pages that an attacker
68666 + could abuse. Therefore the best solution is to not have any
68667 + files on your system that would require this option. This can
68668 + be achieved by not using libc5 (which relies on the kernel
68669 + signal handler return code) and not using or rewriting programs
68670 + that make use of the nested function implementation of GCC.
68671 + Skilled users can just fix GCC itself so that it implements
68672 + nested function calls in a way that does not interfere with PaX.
68673 +
68674 +config PAX_EMUSIGRT
68675 + bool "Automatically emulate sigreturn trampolines"
68676 + depends on PAX_EMUTRAMP && PARISC
68677 + default y
68678 + help
68679 + Enabling this option will have the kernel automatically detect
68680 + and emulate signal return trampolines executing on the stack
68681 + that would otherwise lead to task termination.
68682 +
68683 + This solution is intended as a temporary one for users with
68684 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
68685 + Modula-3 runtime, etc) or executables linked to such, basically
68686 + everything that does not specify its own SA_RESTORER function in
68687 + normal executable memory like glibc 2.1+ does.
68688 +
68689 + On parisc you MUST enable this option, otherwise your system will
68690 + not even boot.
68691 +
68692 + NOTE: this feature cannot be disabled on a per executable basis
68693 + and since it *does* open up a loophole in the protection provided
68694 + by non-executable pages, the best solution is to not have any
68695 + files on your system that would require this option.
68696 +
68697 +config PAX_MPROTECT
68698 + bool "Restrict mprotect()"
68699 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
68700 + help
68701 + Enabling this option will prevent programs from
68702 + - changing the executable status of memory pages that were
68703 + not originally created as executable,
68704 + - making read-only executable pages writable again,
68705 + - creating executable pages from anonymous memory,
68706 + - making read-only-after-relocations (RELRO) data pages writable again.
68707 +
68708 + You should say Y here to complete the protection provided by
68709 + the enforcement of non-executable pages.
68710 +
68711 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68712 + this feature on a per file basis.
68713 +
68714 +config PAX_MPROTECT_COMPAT
68715 + bool "Use legacy/compat protection demoting (read help)"
68716 + depends on PAX_MPROTECT
68717 + default n
68718 + help
68719 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
68720 + by sending the proper error code to the application. For some broken
68721 + userland, this can cause problems with Python or other applications. The
68722 + current implementation however allows for applications like clamav to
68723 + detect if JIT compilation/execution is allowed and to fall back gracefully
68724 + to an interpreter-based mode if it does not. While we encourage everyone
68725 + to use the current implementation as-is and push upstream to fix broken
68726 + userland (note that the RWX logging option can assist with this), in some
68727 + environments this may not be possible. Having to disable MPROTECT
68728 + completely on certain binaries reduces the security benefit of PaX,
68729 + so this option is provided for those environments to revert to the old
68730 + behavior.
68731 +
68732 +config PAX_ELFRELOCS
68733 + bool "Allow ELF text relocations (read help)"
68734 + depends on PAX_MPROTECT
68735 + default n
68736 + help
68737 + Non-executable pages and mprotect() restrictions are effective
68738 + in preventing the introduction of new executable code into an
68739 + attacked task's address space. There remain only two venues
68740 + for this kind of attack: if the attacker can execute already
68741 + existing code in the attacked task then he can either have it
68742 + create and mmap() a file containing his code or have it mmap()
68743 + an already existing ELF library that does not have position
68744 + independent code in it and use mprotect() on it to make it
68745 + writable and copy his code there. While protecting against
68746 + the former approach is beyond PaX, the latter can be prevented
68747 + by having only PIC ELF libraries on one's system (which do not
68748 + need to relocate their code). If you are sure this is your case,
68749 + as is the case with all modern Linux distributions, then leave
68750 + this option disabled. You should say 'n' here.
68751 +
68752 +config PAX_ETEXECRELOCS
68753 + bool "Allow ELF ET_EXEC text relocations"
68754 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
68755 + select PAX_ELFRELOCS
68756 + default y
68757 + help
68758 + On some architectures there are incorrectly created applications
68759 + that require text relocations and would not work without enabling
68760 + this option. If you are an alpha, ia64 or parisc user, you should
68761 + enable this option and disable it once you have made sure that
68762 + none of your applications need it.
68763 +
68764 +config PAX_EMUPLT
68765 + bool "Automatically emulate ELF PLT"
68766 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
68767 + default y
68768 + help
68769 + Enabling this option will have the kernel automatically detect
68770 + and emulate the Procedure Linkage Table entries in ELF files.
68771 + On some architectures such entries are in writable memory, and
68772 + become non-executable leading to task termination. Therefore
68773 + it is mandatory that you enable this option on alpha, parisc,
68774 + sparc and sparc64, otherwise your system would not even boot.
68775 +
68776 + NOTE: this feature *does* open up a loophole in the protection
68777 + provided by the non-executable pages, therefore the proper
68778 + solution is to modify the toolchain to produce a PLT that does
68779 + not need to be writable.
68780 +
68781 +config PAX_DLRESOLVE
68782 + bool 'Emulate old glibc resolver stub'
68783 + depends on PAX_EMUPLT && SPARC
68784 + default n
68785 + help
68786 + This option is needed if userland has an old glibc (before 2.4)
68787 + that puts a 'save' instruction into the runtime generated resolver
68788 + stub that needs special emulation.
68789 +
68790 +config PAX_KERNEXEC
68791 + bool "Enforce non-executable kernel pages"
68792 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
68793 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
68794 + help
68795 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
68796 + that is, enabling this option will make it harder to inject
68797 + and execute 'foreign' code in kernel memory itself.
68798 +
68799 + Note that on x86_64 kernels there is a known regression when
68800 + this feature and KVM/VMX are both enabled in the host kernel.
68801 +
68802 +config PAX_KERNEXEC_MODULE_TEXT
68803 + int "Minimum amount of memory reserved for module code"
68804 + default "4"
68805 + depends on PAX_KERNEXEC && X86_32 && MODULES
68806 + help
68807 + Due to implementation details the kernel must reserve a fixed
68808 + amount of memory for module code at compile time that cannot be
68809 + changed at runtime. Here you can specify the minimum amount
68810 + in MB that will be reserved. Due to the same implementation
68811 + details this size will always be rounded up to the next 2/4 MB
68812 + boundary (depends on PAE) so the actually available memory for
68813 + module code will usually be more than this minimum.
68814 +
68815 + The default 4 MB should be enough for most users but if you have
68816 + an excessive number of modules (e.g., most distribution configs
68817 + compile many drivers as modules) or use huge modules such as
68818 + nvidia's kernel driver, you will need to adjust this amount.
68819 + A good rule of thumb is to look at your currently loaded kernel
68820 + modules and add up their sizes.
68821 +
68822 +endmenu
68823 +
68824 +menu "Address Space Layout Randomization"
68825 + depends on PAX
68826 +
68827 +config PAX_ASLR
68828 + bool "Address Space Layout Randomization"
68829 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
68830 + help
68831 + Many if not most exploit techniques rely on the knowledge of
68832 + certain addresses in the attacked program. The following options
68833 + will allow the kernel to apply a certain amount of randomization
68834 + to specific parts of the program thereby forcing an attacker to
68835 + guess them in most cases. Any failed guess will most likely crash
68836 + the attacked program which allows the kernel to detect such attempts
68837 + and react on them. PaX itself provides no reaction mechanisms,
68838 + instead it is strongly encouraged that you make use of Nergal's
68839 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
68840 + (http://www.grsecurity.net/) built-in crash detection features or
68841 + develop one yourself.
68842 +
68843 + By saying Y here you can choose to randomize the following areas:
68844 + - top of the task's kernel stack
68845 + - top of the task's userland stack
68846 + - base address for mmap() requests that do not specify one
68847 + (this includes all libraries)
68848 + - base address of the main executable
68849 +
68850 + It is strongly recommended to say Y here as address space layout
68851 + randomization has negligible impact on performance yet it provides
68852 + a very effective protection.
68853 +
68854 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68855 + this feature on a per file basis.
68856 +
68857 +config PAX_RANDKSTACK
68858 + bool "Randomize kernel stack base"
68859 + depends on PAX_ASLR && X86_TSC && X86
68860 + help
68861 + By saying Y here the kernel will randomize every task's kernel
68862 + stack on every system call. This will not only force an attacker
68863 + to guess it but also prevent him from making use of possible
68864 + leaked information about it.
68865 +
68866 + Since the kernel stack is a rather scarce resource, randomization
68867 + may cause unexpected stack overflows, therefore you should very
68868 + carefully test your system. Note that once enabled in the kernel
68869 + configuration, this feature cannot be disabled on a per file basis.
68870 +
68871 +config PAX_RANDUSTACK
68872 + bool "Randomize user stack base"
68873 + depends on PAX_ASLR
68874 + help
68875 + By saying Y here the kernel will randomize every task's userland
68876 + stack. The randomization is done in two steps where the second
68877 + one may apply a big amount of shift to the top of the stack and
68878 + cause problems for programs that want to use lots of memory (more
68879 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
68880 + For this reason the second step can be controlled by 'chpax' or
68881 + 'paxctl' on a per file basis.
68882 +
68883 +config PAX_RANDMMAP
68884 + bool "Randomize mmap() base"
68885 + depends on PAX_ASLR
68886 + help
68887 + By saying Y here the kernel will use a randomized base address for
68888 + mmap() requests that do not specify one themselves. As a result
68889 + all dynamically loaded libraries will appear at random addresses
68890 + and therefore be harder to exploit by a technique where an attacker
68891 + attempts to execute library code for his purposes (e.g. spawn a
68892 + shell from an exploited program that is running at an elevated
68893 + privilege level).
68894 +
68895 + Furthermore, if a program is relinked as a dynamic ELF file, its
68896 + base address will be randomized as well, completing the full
68897 + randomization of the address space layout. Attacking such programs
68898 + becomes a guess game. You can find an example of doing this at
68899 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
68900 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
68901 +
68902 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
68903 + feature on a per file basis.
68904 +
68905 +endmenu
68906 +
68907 +menu "Miscellaneous hardening features"
68908 +
68909 +config PAX_MEMORY_SANITIZE
68910 + bool "Sanitize all freed memory"
68911 + help
68912 + By saying Y here the kernel will erase memory pages as soon as they
68913 + are freed. This in turn reduces the lifetime of data stored in the
68914 + pages, making it less likely that sensitive information such as
68915 + passwords, cryptographic secrets, etc stay in memory for too long.
68916 +
68917 + This is especially useful for programs whose runtime is short, long
68918 + lived processes and the kernel itself benefit from this as long as
68919 + they operate on whole memory pages and ensure timely freeing of pages
68920 + that may hold sensitive information.
68921 +
68922 + The tradeoff is performance impact, on a single CPU system kernel
68923 + compilation sees a 3% slowdown, other systems and workloads may vary
68924 + and you are advised to test this feature on your expected workload
68925 + before deploying it.
68926 +
68927 + Note that this feature does not protect data stored in live pages,
68928 + e.g., process memory swapped to disk may stay there for a long time.
68929 +
68930 +config PAX_MEMORY_STACKLEAK
68931 + bool "Sanitize kernel stack"
68932 + depends on X86
68933 + help
68934 + By saying Y here the kernel will erase the kernel stack before it
68935 + returns from a system call. This in turn reduces the information
68936 + that a kernel stack leak bug can reveal.
68937 +
68938 + Note that such a bug can still leak information that was put on
68939 + the stack by the current system call (the one eventually triggering
68940 + the bug) but traces of earlier system calls on the kernel stack
68941 + cannot leak anymore.
68942 +
68943 + The tradeoff is performance impact: on a single CPU system kernel
68944 + compilation sees a 1% slowdown, other systems and workloads may vary
68945 + and you are advised to test this feature on your expected workload
68946 + before deploying it.
68947 +
68948 + Note: full support for this feature requires gcc with plugin support
68949 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
68950 + is not supported). Using older gcc versions means that functions
68951 + with large enough stack frames may leave uninitialized memory behind
68952 + that may be exposed to a later syscall leaking the stack.
68953 +
68954 +config PAX_MEMORY_UDEREF
68955 + bool "Prevent invalid userland pointer dereference"
68956 + depends on X86 && !UML_X86 && !XEN
68957 + select PAX_PER_CPU_PGD if X86_64
68958 + help
68959 + By saying Y here the kernel will be prevented from dereferencing
68960 + userland pointers in contexts where the kernel expects only kernel
68961 + pointers. This is both a useful runtime debugging feature and a
68962 + security measure that prevents exploiting a class of kernel bugs.
68963 +
68964 + The tradeoff is that some virtualization solutions may experience
68965 + a huge slowdown and therefore you should not enable this feature
68966 + for kernels meant to run in such environments. Whether a given VM
68967 + solution is affected or not is best determined by simply trying it
68968 + out, the performance impact will be obvious right on boot as this
68969 + mechanism engages from very early on. A good rule of thumb is that
68970 + VMs running on CPUs without hardware virtualization support (i.e.,
68971 + the majority of IA-32 CPUs) will likely experience the slowdown.
68972 +
68973 +config PAX_REFCOUNT
68974 + bool "Prevent various kernel object reference counter overflows"
68975 + depends on GRKERNSEC && (X86 || SPARC64)
68976 + help
68977 + By saying Y here the kernel will detect and prevent overflowing
68978 + various (but not all) kinds of object reference counters. Such
68979 + overflows can normally occur due to bugs only and are often, if
68980 + not always, exploitable.
68981 +
68982 + The tradeoff is that data structures protected by an overflowed
68983 + refcount will never be freed and therefore will leak memory. Note
68984 + that this leak also happens even without this protection but in
68985 + that case the overflow can eventually trigger the freeing of the
68986 + data structure while it is still being used elsewhere, resulting
68987 + in the exploitable situation that this feature prevents.
68988 +
68989 + Since this has a negligible performance impact, you should enable
68990 + this feature.
68991 +
68992 +config PAX_USERCOPY
68993 + bool "Harden heap object copies between kernel and userland"
68994 + depends on X86 || PPC || SPARC || ARM
68995 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
68996 + help
68997 + By saying Y here the kernel will enforce the size of heap objects
68998 + when they are copied in either direction between the kernel and
68999 + userland, even if only a part of the heap object is copied.
69000 +
69001 + Specifically, this checking prevents information leaking from the
69002 + kernel heap during kernel to userland copies (if the kernel heap
69003 + object is otherwise fully initialized) and prevents kernel heap
69004 + overflows during userland to kernel copies.
69005 +
69006 + Note that the current implementation provides the strictest bounds
69007 + checks for the SLUB allocator.
69008 +
69009 + Enabling this option also enables per-slab cache protection against
69010 + data in a given cache being copied into/out of via userland
69011 + accessors. Though the whitelist of regions will be reduced over
69012 + time, it notably protects important data structures like task structs.
69013 +
69014 + If frame pointers are enabled on x86, this option will also restrict
69015 + copies into and out of the kernel stack to local variables within a
69016 + single frame.
69017 +
69018 + Since this has a negligible performance impact, you should enable
69019 + this feature.
69020 +
69021 +endmenu
69022 +
69023 +endmenu
69024 +
69025 config KEYS
69026 bool "Enable access key retention support"
69027 help
69028 @@ -167,7 +715,7 @@ config INTEL_TXT
69029 config LSM_MMAP_MIN_ADDR
69030 int "Low address space for LSM to protect from user allocation"
69031 depends on SECURITY && SECURITY_SELINUX
69032 - default 32768 if ARM
69033 + default 32768 if ALPHA || ARM || PARISC || SPARC32
69034 default 65536
69035 help
69036 This is the portion of low virtual memory which should be protected
69037 diff -urNp linux-3.0.4/security/keys/keyring.c linux-3.0.4/security/keys/keyring.c
69038 --- linux-3.0.4/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
69039 +++ linux-3.0.4/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
69040 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
69041 ret = -EFAULT;
69042
69043 for (loop = 0; loop < klist->nkeys; loop++) {
69044 + key_serial_t serial;
69045 key = klist->keys[loop];
69046 + serial = key->serial;
69047
69048 tmp = sizeof(key_serial_t);
69049 if (tmp > buflen)
69050 tmp = buflen;
69051
69052 - if (copy_to_user(buffer,
69053 - &key->serial,
69054 - tmp) != 0)
69055 + if (copy_to_user(buffer, &serial, tmp))
69056 goto error;
69057
69058 buflen -= tmp;
69059 diff -urNp linux-3.0.4/security/min_addr.c linux-3.0.4/security/min_addr.c
69060 --- linux-3.0.4/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
69061 +++ linux-3.0.4/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
69062 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69063 */
69064 static void update_mmap_min_addr(void)
69065 {
69066 +#ifndef SPARC
69067 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69068 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69069 mmap_min_addr = dac_mmap_min_addr;
69070 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69071 #else
69072 mmap_min_addr = dac_mmap_min_addr;
69073 #endif
69074 +#endif
69075 }
69076
69077 /*
69078 diff -urNp linux-3.0.4/security/security.c linux-3.0.4/security/security.c
69079 --- linux-3.0.4/security/security.c 2011-07-21 22:17:23.000000000 -0400
69080 +++ linux-3.0.4/security/security.c 2011-08-23 21:48:14.000000000 -0400
69081 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69082 /* things that live in capability.c */
69083 extern void __init security_fixup_ops(struct security_operations *ops);
69084
69085 -static struct security_operations *security_ops;
69086 -static struct security_operations default_security_ops = {
69087 +static struct security_operations *security_ops __read_only;
69088 +static struct security_operations default_security_ops __read_only = {
69089 .name = "default",
69090 };
69091
69092 @@ -67,7 +67,9 @@ int __init security_init(void)
69093
69094 void reset_security_ops(void)
69095 {
69096 + pax_open_kernel();
69097 security_ops = &default_security_ops;
69098 + pax_close_kernel();
69099 }
69100
69101 /* Save user chosen LSM */
69102 diff -urNp linux-3.0.4/security/selinux/hooks.c linux-3.0.4/security/selinux/hooks.c
69103 --- linux-3.0.4/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
69104 +++ linux-3.0.4/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
69105 @@ -93,7 +93,6 @@
69106 #define NUM_SEL_MNT_OPTS 5
69107
69108 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69109 -extern struct security_operations *security_ops;
69110
69111 /* SECMARK reference count */
69112 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69113 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
69114
69115 #endif
69116
69117 -static struct security_operations selinux_ops = {
69118 +static struct security_operations selinux_ops __read_only = {
69119 .name = "selinux",
69120
69121 .ptrace_access_check = selinux_ptrace_access_check,
69122 diff -urNp linux-3.0.4/security/selinux/include/xfrm.h linux-3.0.4/security/selinux/include/xfrm.h
69123 --- linux-3.0.4/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
69124 +++ linux-3.0.4/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
69125 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69126
69127 static inline void selinux_xfrm_notify_policyload(void)
69128 {
69129 - atomic_inc(&flow_cache_genid);
69130 + atomic_inc_unchecked(&flow_cache_genid);
69131 }
69132 #else
69133 static inline int selinux_xfrm_enabled(void)
69134 diff -urNp linux-3.0.4/security/selinux/ss/services.c linux-3.0.4/security/selinux/ss/services.c
69135 --- linux-3.0.4/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
69136 +++ linux-3.0.4/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
69137 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
69138 int rc = 0;
69139 struct policy_file file = { data, len }, *fp = &file;
69140
69141 + pax_track_stack();
69142 +
69143 if (!ss_initialized) {
69144 avtab_cache_init();
69145 rc = policydb_read(&policydb, fp);
69146 diff -urNp linux-3.0.4/security/smack/smack_lsm.c linux-3.0.4/security/smack/smack_lsm.c
69147 --- linux-3.0.4/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
69148 +++ linux-3.0.4/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
69149 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
69150 return 0;
69151 }
69152
69153 -struct security_operations smack_ops = {
69154 +struct security_operations smack_ops __read_only = {
69155 .name = "smack",
69156
69157 .ptrace_access_check = smack_ptrace_access_check,
69158 diff -urNp linux-3.0.4/security/tomoyo/tomoyo.c linux-3.0.4/security/tomoyo/tomoyo.c
69159 --- linux-3.0.4/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
69160 +++ linux-3.0.4/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
69161 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69162 * tomoyo_security_ops is a "struct security_operations" which is used for
69163 * registering TOMOYO.
69164 */
69165 -static struct security_operations tomoyo_security_ops = {
69166 +static struct security_operations tomoyo_security_ops __read_only = {
69167 .name = "tomoyo",
69168 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69169 .cred_prepare = tomoyo_cred_prepare,
69170 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.c linux-3.0.4/sound/aoa/codecs/onyx.c
69171 --- linux-3.0.4/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
69172 +++ linux-3.0.4/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
69173 @@ -54,7 +54,7 @@ struct onyx {
69174 spdif_locked:1,
69175 analog_locked:1,
69176 original_mute:2;
69177 - int open_count;
69178 + local_t open_count;
69179 struct codec_info *codec_info;
69180
69181 /* mutex serializes concurrent access to the device
69182 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69183 struct onyx *onyx = cii->codec_data;
69184
69185 mutex_lock(&onyx->mutex);
69186 - onyx->open_count++;
69187 + local_inc(&onyx->open_count);
69188 mutex_unlock(&onyx->mutex);
69189
69190 return 0;
69191 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69192 struct onyx *onyx = cii->codec_data;
69193
69194 mutex_lock(&onyx->mutex);
69195 - onyx->open_count--;
69196 - if (!onyx->open_count)
69197 + if (local_dec_and_test(&onyx->open_count))
69198 onyx->spdif_locked = onyx->analog_locked = 0;
69199 mutex_unlock(&onyx->mutex);
69200
69201 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.h linux-3.0.4/sound/aoa/codecs/onyx.h
69202 --- linux-3.0.4/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
69203 +++ linux-3.0.4/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
69204 @@ -11,6 +11,7 @@
69205 #include <linux/i2c.h>
69206 #include <asm/pmac_low_i2c.h>
69207 #include <asm/prom.h>
69208 +#include <asm/local.h>
69209
69210 /* PCM3052 register definitions */
69211
69212 diff -urNp linux-3.0.4/sound/core/seq/seq_device.c linux-3.0.4/sound/core/seq/seq_device.c
69213 --- linux-3.0.4/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
69214 +++ linux-3.0.4/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
69215 @@ -63,7 +63,7 @@ struct ops_list {
69216 int argsize; /* argument size */
69217
69218 /* operators */
69219 - struct snd_seq_dev_ops ops;
69220 + struct snd_seq_dev_ops *ops;
69221
69222 /* registred devices */
69223 struct list_head dev_list; /* list of devices */
69224 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69225
69226 mutex_lock(&ops->reg_mutex);
69227 /* copy driver operators */
69228 - ops->ops = *entry;
69229 + ops->ops = entry;
69230 ops->driver |= DRIVER_LOADED;
69231 ops->argsize = argsize;
69232
69233 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69234 dev->name, ops->id, ops->argsize, dev->argsize);
69235 return -EINVAL;
69236 }
69237 - if (ops->ops.init_device(dev) >= 0) {
69238 + if (ops->ops->init_device(dev) >= 0) {
69239 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69240 ops->num_init_devices++;
69241 } else {
69242 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69243 dev->name, ops->id, ops->argsize, dev->argsize);
69244 return -EINVAL;
69245 }
69246 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69247 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69248 dev->status = SNDRV_SEQ_DEVICE_FREE;
69249 dev->driver_data = NULL;
69250 ops->num_init_devices--;
69251 diff -urNp linux-3.0.4/sound/drivers/mts64.c linux-3.0.4/sound/drivers/mts64.c
69252 --- linux-3.0.4/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
69253 +++ linux-3.0.4/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
69254 @@ -28,6 +28,7 @@
69255 #include <sound/initval.h>
69256 #include <sound/rawmidi.h>
69257 #include <sound/control.h>
69258 +#include <asm/local.h>
69259
69260 #define CARD_NAME "Miditerminal 4140"
69261 #define DRIVER_NAME "MTS64"
69262 @@ -66,7 +67,7 @@ struct mts64 {
69263 struct pardevice *pardev;
69264 int pardev_claimed;
69265
69266 - int open_count;
69267 + local_t open_count;
69268 int current_midi_output_port;
69269 int current_midi_input_port;
69270 u8 mode[MTS64_NUM_INPUT_PORTS];
69271 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69272 {
69273 struct mts64 *mts = substream->rmidi->private_data;
69274
69275 - if (mts->open_count == 0) {
69276 + if (local_read(&mts->open_count) == 0) {
69277 /* We don't need a spinlock here, because this is just called
69278 if the device has not been opened before.
69279 So there aren't any IRQs from the device */
69280 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69281
69282 msleep(50);
69283 }
69284 - ++(mts->open_count);
69285 + local_inc(&mts->open_count);
69286
69287 return 0;
69288 }
69289 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69290 struct mts64 *mts = substream->rmidi->private_data;
69291 unsigned long flags;
69292
69293 - --(mts->open_count);
69294 - if (mts->open_count == 0) {
69295 + if (local_dec_return(&mts->open_count) == 0) {
69296 /* We need the spinlock_irqsave here because we can still
69297 have IRQs at this point */
69298 spin_lock_irqsave(&mts->lock, flags);
69299 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69300
69301 msleep(500);
69302
69303 - } else if (mts->open_count < 0)
69304 - mts->open_count = 0;
69305 + } else if (local_read(&mts->open_count) < 0)
69306 + local_set(&mts->open_count, 0);
69307
69308 return 0;
69309 }
69310 diff -urNp linux-3.0.4/sound/drivers/opl4/opl4_lib.c linux-3.0.4/sound/drivers/opl4/opl4_lib.c
69311 --- linux-3.0.4/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
69312 +++ linux-3.0.4/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
69313 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69314 MODULE_DESCRIPTION("OPL4 driver");
69315 MODULE_LICENSE("GPL");
69316
69317 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69318 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69319 {
69320 int timeout = 10;
69321 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69322 diff -urNp linux-3.0.4/sound/drivers/portman2x4.c linux-3.0.4/sound/drivers/portman2x4.c
69323 --- linux-3.0.4/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
69324 +++ linux-3.0.4/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
69325 @@ -47,6 +47,7 @@
69326 #include <sound/initval.h>
69327 #include <sound/rawmidi.h>
69328 #include <sound/control.h>
69329 +#include <asm/local.h>
69330
69331 #define CARD_NAME "Portman 2x4"
69332 #define DRIVER_NAME "portman"
69333 @@ -84,7 +85,7 @@ struct portman {
69334 struct pardevice *pardev;
69335 int pardev_claimed;
69336
69337 - int open_count;
69338 + local_t open_count;
69339 int mode[PORTMAN_NUM_INPUT_PORTS];
69340 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69341 };
69342 diff -urNp linux-3.0.4/sound/firewire/amdtp.c linux-3.0.4/sound/firewire/amdtp.c
69343 --- linux-3.0.4/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
69344 +++ linux-3.0.4/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
69345 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69346 ptr = s->pcm_buffer_pointer + data_blocks;
69347 if (ptr >= pcm->runtime->buffer_size)
69348 ptr -= pcm->runtime->buffer_size;
69349 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69350 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69351
69352 s->pcm_period_pointer += data_blocks;
69353 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69354 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69355 */
69356 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69357 {
69358 - ACCESS_ONCE(s->source_node_id_field) =
69359 + ACCESS_ONCE_RW(s->source_node_id_field) =
69360 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69361 }
69362 EXPORT_SYMBOL(amdtp_out_stream_update);
69363 diff -urNp linux-3.0.4/sound/firewire/amdtp.h linux-3.0.4/sound/firewire/amdtp.h
69364 --- linux-3.0.4/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
69365 +++ linux-3.0.4/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
69366 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69367 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69368 struct snd_pcm_substream *pcm)
69369 {
69370 - ACCESS_ONCE(s->pcm) = pcm;
69371 + ACCESS_ONCE_RW(s->pcm) = pcm;
69372 }
69373
69374 /**
69375 diff -urNp linux-3.0.4/sound/firewire/isight.c linux-3.0.4/sound/firewire/isight.c
69376 --- linux-3.0.4/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
69377 +++ linux-3.0.4/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
69378 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
69379 ptr += count;
69380 if (ptr >= runtime->buffer_size)
69381 ptr -= runtime->buffer_size;
69382 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
69383 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
69384
69385 isight->period_counter += count;
69386 if (isight->period_counter >= runtime->period_size) {
69387 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
69388 if (err < 0)
69389 return err;
69390
69391 - ACCESS_ONCE(isight->pcm_active) = true;
69392 + ACCESS_ONCE_RW(isight->pcm_active) = true;
69393
69394 return 0;
69395 }
69396 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
69397 {
69398 struct isight *isight = substream->private_data;
69399
69400 - ACCESS_ONCE(isight->pcm_active) = false;
69401 + ACCESS_ONCE_RW(isight->pcm_active) = false;
69402
69403 mutex_lock(&isight->mutex);
69404 isight_stop_streaming(isight);
69405 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
69406
69407 switch (cmd) {
69408 case SNDRV_PCM_TRIGGER_START:
69409 - ACCESS_ONCE(isight->pcm_running) = true;
69410 + ACCESS_ONCE_RW(isight->pcm_running) = true;
69411 break;
69412 case SNDRV_PCM_TRIGGER_STOP:
69413 - ACCESS_ONCE(isight->pcm_running) = false;
69414 + ACCESS_ONCE_RW(isight->pcm_running) = false;
69415 break;
69416 default:
69417 return -EINVAL;
69418 diff -urNp linux-3.0.4/sound/isa/cmi8330.c linux-3.0.4/sound/isa/cmi8330.c
69419 --- linux-3.0.4/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
69420 +++ linux-3.0.4/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
69421 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69422
69423 struct snd_pcm *pcm;
69424 struct snd_cmi8330_stream {
69425 - struct snd_pcm_ops ops;
69426 + snd_pcm_ops_no_const ops;
69427 snd_pcm_open_callback_t open;
69428 void *private_data; /* sb or wss */
69429 } streams[2];
69430 diff -urNp linux-3.0.4/sound/oss/sb_audio.c linux-3.0.4/sound/oss/sb_audio.c
69431 --- linux-3.0.4/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
69432 +++ linux-3.0.4/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
69433 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69434 buf16 = (signed short *)(localbuf + localoffs);
69435 while (c)
69436 {
69437 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69438 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69439 if (copy_from_user(lbuf8,
69440 userbuf+useroffs + p,
69441 locallen))
69442 diff -urNp linux-3.0.4/sound/oss/swarm_cs4297a.c linux-3.0.4/sound/oss/swarm_cs4297a.c
69443 --- linux-3.0.4/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
69444 +++ linux-3.0.4/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
69445 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69446 {
69447 struct cs4297a_state *s;
69448 u32 pwr, id;
69449 - mm_segment_t fs;
69450 int rval;
69451 #ifndef CONFIG_BCM_CS4297A_CSWARM
69452 u64 cfg;
69453 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69454 if (!rval) {
69455 char *sb1250_duart_present;
69456
69457 +#if 0
69458 + mm_segment_t fs;
69459 fs = get_fs();
69460 set_fs(KERNEL_DS);
69461 -#if 0
69462 val = SOUND_MASK_LINE;
69463 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69464 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69465 val = initvol[i].vol;
69466 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69467 }
69468 + set_fs(fs);
69469 // cs4297a_write_ac97(s, 0x18, 0x0808);
69470 #else
69471 // cs4297a_write_ac97(s, 0x5e, 0x180);
69472 cs4297a_write_ac97(s, 0x02, 0x0808);
69473 cs4297a_write_ac97(s, 0x18, 0x0808);
69474 #endif
69475 - set_fs(fs);
69476
69477 list_add(&s->list, &cs4297a_devs);
69478
69479 diff -urNp linux-3.0.4/sound/pci/hda/hda_codec.h linux-3.0.4/sound/pci/hda/hda_codec.h
69480 --- linux-3.0.4/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
69481 +++ linux-3.0.4/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
69482 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69483 /* notify power-up/down from codec to controller */
69484 void (*pm_notify)(struct hda_bus *bus);
69485 #endif
69486 -};
69487 +} __no_const;
69488
69489 /* template to pass to the bus constructor */
69490 struct hda_bus_template {
69491 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69492 #endif
69493 void (*reboot_notify)(struct hda_codec *codec);
69494 };
69495 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69496
69497 /* record for amp information cache */
69498 struct hda_cache_head {
69499 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69500 struct snd_pcm_substream *substream);
69501 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69502 struct snd_pcm_substream *substream);
69503 -};
69504 +} __no_const;
69505
69506 /* PCM information for each substream */
69507 struct hda_pcm_stream {
69508 @@ -801,7 +802,7 @@ struct hda_codec {
69509 const char *modelname; /* model name for preset */
69510
69511 /* set by patch */
69512 - struct hda_codec_ops patch_ops;
69513 + hda_codec_ops_no_const patch_ops;
69514
69515 /* PCM to create, set by patch_ops.build_pcms callback */
69516 unsigned int num_pcms;
69517 diff -urNp linux-3.0.4/sound/pci/ice1712/ice1712.h linux-3.0.4/sound/pci/ice1712/ice1712.h
69518 --- linux-3.0.4/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
69519 +++ linux-3.0.4/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
69520 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69521 unsigned int mask_flags; /* total mask bits */
69522 struct snd_akm4xxx_ops {
69523 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69524 - } ops;
69525 + } __no_const ops;
69526 };
69527
69528 struct snd_ice1712_spdif {
69529 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69530 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69531 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69532 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69533 - } ops;
69534 + } __no_const ops;
69535 };
69536
69537
69538 diff -urNp linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c
69539 --- linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
69540 +++ linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
69541 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69542 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69543 break;
69544 }
69545 - if (atomic_read(&chip->interrupt_sleep_count)) {
69546 - atomic_set(&chip->interrupt_sleep_count, 0);
69547 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69548 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69549 wake_up(&chip->interrupt_sleep);
69550 }
69551 __end:
69552 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69553 continue;
69554 init_waitqueue_entry(&wait, current);
69555 add_wait_queue(&chip->interrupt_sleep, &wait);
69556 - atomic_inc(&chip->interrupt_sleep_count);
69557 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69558 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69559 remove_wait_queue(&chip->interrupt_sleep, &wait);
69560 }
69561 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
69562 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
69563 spin_unlock(&chip->reg_lock);
69564
69565 - if (atomic_read(&chip->interrupt_sleep_count)) {
69566 - atomic_set(&chip->interrupt_sleep_count, 0);
69567 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69568 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69569 wake_up(&chip->interrupt_sleep);
69570 }
69571 }
69572 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
69573 spin_lock_init(&chip->reg_lock);
69574 spin_lock_init(&chip->voice_lock);
69575 init_waitqueue_head(&chip->interrupt_sleep);
69576 - atomic_set(&chip->interrupt_sleep_count, 0);
69577 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69578 chip->card = card;
69579 chip->pci = pci;
69580 chip->irq = -1;
69581 diff -urNp linux-3.0.4/sound/soc/soc-core.c linux-3.0.4/sound/soc/soc-core.c
69582 --- linux-3.0.4/sound/soc/soc-core.c 2011-08-23 21:44:40.000000000 -0400
69583 +++ linux-3.0.4/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
69584 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
69585 }
69586
69587 /* ASoC PCM operations */
69588 -static struct snd_pcm_ops soc_pcm_ops = {
69589 +static snd_pcm_ops_no_const soc_pcm_ops = {
69590 .open = soc_pcm_open,
69591 .close = soc_codec_close,
69592 .hw_params = soc_pcm_hw_params,
69593 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
69594 rtd->pcm = pcm;
69595 pcm->private_data = rtd;
69596 if (platform->driver->ops) {
69597 + /* this whole logic is broken... */
69598 soc_pcm_ops.mmap = platform->driver->ops->mmap;
69599 soc_pcm_ops.pointer = platform->driver->ops->pointer;
69600 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
69601 diff -urNp linux-3.0.4/sound/usb/card.h linux-3.0.4/sound/usb/card.h
69602 --- linux-3.0.4/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
69603 +++ linux-3.0.4/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
69604 @@ -44,6 +44,7 @@ struct snd_urb_ops {
69605 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69606 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69607 };
69608 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
69609
69610 struct snd_usb_substream {
69611 struct snd_usb_stream *stream;
69612 @@ -93,7 +94,7 @@ struct snd_usb_substream {
69613 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
69614 spinlock_t lock;
69615
69616 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
69617 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
69618 };
69619
69620 struct snd_usb_stream {
69621 diff -urNp linux-3.0.4/tools/gcc/constify_plugin.c linux-3.0.4/tools/gcc/constify_plugin.c
69622 --- linux-3.0.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
69623 +++ linux-3.0.4/tools/gcc/constify_plugin.c 2011-08-29 22:01:36.000000000 -0400
69624 @@ -0,0 +1,289 @@
69625 +/*
69626 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
69627 + * Licensed under the GPL v2, or (at your option) v3
69628 + *
69629 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
69630 + *
69631 + * Usage:
69632 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
69633 + * $ gcc -fplugin=constify_plugin.so test.c -O2
69634 + */
69635 +
69636 +#include "gcc-plugin.h"
69637 +#include "config.h"
69638 +#include "system.h"
69639 +#include "coretypes.h"
69640 +#include "tree.h"
69641 +#include "tree-pass.h"
69642 +#include "intl.h"
69643 +#include "plugin-version.h"
69644 +#include "tm.h"
69645 +#include "toplev.h"
69646 +#include "function.h"
69647 +#include "tree-flow.h"
69648 +#include "plugin.h"
69649 +#include "diagnostic.h"
69650 +//#include "c-tree.h"
69651 +
69652 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
69653 +
69654 +int plugin_is_GPL_compatible;
69655 +
69656 +static struct plugin_info const_plugin_info = {
69657 + .version = "20110826",
69658 + .help = "no-constify\tturn off constification\n",
69659 +};
69660 +
69661 +static void constify_type(tree type);
69662 +static bool walk_struct(tree node);
69663 +
69664 +static tree deconstify_type(tree old_type)
69665 +{
69666 + tree new_type, field;
69667 +
69668 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
69669 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
69670 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
69671 + DECL_FIELD_CONTEXT(field) = new_type;
69672 + TYPE_READONLY(new_type) = 0;
69673 + C_TYPE_FIELDS_READONLY(new_type) = 0;
69674 + return new_type;
69675 +}
69676 +
69677 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
69678 +{
69679 + tree type;
69680 +
69681 + *no_add_attrs = true;
69682 + if (TREE_CODE(*node) == FUNCTION_DECL) {
69683 + error("%qE attribute does not apply to functions", name);
69684 + return NULL_TREE;
69685 + }
69686 +
69687 + if (TREE_CODE(*node) == VAR_DECL) {
69688 + error("%qE attribute does not apply to variables", name);
69689 + return NULL_TREE;
69690 + }
69691 +
69692 + if (TYPE_P(*node)) {
69693 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
69694 + *no_add_attrs = false;
69695 + else
69696 + error("%qE attribute applies to struct and union types only", name);
69697 + return NULL_TREE;
69698 + }
69699 +
69700 + type = TREE_TYPE(*node);
69701 +
69702 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
69703 + error("%qE attribute applies to struct and union types only", name);
69704 + return NULL_TREE;
69705 + }
69706 +
69707 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
69708 + error("%qE attribute is already applied to the type", name);
69709 + return NULL_TREE;
69710 + }
69711 +
69712 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
69713 + error("%qE attribute used on type that is not constified", name);
69714 + return NULL_TREE;
69715 + }
69716 +
69717 + if (TREE_CODE(*node) == TYPE_DECL) {
69718 + TREE_TYPE(*node) = deconstify_type(type);
69719 + TREE_READONLY(*node) = 0;
69720 + return NULL_TREE;
69721 + }
69722 +
69723 + return NULL_TREE;
69724 +}
69725 +
69726 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
69727 +{
69728 + *no_add_attrs = true;
69729 + if (!TYPE_P(*node)) {
69730 + error("%qE attribute applies to types only", name);
69731 + return NULL_TREE;
69732 + }
69733 +
69734 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
69735 + error("%qE attribute applies to struct and union types only", name);
69736 + return NULL_TREE;
69737 + }
69738 +
69739 + *no_add_attrs = false;
69740 + constify_type(*node);
69741 + return NULL_TREE;
69742 +}
69743 +
69744 +static struct attribute_spec no_const_attr = {
69745 + .name = "no_const",
69746 + .min_length = 0,
69747 + .max_length = 0,
69748 + .decl_required = false,
69749 + .type_required = false,
69750 + .function_type_required = false,
69751 + .handler = handle_no_const_attribute
69752 +};
69753 +
69754 +static struct attribute_spec do_const_attr = {
69755 + .name = "do_const",
69756 + .min_length = 0,
69757 + .max_length = 0,
69758 + .decl_required = false,
69759 + .type_required = false,
69760 + .function_type_required = false,
69761 + .handler = handle_do_const_attribute
69762 +};
69763 +
69764 +static void register_attributes(void *event_data, void *data)
69765 +{
69766 + register_attribute(&no_const_attr);
69767 + register_attribute(&do_const_attr);
69768 +}
69769 +
69770 +static void constify_type(tree type)
69771 +{
69772 + TYPE_READONLY(type) = 1;
69773 + C_TYPE_FIELDS_READONLY(type) = 1;
69774 +}
69775 +
69776 +static bool is_fptr(tree field)
69777 +{
69778 + tree ptr = TREE_TYPE(field);
69779 +
69780 + if (TREE_CODE(ptr) != POINTER_TYPE)
69781 + return false;
69782 +
69783 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
69784 +}
69785 +
69786 +static bool walk_struct(tree node)
69787 +{
69788 + tree field;
69789 +
69790 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
69791 + return false;
69792 +
69793 + if (TYPE_FIELDS(node) == NULL_TREE)
69794 + return false;
69795 +
69796 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
69797 + tree type = TREE_TYPE(field);
69798 + enum tree_code code = TREE_CODE(type);
69799 + if (code == RECORD_TYPE || code == UNION_TYPE) {
69800 + if (!(walk_struct(type)))
69801 + return false;
69802 + } else if (!is_fptr(field) && !TREE_READONLY(field))
69803 + return false;
69804 + }
69805 + return true;
69806 +}
69807 +
69808 +static void finish_type(void *event_data, void *data)
69809 +{
69810 + tree type = (tree)event_data;
69811 +
69812 + if (type == NULL_TREE)
69813 + return;
69814 +
69815 + if (TYPE_READONLY(type))
69816 + return;
69817 +
69818 + if (walk_struct(type))
69819 + constify_type(type);
69820 +}
69821 +
69822 +static unsigned int check_local_variables(void);
69823 +
69824 +struct gimple_opt_pass pass_local_variable = {
69825 + {
69826 + .type = GIMPLE_PASS,
69827 + .name = "check_local_variables",
69828 + .gate = NULL,
69829 + .execute = check_local_variables,
69830 + .sub = NULL,
69831 + .next = NULL,
69832 + .static_pass_number = 0,
69833 + .tv_id = TV_NONE,
69834 + .properties_required = 0,
69835 + .properties_provided = 0,
69836 + .properties_destroyed = 0,
69837 + .todo_flags_start = 0,
69838 + .todo_flags_finish = 0
69839 + }
69840 +};
69841 +
69842 +static unsigned int check_local_variables(void)
69843 +{
69844 + tree var;
69845 + referenced_var_iterator rvi;
69846 +
69847 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
69848 + FOR_EACH_REFERENCED_VAR(var, rvi) {
69849 +#else
69850 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
69851 +#endif
69852 + tree type = TREE_TYPE(var);
69853 +
69854 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
69855 + continue;
69856 +
69857 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
69858 + continue;
69859 +
69860 + if (!TYPE_READONLY(type))
69861 + continue;
69862 +
69863 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
69864 +// continue;
69865 +
69866 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
69867 +// continue;
69868 +
69869 + if (walk_struct(type)) {
69870 + error("constified variable %qE cannot be local", var);
69871 + return 1;
69872 + }
69873 + }
69874 + return 0;
69875 +}
69876 +
69877 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
69878 +{
69879 + const char * const plugin_name = plugin_info->base_name;
69880 + const int argc = plugin_info->argc;
69881 + const struct plugin_argument * const argv = plugin_info->argv;
69882 + int i;
69883 + bool constify = true;
69884 +
69885 + struct register_pass_info local_variable_pass_info = {
69886 + .pass = &pass_local_variable.pass,
69887 + .reference_pass_name = "*referenced_vars",
69888 + .ref_pass_instance_number = 0,
69889 + .pos_op = PASS_POS_INSERT_AFTER
69890 + };
69891 +
69892 + if (!plugin_default_version_check(version, &gcc_version)) {
69893 + error(G_("incompatible gcc/plugin versions"));
69894 + return 1;
69895 + }
69896 +
69897 + for (i = 0; i < argc; ++i) {
69898 + if (!(strcmp(argv[i].key, "no-constify"))) {
69899 + constify = false;
69900 + continue;
69901 + }
69902 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
69903 + }
69904 +
69905 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
69906 + if (constify) {
69907 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
69908 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
69909 + }
69910 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
69911 +
69912 + return 0;
69913 +}
69914 diff -urNp linux-3.0.4/tools/gcc/Makefile linux-3.0.4/tools/gcc/Makefile
69915 --- linux-3.0.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
69916 +++ linux-3.0.4/tools/gcc/Makefile 2011-08-23 21:47:56.000000000 -0400
69917 @@ -0,0 +1,12 @@
69918 +#CC := gcc
69919 +#PLUGIN_SOURCE_FILES := pax_plugin.c
69920 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
69921 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
69922 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
69923 +
69924 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
69925 +
69926 +hostlibs-y := stackleak_plugin.so constify_plugin.so
69927 +always := $(hostlibs-y)
69928 +stackleak_plugin-objs := stackleak_plugin.o
69929 +constify_plugin-objs := constify_plugin.o
69930 diff -urNp linux-3.0.4/tools/gcc/stackleak_plugin.c linux-3.0.4/tools/gcc/stackleak_plugin.c
69931 --- linux-3.0.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
69932 +++ linux-3.0.4/tools/gcc/stackleak_plugin.c 2011-08-23 21:47:56.000000000 -0400
69933 @@ -0,0 +1,243 @@
69934 +/*
69935 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
69936 + * Licensed under the GPL v2
69937 + *
69938 + * Note: the choice of the license means that the compilation process is
69939 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
69940 + * but for the kernel it doesn't matter since it doesn't link against
69941 + * any of the gcc libraries
69942 + *
69943 + * gcc plugin to help implement various PaX features
69944 + *
69945 + * - track lowest stack pointer
69946 + *
69947 + * TODO:
69948 + * - initialize all local variables
69949 + *
69950 + * BUGS:
69951 + * - cloned functions are instrumented twice
69952 + */
69953 +#include "gcc-plugin.h"
69954 +#include "config.h"
69955 +#include "system.h"
69956 +#include "coretypes.h"
69957 +#include "tree.h"
69958 +#include "tree-pass.h"
69959 +#include "intl.h"
69960 +#include "plugin-version.h"
69961 +#include "tm.h"
69962 +#include "toplev.h"
69963 +#include "basic-block.h"
69964 +#include "gimple.h"
69965 +//#include "expr.h" where are you...
69966 +#include "diagnostic.h"
69967 +#include "rtl.h"
69968 +#include "emit-rtl.h"
69969 +#include "function.h"
69970 +
69971 +int plugin_is_GPL_compatible;
69972 +
69973 +static int track_frame_size = -1;
69974 +static const char track_function[] = "pax_track_stack";
69975 +static bool init_locals;
69976 +
69977 +static struct plugin_info stackleak_plugin_info = {
69978 + .version = "201106030000",
69979 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
69980 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
69981 +};
69982 +
69983 +static bool gate_stackleak_track_stack(void);
69984 +static unsigned int execute_stackleak_tree_instrument(void);
69985 +static unsigned int execute_stackleak_final(void);
69986 +
69987 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
69988 + .pass = {
69989 + .type = GIMPLE_PASS,
69990 + .name = "stackleak_tree_instrument",
69991 + .gate = gate_stackleak_track_stack,
69992 + .execute = execute_stackleak_tree_instrument,
69993 + .sub = NULL,
69994 + .next = NULL,
69995 + .static_pass_number = 0,
69996 + .tv_id = TV_NONE,
69997 + .properties_required = PROP_gimple_leh | PROP_cfg,
69998 + .properties_provided = 0,
69999 + .properties_destroyed = 0,
70000 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
70001 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
70002 + }
70003 +};
70004 +
70005 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
70006 + .pass = {
70007 + .type = RTL_PASS,
70008 + .name = "stackleak_final",
70009 + .gate = gate_stackleak_track_stack,
70010 + .execute = execute_stackleak_final,
70011 + .sub = NULL,
70012 + .next = NULL,
70013 + .static_pass_number = 0,
70014 + .tv_id = TV_NONE,
70015 + .properties_required = 0,
70016 + .properties_provided = 0,
70017 + .properties_destroyed = 0,
70018 + .todo_flags_start = 0,
70019 + .todo_flags_finish = 0
70020 + }
70021 +};
70022 +
70023 +static bool gate_stackleak_track_stack(void)
70024 +{
70025 + return track_frame_size >= 0;
70026 +}
70027 +
70028 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
70029 +{
70030 + gimple call;
70031 + tree decl, type;
70032 +
70033 + // insert call to void pax_track_stack(void)
70034 + type = build_function_type_list(void_type_node, NULL_TREE);
70035 + decl = build_fn_decl(track_function, type);
70036 + DECL_ASSEMBLER_NAME(decl); // for LTO
70037 + call = gimple_build_call(decl, 0);
70038 + if (before)
70039 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
70040 + else
70041 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
70042 +}
70043 +
70044 +static unsigned int execute_stackleak_tree_instrument(void)
70045 +{
70046 + basic_block bb;
70047 + gimple_stmt_iterator gsi;
70048 +
70049 + // 1. loop through BBs and GIMPLE statements
70050 + FOR_EACH_BB(bb) {
70051 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
70052 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
70053 + tree decl;
70054 + gimple stmt = gsi_stmt(gsi);
70055 +
70056 + if (!is_gimple_call(stmt))
70057 + continue;
70058 + decl = gimple_call_fndecl(stmt);
70059 + if (!decl)
70060 + continue;
70061 + if (TREE_CODE(decl) != FUNCTION_DECL)
70062 + continue;
70063 + if (!DECL_BUILT_IN(decl))
70064 + continue;
70065 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
70066 + continue;
70067 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70068 + continue;
70069 +
70070 + // 2. insert track call after each __builtin_alloca call
70071 + stackleak_add_instrumentation(&gsi, false);
70072 +// print_node(stderr, "pax", decl, 4);
70073 + }
70074 + }
70075 +
70076 + // 3. insert track call at the beginning
70077 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70078 + gsi = gsi_start_bb(bb);
70079 + stackleak_add_instrumentation(&gsi, true);
70080 +
70081 + return 0;
70082 +}
70083 +
70084 +static unsigned int execute_stackleak_final(void)
70085 +{
70086 + rtx insn;
70087 +
70088 + if (cfun->calls_alloca)
70089 + return 0;
70090 +
70091 + // 1. find pax_track_stack calls
70092 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70093 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70094 + rtx body;
70095 +
70096 + if (!CALL_P(insn))
70097 + continue;
70098 + body = PATTERN(insn);
70099 + if (GET_CODE(body) != CALL)
70100 + continue;
70101 + body = XEXP(body, 0);
70102 + if (GET_CODE(body) != MEM)
70103 + continue;
70104 + body = XEXP(body, 0);
70105 + if (GET_CODE(body) != SYMBOL_REF)
70106 + continue;
70107 + if (strcmp(XSTR(body, 0), track_function))
70108 + continue;
70109 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70110 + // 2. delete call if function frame is not big enough
70111 + if (get_frame_size() >= track_frame_size)
70112 + continue;
70113 + delete_insn_and_edges(insn);
70114 + }
70115 +
70116 +// print_simple_rtl(stderr, get_insns());
70117 +// print_rtl(stderr, get_insns());
70118 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70119 +
70120 + return 0;
70121 +}
70122 +
70123 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70124 +{
70125 + const char * const plugin_name = plugin_info->base_name;
70126 + const int argc = plugin_info->argc;
70127 + const struct plugin_argument * const argv = plugin_info->argv;
70128 + int i;
70129 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70130 + .pass = &stackleak_tree_instrument_pass.pass,
70131 +// .reference_pass_name = "tree_profile",
70132 + .reference_pass_name = "optimized",
70133 + .ref_pass_instance_number = 0,
70134 + .pos_op = PASS_POS_INSERT_AFTER
70135 + };
70136 + struct register_pass_info stackleak_final_pass_info = {
70137 + .pass = &stackleak_final_rtl_opt_pass.pass,
70138 + .reference_pass_name = "final",
70139 + .ref_pass_instance_number = 0,
70140 + .pos_op = PASS_POS_INSERT_BEFORE
70141 + };
70142 +
70143 + if (!plugin_default_version_check(version, &gcc_version)) {
70144 + error(G_("incompatible gcc/plugin versions"));
70145 + return 1;
70146 + }
70147 +
70148 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70149 +
70150 + for (i = 0; i < argc; ++i) {
70151 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70152 + if (!argv[i].value) {
70153 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70154 + continue;
70155 + }
70156 + track_frame_size = atoi(argv[i].value);
70157 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70158 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70159 + continue;
70160 + }
70161 + if (!strcmp(argv[i].key, "initialize-locals")) {
70162 + if (argv[i].value) {
70163 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70164 + continue;
70165 + }
70166 + init_locals = true;
70167 + continue;
70168 + }
70169 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70170 + }
70171 +
70172 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70173 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70174 +
70175 + return 0;
70176 +}
70177 diff -urNp linux-3.0.4/usr/gen_init_cpio.c linux-3.0.4/usr/gen_init_cpio.c
70178 --- linux-3.0.4/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
70179 +++ linux-3.0.4/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
70180 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
70181 int retval;
70182 int rc = -1;
70183 int namesize;
70184 - int i;
70185 + unsigned int i;
70186
70187 mode |= S_IFREG;
70188
70189 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
70190 *env_var = *expanded = '\0';
70191 strncat(env_var, start + 2, end - start - 2);
70192 strncat(expanded, new_location, start - new_location);
70193 - strncat(expanded, getenv(env_var), PATH_MAX);
70194 - strncat(expanded, end + 1, PATH_MAX);
70195 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70196 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70197 strncpy(new_location, expanded, PATH_MAX);
70198 + new_location[PATH_MAX] = 0;
70199 } else
70200 break;
70201 }
70202 diff -urNp linux-3.0.4/virt/kvm/kvm_main.c linux-3.0.4/virt/kvm/kvm_main.c
70203 --- linux-3.0.4/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
70204 +++ linux-3.0.4/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
70205 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70206
70207 static cpumask_var_t cpus_hardware_enabled;
70208 static int kvm_usage_count = 0;
70209 -static atomic_t hardware_enable_failed;
70210 +static atomic_unchecked_t hardware_enable_failed;
70211
70212 struct kmem_cache *kvm_vcpu_cache;
70213 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70214 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
70215
70216 if (r) {
70217 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70218 - atomic_inc(&hardware_enable_failed);
70219 + atomic_inc_unchecked(&hardware_enable_failed);
70220 printk(KERN_INFO "kvm: enabling virtualization on "
70221 "CPU%d failed\n", cpu);
70222 }
70223 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
70224
70225 kvm_usage_count++;
70226 if (kvm_usage_count == 1) {
70227 - atomic_set(&hardware_enable_failed, 0);
70228 + atomic_set_unchecked(&hardware_enable_failed, 0);
70229 on_each_cpu(hardware_enable_nolock, NULL, 1);
70230
70231 - if (atomic_read(&hardware_enable_failed)) {
70232 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70233 hardware_disable_all_nolock();
70234 r = -EBUSY;
70235 }
70236 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
70237 kvm_arch_vcpu_put(vcpu);
70238 }
70239
70240 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70241 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70242 struct module *module)
70243 {
70244 int r;
70245 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
70246 if (!vcpu_align)
70247 vcpu_align = __alignof__(struct kvm_vcpu);
70248 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70249 - 0, NULL);
70250 + SLAB_USERCOPY, NULL);
70251 if (!kvm_vcpu_cache) {
70252 r = -ENOMEM;
70253 goto out_free_3;
70254 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
70255 if (r)
70256 goto out_free;
70257
70258 - kvm_chardev_ops.owner = module;
70259 - kvm_vm_fops.owner = module;
70260 - kvm_vcpu_fops.owner = module;
70261 + pax_open_kernel();
70262 + *(void **)&kvm_chardev_ops.owner = module;
70263 + *(void **)&kvm_vm_fops.owner = module;
70264 + *(void **)&kvm_vcpu_fops.owner = module;
70265 + pax_close_kernel();
70266
70267 r = misc_register(&kvm_dev);
70268 if (r) {