]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.0.3-201108292233.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.3-201108292233.patch
1 diff -urNp linux-3.0.3/arch/alpha/include/asm/elf.h linux-3.0.3/arch/alpha/include/asm/elf.h
2 --- linux-3.0.3/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.3/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-3.0.3/arch/alpha/include/asm/pgtable.h linux-3.0.3/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.3/arch/alpha/kernel/module.c linux-3.0.3/arch/alpha/kernel/module.c
40 --- linux-3.0.3/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.3/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.3/arch/alpha/kernel/osf_sys.c linux-3.0.3/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-3.0.3/arch/alpha/mm/fault.c linux-3.0.3/arch/alpha/mm/fault.c
86 --- linux-3.0.3/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.3/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.3/arch/arm/include/asm/elf.h linux-3.0.3/arch/arm/include/asm/elf.h
245 --- linux-3.0.3/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.3/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.3/arch/arm/include/asm/kmap_types.h linux-3.0.3/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.3/arch/arm/include/asm/uaccess.h linux-3.0.3/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-3.0.3/arch/arm/kernel/armksyms.c linux-3.0.3/arch/arm/kernel/armksyms.c
344 --- linux-3.0.3/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.3/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.3/arch/arm/kernel/process.c linux-3.0.3/arch/arm/kernel/process.c
358 --- linux-3.0.3/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.3/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.3/arch/arm/kernel/traps.c linux-3.0.3/arch/arm/kernel/traps.c
382 --- linux-3.0.3/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.3/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.3/arch/arm/lib/copy_from_user.S linux-3.0.3/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.3/arch/arm/lib/copy_to_user.S linux-3.0.3/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.3/arch/arm/lib/uaccess.S linux-3.0.3/arch/arm/lib/uaccess.S
456 --- linux-3.0.3/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.3/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-3.0.3/arch/arm/mm/fault.c linux-3.0.3/arch/arm/mm/fault.c
536 --- linux-3.0.3/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.3/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.3/arch/arm/mm/mmap.c linux-3.0.3/arch/arm/mm/mmap.c
587 --- linux-3.0.3/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.3/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.3/arch/avr32/include/asm/elf.h linux-3.0.3/arch/avr32/include/asm/elf.h
639 --- linux-3.0.3/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.3/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.3/arch/avr32/include/asm/kmap_types.h linux-3.0.3/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.3/arch/avr32/mm/fault.c linux-3.0.3/arch/avr32/mm/fault.c
671 --- linux-3.0.3/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.3/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.3/arch/frv/include/asm/kmap_types.h linux-3.0.3/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.3/arch/frv/mm/elf-fdpic.c linux-3.0.3/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.3/arch/ia64/include/asm/elf.h linux-3.0.3/arch/ia64/include/asm/elf.h
757 --- linux-3.0.3/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.3/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.3/arch/ia64/include/asm/pgtable.h linux-3.0.3/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.3/arch/ia64/include/asm/spinlock.h linux-3.0.3/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.3/arch/ia64/include/asm/uaccess.h linux-3.0.3/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.3/arch/ia64/kernel/module.c linux-3.0.3/arch/ia64/kernel/module.c
837 --- linux-3.0.3/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.3/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-3.0.3/arch/ia64/kernel/sys_ia64.c linux-3.0.3/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.3/arch/ia64/mm/fault.c linux-3.0.3/arch/ia64/mm/fault.c
975 --- linux-3.0.3/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.3/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.3/arch/ia64/mm/hugetlbpage.c linux-3.0.3/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.3/arch/ia64/mm/init.c linux-3.0.3/arch/ia64/mm/init.c
1039 --- linux-3.0.3/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.3/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.3/arch/m32r/lib/usercopy.c linux-3.0.3/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.3/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.3/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.3/arch/mips/include/asm/elf.h linux-3.0.3/arch/mips/include/asm/elf.h
1085 --- linux-3.0.3/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.3/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.3/arch/mips/include/asm/page.h linux-3.0.3/arch/mips/include/asm/page.h
1109 --- linux-3.0.3/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.3/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.3/arch/mips/include/asm/system.h linux-3.0.3/arch/mips/include/asm/system.h
1121 --- linux-3.0.3/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.3/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.3/arch/mips/kernel/process.c linux-3.0.3/arch/mips/kernel/process.c
1166 --- linux-3.0.3/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.3/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.3/arch/mips/mm/fault.c linux-3.0.3/arch/mips/mm/fault.c
1185 --- linux-3.0.3/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.3/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.3/arch/mips/mm/mmap.c linux-3.0.3/arch/mips/mm/mmap.c
1212 --- linux-3.0.3/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.3/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.3/arch/parisc/include/asm/elf.h linux-3.0.3/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.3/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.3/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.3/arch/parisc/include/asm/pgtable.h linux-3.0.3/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.3/arch/parisc/kernel/module.c linux-3.0.3/arch/parisc/kernel/module.c
1314 --- linux-3.0.3/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.3/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.3/arch/parisc/kernel/sys_parisc.c linux-3.0.3/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.3/arch/parisc/kernel/traps.c linux-3.0.3/arch/parisc/kernel/traps.c
1447 --- linux-3.0.3/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.3/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.3/arch/parisc/mm/fault.c linux-3.0.3/arch/parisc/mm/fault.c
1461 --- linux-3.0.3/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.3/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.3/arch/powerpc/include/asm/elf.h linux-3.0.3/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667 diff -urNp linux-3.0.3/arch/powerpc/include/asm/kmap_types.h linux-3.0.3/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.3/arch/powerpc/include/asm/mman.h linux-3.0.3/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.3/arch/powerpc/include/asm/page_64.h linux-3.0.3/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.3/arch/powerpc/include/asm/page.h linux-3.0.3/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.3/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.3/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.3/arch/powerpc/include/asm/pgtable.h linux-3.0.3/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.3/arch/powerpc/include/asm/reg.h linux-3.0.3/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.3/arch/powerpc/include/asm/system.h linux-3.0.3/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.3/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.3/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.3/arch/powerpc/include/asm/uaccess.h linux-3.0.3/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964 diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.3/arch/powerpc/kernel/module_32.c linux-3.0.3/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.3/arch/powerpc/kernel/module.c linux-3.0.3/arch/powerpc/kernel/module.c
2033 --- linux-3.0.3/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.3/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074 diff -urNp linux-3.0.3/arch/powerpc/kernel/process.c linux-3.0.3/arch/powerpc/kernel/process.c
2075 --- linux-3.0.3/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.3/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_32.c linux-3.0.3/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_64.c linux-3.0.3/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.3/arch/powerpc/kernel/traps.c linux-3.0.3/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.3/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.3/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.3/arch/powerpc/kernel/vdso.c linux-3.0.3/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.3/arch/powerpc/lib/usercopy_64.c linux-3.0.3/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.3/arch/powerpc/mm/fault.c linux-3.0.3/arch/powerpc/mm/fault.c
2278 --- linux-3.0.3/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.3/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384 diff -urNp linux-3.0.3/arch/powerpc/mm/mmap_64.c linux-3.0.3/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410 diff -urNp linux-3.0.3/arch/powerpc/mm/slice.c linux-3.0.3/arch/powerpc/mm/slice.c
2411 --- linux-3.0.3/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.3/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.3/arch/s390/include/asm/elf.h linux-3.0.3/arch/s390/include/asm/elf.h
2480 --- linux-3.0.3/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.3/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.3/arch/s390/include/asm/system.h linux-3.0.3/arch/s390/include/asm/system.h
2508 --- linux-3.0.3/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.3/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.3/arch/s390/include/asm/uaccess.h linux-3.0.3/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.3/arch/s390/kernel/module.c linux-3.0.3/arch/s390/kernel/module.c
2555 --- linux-3.0.3/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.3/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.3/arch/s390/kernel/process.c linux-3.0.3/arch/s390/kernel/process.c
2629 --- linux-3.0.3/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.3/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.3/arch/s390/kernel/setup.c linux-3.0.3/arch/s390/kernel/setup.c
2672 --- linux-3.0.3/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.3/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.3/arch/s390/mm/mmap.c linux-3.0.3/arch/s390/mm/mmap.c
2684 --- linux-3.0.3/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.3/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.3/arch/score/include/asm/system.h linux-3.0.3/arch/score/include/asm/system.h
2733 --- linux-3.0.3/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.3/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.3/arch/score/kernel/process.c linux-3.0.3/arch/score/kernel/process.c
2745 --- linux-3.0.3/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.3/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.3/arch/sh/mm/mmap.c linux-3.0.3/arch/sh/mm/mmap.c
2757 --- linux-3.0.3/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.3/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833 diff -urNp linux-3.0.3/arch/sparc/include/asm/atomic_64.h linux-3.0.3/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028 diff -urNp linux-3.0.3/arch/sparc/include/asm/cache.h linux-3.0.3/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.3/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.3/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_32.h linux-3.0.3/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_64.h linux-3.0.3/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059 +++ linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074 diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtable_32.h linux-3.0.3/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.3/arch/sparc/include/asm/spinlock_64.h linux-3.0.3/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127 +++ linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_32.h linux-3.0.3/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_64.h linux-3.0.3/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_32.h linux-3.0.3/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_64.h linux-3.0.3/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess.h linux-3.0.3/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365 diff -urNp linux-3.0.3/arch/sparc/kernel/Makefile linux-3.0.3/arch/sparc/kernel/Makefile
3366 --- linux-3.0.3/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367 +++ linux-3.0.3/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.3/arch/sparc/kernel/process_32.c linux-3.0.3/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.3/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.3/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.3/arch/sparc/kernel/process_64.c linux-3.0.3/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.3/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.3/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624 diff -urNp linux-3.0.3/arch/sparc/kernel/traps_32.c linux-3.0.3/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.3/arch/sparc/kernel/traps_64.c linux-3.0.3/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.3/arch/sparc/kernel/unaligned_64.c linux-3.0.3/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798 +++ linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.3/arch/sparc/lib/atomic_64.S linux-3.0.3/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.3/arch/sparc/lib/ksyms.c linux-3.0.3/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.3/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.3/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.3/arch/sparc/lib/Makefile linux-3.0.3/arch/sparc/lib/Makefile
4068 --- linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069 +++ linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.3/arch/sparc/Makefile linux-3.0.3/arch/sparc/Makefile
4080 --- linux-3.0.3/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.3/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.3/arch/sparc/mm/fault_32.c linux-3.0.3/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.3/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.3/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398 diff -urNp linux-3.0.3/arch/sparc/mm/fault_64.c linux-3.0.3/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.3/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.3/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.3/arch/sparc/mm/hugetlbpage.c linux-3.0.3/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.3/arch/sparc/mm/init_32.c linux-3.0.3/arch/sparc/mm/init_32.c
4971 --- linux-3.0.3/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.3/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.3/arch/sparc/mm/Makefile linux-3.0.3/arch/sparc/mm/Makefile
5008 --- linux-3.0.3/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.3/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.3/arch/sparc/mm/srmmu.c linux-3.0.3/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.3/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.3/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.3/arch/um/include/asm/kmap_types.h linux-3.0.3/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.3/arch/um/include/asm/page.h linux-3.0.3/arch/um/include/asm/page.h
5048 --- linux-3.0.3/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.3/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.3/arch/um/kernel/process.c linux-3.0.3/arch/um/kernel/process.c
5061 --- linux-3.0.3/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.3/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.3/arch/um/sys-i386/syscalls.c linux-3.0.3/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 + unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 + pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 + if (len > pax_task_size || addr > pax_task_size - len)
5103 + return -EINVAL;
5104 +
5105 + return 0;
5106 +}
5107 +
5108 /*
5109 * The prototype on i386 is:
5110 *
5111 diff -urNp linux-3.0.3/arch/x86/boot/bitops.h linux-3.0.3/arch/x86/boot/bitops.h
5112 --- linux-3.0.3/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.3/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132 diff -urNp linux-3.0.3/arch/x86/boot/boot.h linux-3.0.3/arch/x86/boot/boot.h
5133 --- linux-3.0.3/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.3/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139 - asm("movw %%ds,%0" : "=rm" (seg));
5140 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148 - asm("repe; cmpsb; setnz %0"
5149 + asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153 diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_32.S linux-3.0.3/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160 - movl $LOAD_PHYSICAL_ADDR, %ebx
5161 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169 - subl $LOAD_PHYSICAL_ADDR, %ebx
5170 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178 - testl %ecx, %ecx
5179 - jz 2f
5180 + jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184 diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_64.S linux-3.0.3/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191 - movl $LOAD_PHYSICAL_ADDR, %ebx
5192 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200 - movq $LOAD_PHYSICAL_ADDR, %rbp
5201 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.3/arch/x86/boot/compressed/Makefile linux-3.0.3/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218 diff -urNp linux-3.0.3/arch/x86/boot/compressed/misc.c linux-3.0.3/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239 diff -urNp linux-3.0.3/arch/x86/boot/compressed/relocs.c linux-3.0.3/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 + unsigned int i;
5261 +
5262 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 + if (!phdr) {
5264 + die("Unable to allocate %d program headers\n",
5265 + ehdr.e_phnum);
5266 + }
5267 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 + die("Seek to %d failed: %s\n",
5269 + ehdr.e_phoff, strerror(errno));
5270 + }
5271 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 + die("Cannot read ELF program headers: %s\n",
5273 + strerror(errno));
5274 + }
5275 + for(i = 0; i < ehdr.e_phnum; i++) {
5276 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284 + }
5285 +
5286 +}
5287 +
5288 static void read_shdrs(FILE *fp)
5289 {
5290 - int i;
5291 + unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299 - int i;
5300 + unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308 - int i,j;
5309 + unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317 - int i,j;
5318 + unsigned int i,j;
5319 + uint32_t base;
5320 +
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328 + base = 0;
5329 + for (j = 0; j < ehdr.e_phnum; j++) {
5330 + if (phdr[j].p_type != PT_LOAD )
5331 + continue;
5332 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 + continue;
5334 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 + break;
5336 + }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348 - int i;
5349 + unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356 - int j;
5357 + unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365 - int i, printed = 0;
5366 + unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373 - int j;
5374 + unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382 - int i;
5383 + unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389 - int j;
5390 + unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 + continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 + continue;
5406 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 + continue;
5408 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 + continue;
5410 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 + continue;
5412 +#endif
5413 +
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421 - int i;
5422 + unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430 + read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434 diff -urNp linux-3.0.3/arch/x86/boot/cpucheck.c linux-3.0.3/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.3/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.3/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441 - asm("movl %%cr0,%0" : "=r" (cr0));
5442 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450 - asm("pushfl ; "
5451 + asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459 - asm("cpuid"
5460 + asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468 - asm("cpuid"
5469 + asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477 - asm("cpuid"
5478 + asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486 - asm("cpuid"
5487 + asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 - asm("cpuid"
5522 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 + asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532 diff -urNp linux-3.0.3/arch/x86/boot/header.S linux-3.0.3/arch/x86/boot/header.S
5533 --- linux-3.0.3/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.3/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544 diff -urNp linux-3.0.3/arch/x86/boot/Makefile linux-3.0.3/arch/x86/boot/Makefile
5545 --- linux-3.0.3/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.3/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557 diff -urNp linux-3.0.3/arch/x86/boot/memory.c linux-3.0.3/arch/x86/boot/memory.c
5558 --- linux-3.0.3/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.3/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564 - int count = 0;
5565 + unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.3/arch/x86/boot/video.c linux-3.0.3/arch/x86/boot/video.c
5570 --- linux-3.0.3/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.3/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576 - int i, len = 0;
5577 + unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581 diff -urNp linux-3.0.3/arch/x86/boot/video-vesa.c linux-3.0.3/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.3/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.3/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588 + boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592 diff -urNp linux-3.0.3/arch/x86/ia32/ia32_aout.c linux-3.0.3/arch/x86/ia32/ia32_aout.c
5593 --- linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599 + memset(&dump, 0, sizeof(dump));
5600 +
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604 diff -urNp linux-3.0.3/arch/x86/ia32/ia32entry.S linux-3.0.3/arch/x86/ia32/ia32entry.S
5605 --- linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606 +++ linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5607 @@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611 +#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615 @@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619 + .macro pax_enter_kernel_user
5620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5621 + call pax_enter_kernel_user
5622 +#endif
5623 + .endm
5624 +
5625 + .macro pax_exit_kernel_user
5626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5627 + call pax_exit_kernel_user
5628 +#endif
5629 +#ifdef CONFIG_PAX_RANDKSTACK
5630 + pushq %rax
5631 + call pax_randomize_kstack
5632 + popq %rax
5633 +#endif
5634 + .endm
5635 +
5636 + .macro pax_erase_kstack
5637 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5638 + call pax_erase_kstack
5639 +#endif
5640 + .endm
5641 +
5642 /*
5643 * 32bit SYSENTER instruction entry.
5644 *
5645 @@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5646 CFI_REGISTER rsp,rbp
5647 SWAPGS_UNSAFE_STACK
5648 movq PER_CPU_VAR(kernel_stack), %rsp
5649 - addq $(KERNEL_STACK_OFFSET),%rsp
5650 + pax_enter_kernel_user
5651 /*
5652 * No need to follow this irqs on/off section: the syscall
5653 * disabled irqs, here we enable it straight after entry:
5654 @@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5655 CFI_REL_OFFSET rsp,0
5656 pushfq_cfi
5657 /*CFI_REL_OFFSET rflags,0*/
5658 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5659 + GET_THREAD_INFO(%r10)
5660 + movl TI_sysenter_return(%r10), %r10d
5661 CFI_REGISTER rip,r10
5662 pushq_cfi $__USER32_CS
5663 /*CFI_REL_OFFSET cs,0*/
5664 @@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5665 SAVE_ARGS 0,0,1
5666 /* no need to do an access_ok check here because rbp has been
5667 32bit zero extended */
5668 +
5669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5670 + mov $PAX_USER_SHADOW_BASE,%r10
5671 + add %r10,%rbp
5672 +#endif
5673 +
5674 1: movl (%rbp),%ebp
5675 .section __ex_table,"a"
5676 .quad 1b,ia32_badarg
5677 @@ -168,6 +199,8 @@ sysenter_dispatch:
5678 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5679 jnz sysexit_audit
5680 sysexit_from_sys_call:
5681 + pax_exit_kernel_user
5682 + pax_erase_kstack
5683 andl $~TS_COMPAT,TI_status(%r10)
5684 /* clear IF, that popfq doesn't enable interrupts early */
5685 andl $~0x200,EFLAGS-R11(%rsp)
5686 @@ -194,6 +227,9 @@ sysexit_from_sys_call:
5687 movl %eax,%esi /* 2nd arg: syscall number */
5688 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5689 call audit_syscall_entry
5690 +
5691 + pax_erase_kstack
5692 +
5693 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5694 cmpq $(IA32_NR_syscalls-1),%rax
5695 ja ia32_badsys
5696 @@ -246,6 +282,9 @@ sysenter_tracesys:
5697 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5698 movq %rsp,%rdi /* &pt_regs -> arg1 */
5699 call syscall_trace_enter
5700 +
5701 + pax_erase_kstack
5702 +
5703 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5704 RESTORE_REST
5705 cmpq $(IA32_NR_syscalls-1),%rax
5706 @@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5707 ENTRY(ia32_cstar_target)
5708 CFI_STARTPROC32 simple
5709 CFI_SIGNAL_FRAME
5710 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5711 + CFI_DEF_CFA rsp,0
5712 CFI_REGISTER rip,rcx
5713 /*CFI_REGISTER rflags,r11*/
5714 SWAPGS_UNSAFE_STACK
5715 movl %esp,%r8d
5716 CFI_REGISTER rsp,r8
5717 movq PER_CPU_VAR(kernel_stack),%rsp
5718 +
5719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5720 + pax_enter_kernel_user
5721 +#endif
5722 +
5723 /*
5724 * No need to follow this irqs on/off section: the syscall
5725 * disabled irqs and here we enable it straight after entry:
5726 */
5727 ENABLE_INTERRUPTS(CLBR_NONE)
5728 - SAVE_ARGS 8,1,1
5729 + SAVE_ARGS 8*6,1,1
5730 movl %eax,%eax /* zero extension */
5731 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5732 movq %rcx,RIP-ARGOFFSET(%rsp)
5733 @@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5734 /* no need to do an access_ok check here because r8 has been
5735 32bit zero extended */
5736 /* hardware stack frame is complete now */
5737 +
5738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5739 + mov $PAX_USER_SHADOW_BASE,%r10
5740 + add %r10,%r8
5741 +#endif
5742 +
5743 1: movl (%r8),%r9d
5744 .section __ex_table,"a"
5745 .quad 1b,ia32_badarg
5746 @@ -327,6 +377,8 @@ cstar_dispatch:
5747 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5748 jnz sysretl_audit
5749 sysretl_from_sys_call:
5750 + pax_exit_kernel_user
5751 + pax_erase_kstack
5752 andl $~TS_COMPAT,TI_status(%r10)
5753 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5754 movl RIP-ARGOFFSET(%rsp),%ecx
5755 @@ -364,6 +416,9 @@ cstar_tracesys:
5756 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5757 movq %rsp,%rdi /* &pt_regs -> arg1 */
5758 call syscall_trace_enter
5759 +
5760 + pax_erase_kstack
5761 +
5762 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5763 RESTORE_REST
5764 xchgl %ebp,%r9d
5765 @@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5766 CFI_REL_OFFSET rip,RIP-RIP
5767 PARAVIRT_ADJUST_EXCEPTION_FRAME
5768 SWAPGS
5769 + pax_enter_kernel_user
5770 /*
5771 * No need to follow this irqs on/off section: the syscall
5772 * disabled irqs and here we enable it straight after entry:
5773 @@ -441,6 +497,9 @@ ia32_tracesys:
5774 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5775 movq %rsp,%rdi /* &pt_regs -> arg1 */
5776 call syscall_trace_enter
5777 +
5778 + pax_erase_kstack
5779 +
5780 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5781 RESTORE_REST
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783 diff -urNp linux-3.0.3/arch/x86/ia32/ia32_signal.c linux-3.0.3/arch/x86/ia32/ia32_signal.c
5784 --- linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5785 +++ linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5786 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5787 sp -= frame_size;
5788 /* Align the stack pointer according to the i386 ABI,
5789 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5790 - sp = ((sp + 4) & -16ul) - 4;
5791 + sp = ((sp - 12) & -16ul) - 4;
5792 return (void __user *) sp;
5793 }
5794
5795 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5796 * These are actually not used anymore, but left because some
5797 * gdb versions depend on them as a marker.
5798 */
5799 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5800 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5801 } put_user_catch(err);
5802
5803 if (err)
5804 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5805 0xb8,
5806 __NR_ia32_rt_sigreturn,
5807 0x80cd,
5808 - 0,
5809 + 0
5810 };
5811
5812 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5813 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5814
5815 if (ka->sa.sa_flags & SA_RESTORER)
5816 restorer = ka->sa.sa_restorer;
5817 + else if (current->mm->context.vdso)
5818 + /* Return stub is in 32bit vsyscall page */
5819 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5820 else
5821 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5822 - rt_sigreturn);
5823 + restorer = &frame->retcode;
5824 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5825
5826 /*
5827 * Not actually used anymore, but left because some gdb
5828 * versions need it.
5829 */
5830 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835 diff -urNp linux-3.0.3/arch/x86/include/asm/alternative.h linux-3.0.3/arch/x86/include/asm/alternative.h
5836 --- linux-3.0.3/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5837 +++ linux-3.0.3/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5838 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5839 ".section .discard,\"aw\",@progbits\n" \
5840 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5841 ".previous\n" \
5842 - ".section .altinstr_replacement, \"ax\"\n" \
5843 + ".section .altinstr_replacement, \"a\"\n" \
5844 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5845 ".previous"
5846
5847 diff -urNp linux-3.0.3/arch/x86/include/asm/apic.h linux-3.0.3/arch/x86/include/asm/apic.h
5848 --- linux-3.0.3/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5849 +++ linux-3.0.3/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5850 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5851
5852 #ifdef CONFIG_X86_LOCAL_APIC
5853
5854 -extern unsigned int apic_verbosity;
5855 +extern int apic_verbosity;
5856 extern int local_apic_timer_c2_ok;
5857
5858 extern int disable_apic;
5859 diff -urNp linux-3.0.3/arch/x86/include/asm/apm.h linux-3.0.3/arch/x86/include/asm/apm.h
5860 --- linux-3.0.3/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5861 +++ linux-3.0.3/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5862 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5863 __asm__ __volatile__(APM_DO_ZERO_SEGS
5864 "pushl %%edi\n\t"
5865 "pushl %%ebp\n\t"
5866 - "lcall *%%cs:apm_bios_entry\n\t"
5867 + "lcall *%%ss:apm_bios_entry\n\t"
5868 "setc %%al\n\t"
5869 "popl %%ebp\n\t"
5870 "popl %%edi\n\t"
5871 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5872 __asm__ __volatile__(APM_DO_ZERO_SEGS
5873 "pushl %%edi\n\t"
5874 "pushl %%ebp\n\t"
5875 - "lcall *%%cs:apm_bios_entry\n\t"
5876 + "lcall *%%ss:apm_bios_entry\n\t"
5877 "setc %%bl\n\t"
5878 "popl %%ebp\n\t"
5879 "popl %%edi\n\t"
5880 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_32.h linux-3.0.3/arch/x86/include/asm/atomic64_32.h
5881 --- linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5882 +++ linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5883 @@ -12,6 +12,14 @@ typedef struct {
5884 u64 __aligned(8) counter;
5885 } atomic64_t;
5886
5887 +#ifdef CONFIG_PAX_REFCOUNT
5888 +typedef struct {
5889 + u64 __aligned(8) counter;
5890 +} atomic64_unchecked_t;
5891 +#else
5892 +typedef atomic64_t atomic64_unchecked_t;
5893 +#endif
5894 +
5895 #define ATOMIC64_INIT(val) { (val) }
5896
5897 #ifdef CONFIG_X86_CMPXCHG64
5898 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5899 }
5900
5901 /**
5902 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5903 + * @p: pointer to type atomic64_unchecked_t
5904 + * @o: expected value
5905 + * @n: new value
5906 + *
5907 + * Atomically sets @v to @n if it was equal to @o and returns
5908 + * the old value.
5909 + */
5910 +
5911 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5912 +{
5913 + return cmpxchg64(&v->counter, o, n);
5914 +}
5915 +
5916 +/**
5917 * atomic64_xchg - xchg atomic64 variable
5918 * @v: pointer to type atomic64_t
5919 * @n: value to assign
5920 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5921 }
5922
5923 /**
5924 + * atomic64_set_unchecked - set atomic64 variable
5925 + * @v: pointer to type atomic64_unchecked_t
5926 + * @n: value to assign
5927 + *
5928 + * Atomically sets the value of @v to @n.
5929 + */
5930 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5931 +{
5932 + unsigned high = (unsigned)(i >> 32);
5933 + unsigned low = (unsigned)i;
5934 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5935 + : "+b" (low), "+c" (high)
5936 + : "S" (v)
5937 + : "eax", "edx", "memory"
5938 + );
5939 +}
5940 +
5941 +/**
5942 * atomic64_read - read atomic64 variable
5943 * @v: pointer to type atomic64_t
5944 *
5945 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5946 }
5947
5948 /**
5949 + * atomic64_read_unchecked - read atomic64 variable
5950 + * @v: pointer to type atomic64_unchecked_t
5951 + *
5952 + * Atomically reads the value of @v and returns it.
5953 + */
5954 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5955 +{
5956 + long long r;
5957 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5958 + : "=A" (r), "+c" (v)
5959 + : : "memory"
5960 + );
5961 + return r;
5962 + }
5963 +
5964 +/**
5965 * atomic64_add_return - add and return
5966 * @i: integer value to add
5967 * @v: pointer to type atomic64_t
5968 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5969 return i;
5970 }
5971
5972 +/**
5973 + * atomic64_add_return_unchecked - add and return
5974 + * @i: integer value to add
5975 + * @v: pointer to type atomic64_unchecked_t
5976 + *
5977 + * Atomically adds @i to @v and returns @i + *@v
5978 + */
5979 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5980 +{
5981 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5982 + : "+A" (i), "+c" (v)
5983 + : : "memory"
5984 + );
5985 + return i;
5986 +}
5987 +
5988 /*
5989 * Other variants with different arithmetic operators:
5990 */
5991 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5992 return a;
5993 }
5994
5995 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5996 +{
5997 + long long a;
5998 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
5999 + : "=A" (a)
6000 + : "S" (v)
6001 + : "memory", "ecx"
6002 + );
6003 + return a;
6004 +}
6005 +
6006 static inline long long atomic64_dec_return(atomic64_t *v)
6007 {
6008 long long a;
6009 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6010 }
6011
6012 /**
6013 + * atomic64_add_unchecked - add integer to atomic64 variable
6014 + * @i: integer value to add
6015 + * @v: pointer to type atomic64_unchecked_t
6016 + *
6017 + * Atomically adds @i to @v.
6018 + */
6019 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6020 +{
6021 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6022 + : "+A" (i), "+c" (v)
6023 + : : "memory"
6024 + );
6025 + return i;
6026 +}
6027 +
6028 +/**
6029 * atomic64_sub - subtract the atomic64 variable
6030 * @i: integer value to subtract
6031 * @v: pointer to type atomic64_t
6032 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_64.h linux-3.0.3/arch/x86/include/asm/atomic64_64.h
6033 --- linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6034 +++ linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6035 @@ -18,7 +18,19 @@
6036 */
6037 static inline long atomic64_read(const atomic64_t *v)
6038 {
6039 - return (*(volatile long *)&(v)->counter);
6040 + return (*(volatile const long *)&(v)->counter);
6041 +}
6042 +
6043 +/**
6044 + * atomic64_read_unchecked - read atomic64 variable
6045 + * @v: pointer of type atomic64_unchecked_t
6046 + *
6047 + * Atomically reads the value of @v.
6048 + * Doesn't imply a read memory barrier.
6049 + */
6050 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6051 +{
6052 + return (*(volatile const long *)&(v)->counter);
6053 }
6054
6055 /**
6056 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6057 }
6058
6059 /**
6060 + * atomic64_set_unchecked - set atomic64 variable
6061 + * @v: pointer to type atomic64_unchecked_t
6062 + * @i: required value
6063 + *
6064 + * Atomically sets the value of @v to @i.
6065 + */
6066 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6067 +{
6068 + v->counter = i;
6069 +}
6070 +
6071 +/**
6072 * atomic64_add - add integer to atomic64 variable
6073 * @i: integer value to add
6074 * @v: pointer to type atomic64_t
6075 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6076 */
6077 static inline void atomic64_add(long i, atomic64_t *v)
6078 {
6079 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6080 +
6081 +#ifdef CONFIG_PAX_REFCOUNT
6082 + "jno 0f\n"
6083 + LOCK_PREFIX "subq %1,%0\n"
6084 + "int $4\n0:\n"
6085 + _ASM_EXTABLE(0b, 0b)
6086 +#endif
6087 +
6088 + : "=m" (v->counter)
6089 + : "er" (i), "m" (v->counter));
6090 +}
6091 +
6092 +/**
6093 + * atomic64_add_unchecked - add integer to atomic64 variable
6094 + * @i: integer value to add
6095 + * @v: pointer to type atomic64_unchecked_t
6096 + *
6097 + * Atomically adds @i to @v.
6098 + */
6099 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6100 +{
6101 asm volatile(LOCK_PREFIX "addq %1,%0"
6102 : "=m" (v->counter)
6103 : "er" (i), "m" (v->counter));
6104 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6105 */
6106 static inline void atomic64_sub(long i, atomic64_t *v)
6107 {
6108 - asm volatile(LOCK_PREFIX "subq %1,%0"
6109 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6110 +
6111 +#ifdef CONFIG_PAX_REFCOUNT
6112 + "jno 0f\n"
6113 + LOCK_PREFIX "addq %1,%0\n"
6114 + "int $4\n0:\n"
6115 + _ASM_EXTABLE(0b, 0b)
6116 +#endif
6117 +
6118 + : "=m" (v->counter)
6119 + : "er" (i), "m" (v->counter));
6120 +}
6121 +
6122 +/**
6123 + * atomic64_sub_unchecked - subtract the atomic64 variable
6124 + * @i: integer value to subtract
6125 + * @v: pointer to type atomic64_unchecked_t
6126 + *
6127 + * Atomically subtracts @i from @v.
6128 + */
6129 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6130 +{
6131 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6132 : "=m" (v->counter)
6133 : "er" (i), "m" (v->counter));
6134 }
6135 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6136 {
6137 unsigned char c;
6138
6139 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6140 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6141 +
6142 +#ifdef CONFIG_PAX_REFCOUNT
6143 + "jno 0f\n"
6144 + LOCK_PREFIX "addq %2,%0\n"
6145 + "int $4\n0:\n"
6146 + _ASM_EXTABLE(0b, 0b)
6147 +#endif
6148 +
6149 + "sete %1\n"
6150 : "=m" (v->counter), "=qm" (c)
6151 : "er" (i), "m" (v->counter) : "memory");
6152 return c;
6153 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6154 */
6155 static inline void atomic64_inc(atomic64_t *v)
6156 {
6157 + asm volatile(LOCK_PREFIX "incq %0\n"
6158 +
6159 +#ifdef CONFIG_PAX_REFCOUNT
6160 + "jno 0f\n"
6161 + LOCK_PREFIX "decq %0\n"
6162 + "int $4\n0:\n"
6163 + _ASM_EXTABLE(0b, 0b)
6164 +#endif
6165 +
6166 + : "=m" (v->counter)
6167 + : "m" (v->counter));
6168 +}
6169 +
6170 +/**
6171 + * atomic64_inc_unchecked - increment atomic64 variable
6172 + * @v: pointer to type atomic64_unchecked_t
6173 + *
6174 + * Atomically increments @v by 1.
6175 + */
6176 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6177 +{
6178 asm volatile(LOCK_PREFIX "incq %0"
6179 : "=m" (v->counter)
6180 : "m" (v->counter));
6181 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6182 */
6183 static inline void atomic64_dec(atomic64_t *v)
6184 {
6185 - asm volatile(LOCK_PREFIX "decq %0"
6186 + asm volatile(LOCK_PREFIX "decq %0\n"
6187 +
6188 +#ifdef CONFIG_PAX_REFCOUNT
6189 + "jno 0f\n"
6190 + LOCK_PREFIX "incq %0\n"
6191 + "int $4\n0:\n"
6192 + _ASM_EXTABLE(0b, 0b)
6193 +#endif
6194 +
6195 + : "=m" (v->counter)
6196 + : "m" (v->counter));
6197 +}
6198 +
6199 +/**
6200 + * atomic64_dec_unchecked - decrement atomic64 variable
6201 + * @v: pointer to type atomic64_t
6202 + *
6203 + * Atomically decrements @v by 1.
6204 + */
6205 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6206 +{
6207 + asm volatile(LOCK_PREFIX "decq %0\n"
6208 : "=m" (v->counter)
6209 : "m" (v->counter));
6210 }
6211 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6212 {
6213 unsigned char c;
6214
6215 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6216 + asm volatile(LOCK_PREFIX "decq %0\n"
6217 +
6218 +#ifdef CONFIG_PAX_REFCOUNT
6219 + "jno 0f\n"
6220 + LOCK_PREFIX "incq %0\n"
6221 + "int $4\n0:\n"
6222 + _ASM_EXTABLE(0b, 0b)
6223 +#endif
6224 +
6225 + "sete %1\n"
6226 : "=m" (v->counter), "=qm" (c)
6227 : "m" (v->counter) : "memory");
6228 return c != 0;
6229 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6230 {
6231 unsigned char c;
6232
6233 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6234 + asm volatile(LOCK_PREFIX "incq %0\n"
6235 +
6236 +#ifdef CONFIG_PAX_REFCOUNT
6237 + "jno 0f\n"
6238 + LOCK_PREFIX "decq %0\n"
6239 + "int $4\n0:\n"
6240 + _ASM_EXTABLE(0b, 0b)
6241 +#endif
6242 +
6243 + "sete %1\n"
6244 : "=m" (v->counter), "=qm" (c)
6245 : "m" (v->counter) : "memory");
6246 return c != 0;
6247 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6248 {
6249 unsigned char c;
6250
6251 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6252 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6253 +
6254 +#ifdef CONFIG_PAX_REFCOUNT
6255 + "jno 0f\n"
6256 + LOCK_PREFIX "subq %2,%0\n"
6257 + "int $4\n0:\n"
6258 + _ASM_EXTABLE(0b, 0b)
6259 +#endif
6260 +
6261 + "sets %1\n"
6262 : "=m" (v->counter), "=qm" (c)
6263 : "er" (i), "m" (v->counter) : "memory");
6264 return c;
6265 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6266 static inline long atomic64_add_return(long i, atomic64_t *v)
6267 {
6268 long __i = i;
6269 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6270 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6271 +
6272 +#ifdef CONFIG_PAX_REFCOUNT
6273 + "jno 0f\n"
6274 + "movq %0, %1\n"
6275 + "int $4\n0:\n"
6276 + _ASM_EXTABLE(0b, 0b)
6277 +#endif
6278 +
6279 + : "+r" (i), "+m" (v->counter)
6280 + : : "memory");
6281 + return i + __i;
6282 +}
6283 +
6284 +/**
6285 + * atomic64_add_return_unchecked - add and return
6286 + * @i: integer value to add
6287 + * @v: pointer to type atomic64_unchecked_t
6288 + *
6289 + * Atomically adds @i to @v and returns @i + @v
6290 + */
6291 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6292 +{
6293 + long __i = i;
6294 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6295 : "+r" (i), "+m" (v->counter)
6296 : : "memory");
6297 return i + __i;
6298 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6299 }
6300
6301 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6302 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6303 +{
6304 + return atomic64_add_return_unchecked(1, v);
6305 +}
6306 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6307
6308 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6309 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6310 return cmpxchg(&v->counter, old, new);
6311 }
6312
6313 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6314 +{
6315 + return cmpxchg(&v->counter, old, new);
6316 +}
6317 +
6318 static inline long atomic64_xchg(atomic64_t *v, long new)
6319 {
6320 return xchg(&v->counter, new);
6321 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6322 */
6323 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6324 {
6325 - long c, old;
6326 + long c, old, new;
6327 c = atomic64_read(v);
6328 for (;;) {
6329 - if (unlikely(c == (u)))
6330 + if (unlikely(c == u))
6331 break;
6332 - old = atomic64_cmpxchg((v), c, c + (a));
6333 +
6334 + asm volatile("add %2,%0\n"
6335 +
6336 +#ifdef CONFIG_PAX_REFCOUNT
6337 + "jno 0f\n"
6338 + "sub %2,%0\n"
6339 + "int $4\n0:\n"
6340 + _ASM_EXTABLE(0b, 0b)
6341 +#endif
6342 +
6343 + : "=r" (new)
6344 + : "0" (c), "ir" (a));
6345 +
6346 + old = atomic64_cmpxchg(v, c, new);
6347 if (likely(old == c))
6348 break;
6349 c = old;
6350 }
6351 - return c != (u);
6352 + return c != u;
6353 }
6354
6355 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6356 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic.h linux-3.0.3/arch/x86/include/asm/atomic.h
6357 --- linux-3.0.3/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6358 +++ linux-3.0.3/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6359 @@ -22,7 +22,18 @@
6360 */
6361 static inline int atomic_read(const atomic_t *v)
6362 {
6363 - return (*(volatile int *)&(v)->counter);
6364 + return (*(volatile const int *)&(v)->counter);
6365 +}
6366 +
6367 +/**
6368 + * atomic_read_unchecked - read atomic variable
6369 + * @v: pointer of type atomic_unchecked_t
6370 + *
6371 + * Atomically reads the value of @v.
6372 + */
6373 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6374 +{
6375 + return (*(volatile const int *)&(v)->counter);
6376 }
6377
6378 /**
6379 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6380 }
6381
6382 /**
6383 + * atomic_set_unchecked - set atomic variable
6384 + * @v: pointer of type atomic_unchecked_t
6385 + * @i: required value
6386 + *
6387 + * Atomically sets the value of @v to @i.
6388 + */
6389 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6390 +{
6391 + v->counter = i;
6392 +}
6393 +
6394 +/**
6395 * atomic_add - add integer to atomic variable
6396 * @i: integer value to add
6397 * @v: pointer of type atomic_t
6398 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6399 */
6400 static inline void atomic_add(int i, atomic_t *v)
6401 {
6402 - asm volatile(LOCK_PREFIX "addl %1,%0"
6403 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6404 +
6405 +#ifdef CONFIG_PAX_REFCOUNT
6406 + "jno 0f\n"
6407 + LOCK_PREFIX "subl %1,%0\n"
6408 + "int $4\n0:\n"
6409 + _ASM_EXTABLE(0b, 0b)
6410 +#endif
6411 +
6412 + : "+m" (v->counter)
6413 + : "ir" (i));
6414 +}
6415 +
6416 +/**
6417 + * atomic_add_unchecked - add integer to atomic variable
6418 + * @i: integer value to add
6419 + * @v: pointer of type atomic_unchecked_t
6420 + *
6421 + * Atomically adds @i to @v.
6422 + */
6423 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6424 +{
6425 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6426 : "+m" (v->counter)
6427 : "ir" (i));
6428 }
6429 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6430 */
6431 static inline void atomic_sub(int i, atomic_t *v)
6432 {
6433 - asm volatile(LOCK_PREFIX "subl %1,%0"
6434 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6435 +
6436 +#ifdef CONFIG_PAX_REFCOUNT
6437 + "jno 0f\n"
6438 + LOCK_PREFIX "addl %1,%0\n"
6439 + "int $4\n0:\n"
6440 + _ASM_EXTABLE(0b, 0b)
6441 +#endif
6442 +
6443 + : "+m" (v->counter)
6444 + : "ir" (i));
6445 +}
6446 +
6447 +/**
6448 + * atomic_sub_unchecked - subtract integer from atomic variable
6449 + * @i: integer value to subtract
6450 + * @v: pointer of type atomic_unchecked_t
6451 + *
6452 + * Atomically subtracts @i from @v.
6453 + */
6454 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6455 +{
6456 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6461 {
6462 unsigned char c;
6463
6464 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6465 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6466 +
6467 +#ifdef CONFIG_PAX_REFCOUNT
6468 + "jno 0f\n"
6469 + LOCK_PREFIX "addl %2,%0\n"
6470 + "int $4\n0:\n"
6471 + _ASM_EXTABLE(0b, 0b)
6472 +#endif
6473 +
6474 + "sete %1\n"
6475 : "+m" (v->counter), "=qm" (c)
6476 : "ir" (i) : "memory");
6477 return c;
6478 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6479 */
6480 static inline void atomic_inc(atomic_t *v)
6481 {
6482 - asm volatile(LOCK_PREFIX "incl %0"
6483 + asm volatile(LOCK_PREFIX "incl %0\n"
6484 +
6485 +#ifdef CONFIG_PAX_REFCOUNT
6486 + "jno 0f\n"
6487 + LOCK_PREFIX "decl %0\n"
6488 + "int $4\n0:\n"
6489 + _ASM_EXTABLE(0b, 0b)
6490 +#endif
6491 +
6492 + : "+m" (v->counter));
6493 +}
6494 +
6495 +/**
6496 + * atomic_inc_unchecked - increment atomic variable
6497 + * @v: pointer of type atomic_unchecked_t
6498 + *
6499 + * Atomically increments @v by 1.
6500 + */
6501 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6502 +{
6503 + asm volatile(LOCK_PREFIX "incl %0\n"
6504 : "+m" (v->counter));
6505 }
6506
6507 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6508 */
6509 static inline void atomic_dec(atomic_t *v)
6510 {
6511 - asm volatile(LOCK_PREFIX "decl %0"
6512 + asm volatile(LOCK_PREFIX "decl %0\n"
6513 +
6514 +#ifdef CONFIG_PAX_REFCOUNT
6515 + "jno 0f\n"
6516 + LOCK_PREFIX "incl %0\n"
6517 + "int $4\n0:\n"
6518 + _ASM_EXTABLE(0b, 0b)
6519 +#endif
6520 +
6521 + : "+m" (v->counter));
6522 +}
6523 +
6524 +/**
6525 + * atomic_dec_unchecked - decrement atomic variable
6526 + * @v: pointer of type atomic_unchecked_t
6527 + *
6528 + * Atomically decrements @v by 1.
6529 + */
6530 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6531 +{
6532 + asm volatile(LOCK_PREFIX "decl %0\n"
6533 : "+m" (v->counter));
6534 }
6535
6536 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6537 {
6538 unsigned char c;
6539
6540 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6541 + asm volatile(LOCK_PREFIX "decl %0\n"
6542 +
6543 +#ifdef CONFIG_PAX_REFCOUNT
6544 + "jno 0f\n"
6545 + LOCK_PREFIX "incl %0\n"
6546 + "int $4\n0:\n"
6547 + _ASM_EXTABLE(0b, 0b)
6548 +#endif
6549 +
6550 + "sete %1\n"
6551 : "+m" (v->counter), "=qm" (c)
6552 : : "memory");
6553 return c != 0;
6554 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6555 {
6556 unsigned char c;
6557
6558 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6559 + asm volatile(LOCK_PREFIX "incl %0\n"
6560 +
6561 +#ifdef CONFIG_PAX_REFCOUNT
6562 + "jno 0f\n"
6563 + LOCK_PREFIX "decl %0\n"
6564 + "int $4\n0:\n"
6565 + _ASM_EXTABLE(0b, 0b)
6566 +#endif
6567 +
6568 + "sete %1\n"
6569 + : "+m" (v->counter), "=qm" (c)
6570 + : : "memory");
6571 + return c != 0;
6572 +}
6573 +
6574 +/**
6575 + * atomic_inc_and_test_unchecked - increment and test
6576 + * @v: pointer of type atomic_unchecked_t
6577 + *
6578 + * Atomically increments @v by 1
6579 + * and returns true if the result is zero, or false for all
6580 + * other cases.
6581 + */
6582 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6583 +{
6584 + unsigned char c;
6585 +
6586 + asm volatile(LOCK_PREFIX "incl %0\n"
6587 + "sete %1\n"
6588 : "+m" (v->counter), "=qm" (c)
6589 : : "memory");
6590 return c != 0;
6591 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6592 {
6593 unsigned char c;
6594
6595 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6596 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6597 +
6598 +#ifdef CONFIG_PAX_REFCOUNT
6599 + "jno 0f\n"
6600 + LOCK_PREFIX "subl %2,%0\n"
6601 + "int $4\n0:\n"
6602 + _ASM_EXTABLE(0b, 0b)
6603 +#endif
6604 +
6605 + "sets %1\n"
6606 : "+m" (v->counter), "=qm" (c)
6607 : "ir" (i) : "memory");
6608 return c;
6609 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6610 #endif
6611 /* Modern 486+ processor */
6612 __i = i;
6613 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6614 +
6615 +#ifdef CONFIG_PAX_REFCOUNT
6616 + "jno 0f\n"
6617 + "movl %0, %1\n"
6618 + "int $4\n0:\n"
6619 + _ASM_EXTABLE(0b, 0b)
6620 +#endif
6621 +
6622 + : "+r" (i), "+m" (v->counter)
6623 + : : "memory");
6624 + return i + __i;
6625 +
6626 +#ifdef CONFIG_M386
6627 +no_xadd: /* Legacy 386 processor */
6628 + local_irq_save(flags);
6629 + __i = atomic_read(v);
6630 + atomic_set(v, i + __i);
6631 + local_irq_restore(flags);
6632 + return i + __i;
6633 +#endif
6634 +}
6635 +
6636 +/**
6637 + * atomic_add_return_unchecked - add integer and return
6638 + * @v: pointer of type atomic_unchecked_t
6639 + * @i: integer value to add
6640 + *
6641 + * Atomically adds @i to @v and returns @i + @v
6642 + */
6643 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6644 +{
6645 + int __i;
6646 +#ifdef CONFIG_M386
6647 + unsigned long flags;
6648 + if (unlikely(boot_cpu_data.x86 <= 3))
6649 + goto no_xadd;
6650 +#endif
6651 + /* Modern 486+ processor */
6652 + __i = i;
6653 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6654 : "+r" (i), "+m" (v->counter)
6655 : : "memory");
6656 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6657 }
6658
6659 #define atomic_inc_return(v) (atomic_add_return(1, v))
6660 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6661 +{
6662 + return atomic_add_return_unchecked(1, v);
6663 +}
6664 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6665
6666 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6667 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6668 return cmpxchg(&v->counter, old, new);
6669 }
6670
6671 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6672 +{
6673 + return cmpxchg(&v->counter, old, new);
6674 +}
6675 +
6676 static inline int atomic_xchg(atomic_t *v, int new)
6677 {
6678 return xchg(&v->counter, new);
6679 }
6680
6681 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6682 +{
6683 + return xchg(&v->counter, new);
6684 +}
6685 +
6686 /**
6687 * atomic_add_unless - add unless the number is already a given value
6688 * @v: pointer of type atomic_t
6689 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6690 */
6691 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6692 {
6693 - int c, old;
6694 + int c, old, new;
6695 c = atomic_read(v);
6696 for (;;) {
6697 - if (unlikely(c == (u)))
6698 + if (unlikely(c == u))
6699 break;
6700 - old = atomic_cmpxchg((v), c, c + (a));
6701 +
6702 + asm volatile("addl %2,%0\n"
6703 +
6704 +#ifdef CONFIG_PAX_REFCOUNT
6705 + "jno 0f\n"
6706 + "subl %2,%0\n"
6707 + "int $4\n0:\n"
6708 + _ASM_EXTABLE(0b, 0b)
6709 +#endif
6710 +
6711 + : "=r" (new)
6712 + : "0" (c), "ir" (a));
6713 +
6714 + old = atomic_cmpxchg(v, c, new);
6715 if (likely(old == c))
6716 break;
6717 c = old;
6718 }
6719 - return c != (u);
6720 + return c != u;
6721 }
6722
6723 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6724
6725 +/**
6726 + * atomic_inc_not_zero_hint - increment if not null
6727 + * @v: pointer of type atomic_t
6728 + * @hint: probable value of the atomic before the increment
6729 + *
6730 + * This version of atomic_inc_not_zero() gives a hint of probable
6731 + * value of the atomic. This helps processor to not read the memory
6732 + * before doing the atomic read/modify/write cycle, lowering
6733 + * number of bus transactions on some arches.
6734 + *
6735 + * Returns: 0 if increment was not done, 1 otherwise.
6736 + */
6737 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6738 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6739 +{
6740 + int val, c = hint, new;
6741 +
6742 + /* sanity test, should be removed by compiler if hint is a constant */
6743 + if (!hint)
6744 + return atomic_inc_not_zero(v);
6745 +
6746 + do {
6747 + asm volatile("incl %0\n"
6748 +
6749 +#ifdef CONFIG_PAX_REFCOUNT
6750 + "jno 0f\n"
6751 + "decl %0\n"
6752 + "int $4\n0:\n"
6753 + _ASM_EXTABLE(0b, 0b)
6754 +#endif
6755 +
6756 + : "=r" (new)
6757 + : "0" (c));
6758 +
6759 + val = atomic_cmpxchg(v, c, new);
6760 + if (val == c)
6761 + return 1;
6762 + c = val;
6763 + } while (c);
6764 +
6765 + return 0;
6766 +}
6767 +
6768 /*
6769 * atomic_dec_if_positive - decrement by 1 if old value positive
6770 * @v: pointer of type atomic_t
6771 diff -urNp linux-3.0.3/arch/x86/include/asm/bitops.h linux-3.0.3/arch/x86/include/asm/bitops.h
6772 --- linux-3.0.3/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6773 +++ linux-3.0.3/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6774 @@ -38,7 +38,7 @@
6775 * a mask operation on a byte.
6776 */
6777 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6778 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6779 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6780 #define CONST_MASK(nr) (1 << ((nr) & 7))
6781
6782 /**
6783 diff -urNp linux-3.0.3/arch/x86/include/asm/boot.h linux-3.0.3/arch/x86/include/asm/boot.h
6784 --- linux-3.0.3/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6785 +++ linux-3.0.3/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6786 @@ -11,10 +11,15 @@
6787 #include <asm/pgtable_types.h>
6788
6789 /* Physical address where kernel should be loaded. */
6790 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6791 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6793 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6794
6795 +#ifndef __ASSEMBLY__
6796 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6797 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6798 +#endif
6799 +
6800 /* Minimum kernel alignment, as a power of two */
6801 #ifdef CONFIG_X86_64
6802 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6803 diff -urNp linux-3.0.3/arch/x86/include/asm/cacheflush.h linux-3.0.3/arch/x86/include/asm/cacheflush.h
6804 --- linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6805 +++ linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6806 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6807 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6808
6809 if (pg_flags == _PGMT_DEFAULT)
6810 - return -1;
6811 + return ~0UL;
6812 else if (pg_flags == _PGMT_WC)
6813 return _PAGE_CACHE_WC;
6814 else if (pg_flags == _PGMT_UC_MINUS)
6815 diff -urNp linux-3.0.3/arch/x86/include/asm/cache.h linux-3.0.3/arch/x86/include/asm/cache.h
6816 --- linux-3.0.3/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6817 +++ linux-3.0.3/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6818 @@ -5,12 +5,13 @@
6819
6820 /* L1 cache line size */
6821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6822 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6823 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6824
6825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6826 +#define __read_only __attribute__((__section__(".data..read_only")))
6827
6828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6829 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6830 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6831
6832 #ifdef CONFIG_X86_VSMP
6833 #ifdef CONFIG_SMP
6834 diff -urNp linux-3.0.3/arch/x86/include/asm/checksum_32.h linux-3.0.3/arch/x86/include/asm/checksum_32.h
6835 --- linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6836 +++ linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6837 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6838 int len, __wsum sum,
6839 int *src_err_ptr, int *dst_err_ptr);
6840
6841 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6842 + int len, __wsum sum,
6843 + int *src_err_ptr, int *dst_err_ptr);
6844 +
6845 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6846 + int len, __wsum sum,
6847 + int *src_err_ptr, int *dst_err_ptr);
6848 +
6849 /*
6850 * Note: when you get a NULL pointer exception here this means someone
6851 * passed in an incorrect kernel address to one of these functions.
6852 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6853 int *err_ptr)
6854 {
6855 might_sleep();
6856 - return csum_partial_copy_generic((__force void *)src, dst,
6857 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6858 len, sum, err_ptr, NULL);
6859 }
6860
6861 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6862 {
6863 might_sleep();
6864 if (access_ok(VERIFY_WRITE, dst, len))
6865 - return csum_partial_copy_generic(src, (__force void *)dst,
6866 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6867 len, sum, NULL, err_ptr);
6868
6869 if (len)
6870 diff -urNp linux-3.0.3/arch/x86/include/asm/cpufeature.h linux-3.0.3/arch/x86/include/asm/cpufeature.h
6871 --- linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6872 +++ linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6873 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6874 ".section .discard,\"aw\",@progbits\n"
6875 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6876 ".previous\n"
6877 - ".section .altinstr_replacement,\"ax\"\n"
6878 + ".section .altinstr_replacement,\"a\"\n"
6879 "3: movb $1,%0\n"
6880 "4:\n"
6881 ".previous\n"
6882 diff -urNp linux-3.0.3/arch/x86/include/asm/desc_defs.h linux-3.0.3/arch/x86/include/asm/desc_defs.h
6883 --- linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6884 +++ linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6885 @@ -31,6 +31,12 @@ struct desc_struct {
6886 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6887 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6888 };
6889 + struct {
6890 + u16 offset_low;
6891 + u16 seg;
6892 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6893 + unsigned offset_high: 16;
6894 + } gate;
6895 };
6896 } __attribute__((packed));
6897
6898 diff -urNp linux-3.0.3/arch/x86/include/asm/desc.h linux-3.0.3/arch/x86/include/asm/desc.h
6899 --- linux-3.0.3/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6900 +++ linux-3.0.3/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6901 @@ -4,6 +4,7 @@
6902 #include <asm/desc_defs.h>
6903 #include <asm/ldt.h>
6904 #include <asm/mmu.h>
6905 +#include <asm/pgtable.h>
6906
6907 #include <linux/smp.h>
6908
6909 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6910
6911 desc->type = (info->read_exec_only ^ 1) << 1;
6912 desc->type |= info->contents << 2;
6913 + desc->type |= info->seg_not_present ^ 1;
6914
6915 desc->s = 1;
6916 desc->dpl = 0x3;
6917 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6918 }
6919
6920 extern struct desc_ptr idt_descr;
6921 -extern gate_desc idt_table[];
6922 -
6923 -struct gdt_page {
6924 - struct desc_struct gdt[GDT_ENTRIES];
6925 -} __attribute__((aligned(PAGE_SIZE)));
6926 -
6927 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6928 +extern gate_desc idt_table[256];
6929
6930 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6931 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6932 {
6933 - return per_cpu(gdt_page, cpu).gdt;
6934 + return cpu_gdt_table[cpu];
6935 }
6936
6937 #ifdef CONFIG_X86_64
6938 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6939 unsigned long base, unsigned dpl, unsigned flags,
6940 unsigned short seg)
6941 {
6942 - gate->a = (seg << 16) | (base & 0xffff);
6943 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6944 + gate->gate.offset_low = base;
6945 + gate->gate.seg = seg;
6946 + gate->gate.reserved = 0;
6947 + gate->gate.type = type;
6948 + gate->gate.s = 0;
6949 + gate->gate.dpl = dpl;
6950 + gate->gate.p = 1;
6951 + gate->gate.offset_high = base >> 16;
6952 }
6953
6954 #endif
6955 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6956
6957 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6958 {
6959 + pax_open_kernel();
6960 memcpy(&idt[entry], gate, sizeof(*gate));
6961 + pax_close_kernel();
6962 }
6963
6964 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6965 {
6966 + pax_open_kernel();
6967 memcpy(&ldt[entry], desc, 8);
6968 + pax_close_kernel();
6969 }
6970
6971 static inline void
6972 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6973 default: size = sizeof(*gdt); break;
6974 }
6975
6976 + pax_open_kernel();
6977 memcpy(&gdt[entry], desc, size);
6978 + pax_close_kernel();
6979 }
6980
6981 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6982 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6983
6984 static inline void native_load_tr_desc(void)
6985 {
6986 + pax_open_kernel();
6987 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6988 + pax_close_kernel();
6989 }
6990
6991 static inline void native_load_gdt(const struct desc_ptr *dtr)
6992 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6993 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6994 unsigned int i;
6995
6996 + pax_open_kernel();
6997 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6998 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
6999 + pax_close_kernel();
7000 }
7001
7002 #define _LDT_empty(info) \
7003 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7004 desc->limit = (limit >> 16) & 0xf;
7005 }
7006
7007 -static inline void _set_gate(int gate, unsigned type, void *addr,
7008 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7009 unsigned dpl, unsigned ist, unsigned seg)
7010 {
7011 gate_desc s;
7012 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7013 * Pentium F0 0F bugfix can have resulted in the mapped
7014 * IDT being write-protected.
7015 */
7016 -static inline void set_intr_gate(unsigned int n, void *addr)
7017 +static inline void set_intr_gate(unsigned int n, const void *addr)
7018 {
7019 BUG_ON((unsigned)n > 0xFF);
7020 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7021 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7022 /*
7023 * This routine sets up an interrupt gate at directory privilege level 3.
7024 */
7025 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7026 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7027 {
7028 BUG_ON((unsigned)n > 0xFF);
7029 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7030 }
7031
7032 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7033 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7034 {
7035 BUG_ON((unsigned)n > 0xFF);
7036 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7037 }
7038
7039 -static inline void set_trap_gate(unsigned int n, void *addr)
7040 +static inline void set_trap_gate(unsigned int n, const void *addr)
7041 {
7042 BUG_ON((unsigned)n > 0xFF);
7043 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7044 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7045 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7046 {
7047 BUG_ON((unsigned)n > 0xFF);
7048 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7049 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7050 }
7051
7052 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7053 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7054 {
7055 BUG_ON((unsigned)n > 0xFF);
7056 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7057 }
7058
7059 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7060 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7061 {
7062 BUG_ON((unsigned)n > 0xFF);
7063 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7064 }
7065
7066 +#ifdef CONFIG_X86_32
7067 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7068 +{
7069 + struct desc_struct d;
7070 +
7071 + if (likely(limit))
7072 + limit = (limit - 1UL) >> PAGE_SHIFT;
7073 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7074 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7075 +}
7076 +#endif
7077 +
7078 #endif /* _ASM_X86_DESC_H */
7079 diff -urNp linux-3.0.3/arch/x86/include/asm/e820.h linux-3.0.3/arch/x86/include/asm/e820.h
7080 --- linux-3.0.3/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7081 +++ linux-3.0.3/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7082 @@ -69,7 +69,7 @@ struct e820map {
7083 #define ISA_START_ADDRESS 0xa0000
7084 #define ISA_END_ADDRESS 0x100000
7085
7086 -#define BIOS_BEGIN 0x000a0000
7087 +#define BIOS_BEGIN 0x000c0000
7088 #define BIOS_END 0x00100000
7089
7090 #define BIOS_ROM_BASE 0xffe00000
7091 diff -urNp linux-3.0.3/arch/x86/include/asm/elf.h linux-3.0.3/arch/x86/include/asm/elf.h
7092 --- linux-3.0.3/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7093 +++ linux-3.0.3/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7094 @@ -237,7 +237,25 @@ extern int force_personality32;
7095 the loader. We need to make sure that it is out of the way of the program
7096 that it will "exec", and that there is sufficient room for the brk. */
7097
7098 +#ifdef CONFIG_PAX_SEGMEXEC
7099 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7100 +#else
7101 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7102 +#endif
7103 +
7104 +#ifdef CONFIG_PAX_ASLR
7105 +#ifdef CONFIG_X86_32
7106 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7107 +
7108 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7109 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110 +#else
7111 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7112 +
7113 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7114 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115 +#endif
7116 +#endif
7117
7118 /* This yields a mask that user programs can use to figure out what
7119 instruction set this CPU supports. This could be done in user space,
7120 @@ -290,9 +308,7 @@ do { \
7121
7122 #define ARCH_DLINFO \
7123 do { \
7124 - if (vdso_enabled) \
7125 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7126 - (unsigned long)current->mm->context.vdso); \
7127 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7128 } while (0)
7129
7130 #define AT_SYSINFO 32
7131 @@ -303,7 +319,7 @@ do { \
7132
7133 #endif /* !CONFIG_X86_32 */
7134
7135 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7136 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7137
7138 #define VDSO_ENTRY \
7139 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7140 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7141 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7142 #define compat_arch_setup_additional_pages syscall32_setup_pages
7143
7144 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7145 -#define arch_randomize_brk arch_randomize_brk
7146 -
7147 #endif /* _ASM_X86_ELF_H */
7148 diff -urNp linux-3.0.3/arch/x86/include/asm/emergency-restart.h linux-3.0.3/arch/x86/include/asm/emergency-restart.h
7149 --- linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7150 +++ linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7151 @@ -15,6 +15,6 @@ enum reboot_type {
7152
7153 extern enum reboot_type reboot_type;
7154
7155 -extern void machine_emergency_restart(void);
7156 +extern void machine_emergency_restart(void) __noreturn;
7157
7158 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7159 diff -urNp linux-3.0.3/arch/x86/include/asm/futex.h linux-3.0.3/arch/x86/include/asm/futex.h
7160 --- linux-3.0.3/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7161 +++ linux-3.0.3/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7162 @@ -12,16 +12,18 @@
7163 #include <asm/system.h>
7164
7165 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7166 + typecheck(u32 *, uaddr); \
7167 asm volatile("1:\t" insn "\n" \
7168 "2:\t.section .fixup,\"ax\"\n" \
7169 "3:\tmov\t%3, %1\n" \
7170 "\tjmp\t2b\n" \
7171 "\t.previous\n" \
7172 _ASM_EXTABLE(1b, 3b) \
7173 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7174 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7175 : "i" (-EFAULT), "0" (oparg), "1" (0))
7176
7177 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7178 + typecheck(u32 *, uaddr); \
7179 asm volatile("1:\tmovl %2, %0\n" \
7180 "\tmovl\t%0, %3\n" \
7181 "\t" insn "\n" \
7182 @@ -34,7 +36,7 @@
7183 _ASM_EXTABLE(1b, 4b) \
7184 _ASM_EXTABLE(2b, 4b) \
7185 : "=&a" (oldval), "=&r" (ret), \
7186 - "+m" (*uaddr), "=&r" (tem) \
7187 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7188 : "r" (oparg), "i" (-EFAULT), "1" (0))
7189
7190 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7191 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7192
7193 switch (op) {
7194 case FUTEX_OP_SET:
7195 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7196 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 break;
7198 case FUTEX_OP_ADD:
7199 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7200 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7201 uaddr, oparg);
7202 break;
7203 case FUTEX_OP_OR:
7204 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7205 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7206 return -EFAULT;
7207
7208 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7209 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7210 "2:\t.section .fixup, \"ax\"\n"
7211 "3:\tmov %3, %0\n"
7212 "\tjmp 2b\n"
7213 "\t.previous\n"
7214 _ASM_EXTABLE(1b, 3b)
7215 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7216 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7217 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7218 : "memory"
7219 );
7220 diff -urNp linux-3.0.3/arch/x86/include/asm/hw_irq.h linux-3.0.3/arch/x86/include/asm/hw_irq.h
7221 --- linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7222 +++ linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7223 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7224 extern void enable_IO_APIC(void);
7225
7226 /* Statistics */
7227 -extern atomic_t irq_err_count;
7228 -extern atomic_t irq_mis_count;
7229 +extern atomic_unchecked_t irq_err_count;
7230 +extern atomic_unchecked_t irq_mis_count;
7231
7232 /* EISA */
7233 extern void eisa_set_level_irq(unsigned int irq);
7234 diff -urNp linux-3.0.3/arch/x86/include/asm/i387.h linux-3.0.3/arch/x86/include/asm/i387.h
7235 --- linux-3.0.3/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7236 +++ linux-3.0.3/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7237 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7238 {
7239 int err;
7240
7241 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7242 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7243 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7244 +#endif
7245 +
7246 /* See comment in fxsave() below. */
7247 #ifdef CONFIG_AS_FXSAVEQ
7248 asm volatile("1: fxrstorq %[fx]\n\t"
7249 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7250 {
7251 int err;
7252
7253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7254 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7255 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7256 +#endif
7257 +
7258 /*
7259 * Clear the bytes not touched by the fxsave and reserved
7260 * for the SW usage.
7261 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7262 #endif /* CONFIG_X86_64 */
7263
7264 /* We need a safe address that is cheap to find and that is already
7265 - in L1 during context switch. The best choices are unfortunately
7266 - different for UP and SMP */
7267 -#ifdef CONFIG_SMP
7268 -#define safe_address (__per_cpu_offset[0])
7269 -#else
7270 -#define safe_address (kstat_cpu(0).cpustat.user)
7271 -#endif
7272 + in L1 during context switch. */
7273 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7274
7275 /*
7276 * These must be called with preempt disabled
7277 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7278 struct thread_info *me = current_thread_info();
7279 preempt_disable();
7280 if (me->status & TS_USEDFPU)
7281 - __save_init_fpu(me->task);
7282 + __save_init_fpu(current);
7283 else
7284 clts();
7285 }
7286 diff -urNp linux-3.0.3/arch/x86/include/asm/io.h linux-3.0.3/arch/x86/include/asm/io.h
7287 --- linux-3.0.3/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7288 +++ linux-3.0.3/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7289 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7290
7291 #include <linux/vmalloc.h>
7292
7293 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7294 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7295 +{
7296 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7297 +}
7298 +
7299 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7300 +{
7301 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7302 +}
7303 +
7304 /*
7305 * Convert a virtual cached pointer to an uncached pointer
7306 */
7307 diff -urNp linux-3.0.3/arch/x86/include/asm/irqflags.h linux-3.0.3/arch/x86/include/asm/irqflags.h
7308 --- linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7309 +++ linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7310 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7311 sti; \
7312 sysexit
7313
7314 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7315 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7316 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7317 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7318 +
7319 #else
7320 #define INTERRUPT_RETURN iret
7321 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7322 diff -urNp linux-3.0.3/arch/x86/include/asm/kprobes.h linux-3.0.3/arch/x86/include/asm/kprobes.h
7323 --- linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7324 +++ linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7325 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7326 #define RELATIVEJUMP_SIZE 5
7327 #define RELATIVECALL_OPCODE 0xe8
7328 #define RELATIVE_ADDR_SIZE 4
7329 -#define MAX_STACK_SIZE 64
7330 -#define MIN_STACK_SIZE(ADDR) \
7331 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7332 - THREAD_SIZE - (unsigned long)(ADDR))) \
7333 - ? (MAX_STACK_SIZE) \
7334 - : (((unsigned long)current_thread_info()) + \
7335 - THREAD_SIZE - (unsigned long)(ADDR)))
7336 +#define MAX_STACK_SIZE 64UL
7337 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7338
7339 #define flush_insn_slot(p) do { } while (0)
7340
7341 diff -urNp linux-3.0.3/arch/x86/include/asm/kvm_host.h linux-3.0.3/arch/x86/include/asm/kvm_host.h
7342 --- linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7343 +++ linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7344 @@ -441,7 +441,7 @@ struct kvm_arch {
7345 unsigned int n_used_mmu_pages;
7346 unsigned int n_requested_mmu_pages;
7347 unsigned int n_max_mmu_pages;
7348 - atomic_t invlpg_counter;
7349 + atomic_unchecked_t invlpg_counter;
7350 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7351 /*
7352 * Hash table of struct kvm_mmu_page.
7353 @@ -619,7 +619,7 @@ struct kvm_x86_ops {
7354 enum x86_intercept_stage stage);
7355
7356 const struct trace_print_flags *exit_reasons_str;
7357 -};
7358 +} __do_const;
7359
7360 struct kvm_arch_async_pf {
7361 u32 token;
7362 diff -urNp linux-3.0.3/arch/x86/include/asm/local.h linux-3.0.3/arch/x86/include/asm/local.h
7363 --- linux-3.0.3/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7364 +++ linux-3.0.3/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7365 @@ -18,26 +18,58 @@ typedef struct {
7366
7367 static inline void local_inc(local_t *l)
7368 {
7369 - asm volatile(_ASM_INC "%0"
7370 + asm volatile(_ASM_INC "%0\n"
7371 +
7372 +#ifdef CONFIG_PAX_REFCOUNT
7373 + "jno 0f\n"
7374 + _ASM_DEC "%0\n"
7375 + "int $4\n0:\n"
7376 + _ASM_EXTABLE(0b, 0b)
7377 +#endif
7378 +
7379 : "+m" (l->a.counter));
7380 }
7381
7382 static inline void local_dec(local_t *l)
7383 {
7384 - asm volatile(_ASM_DEC "%0"
7385 + asm volatile(_ASM_DEC "%0\n"
7386 +
7387 +#ifdef CONFIG_PAX_REFCOUNT
7388 + "jno 0f\n"
7389 + _ASM_INC "%0\n"
7390 + "int $4\n0:\n"
7391 + _ASM_EXTABLE(0b, 0b)
7392 +#endif
7393 +
7394 : "+m" (l->a.counter));
7395 }
7396
7397 static inline void local_add(long i, local_t *l)
7398 {
7399 - asm volatile(_ASM_ADD "%1,%0"
7400 + asm volatile(_ASM_ADD "%1,%0\n"
7401 +
7402 +#ifdef CONFIG_PAX_REFCOUNT
7403 + "jno 0f\n"
7404 + _ASM_SUB "%1,%0\n"
7405 + "int $4\n0:\n"
7406 + _ASM_EXTABLE(0b, 0b)
7407 +#endif
7408 +
7409 : "+m" (l->a.counter)
7410 : "ir" (i));
7411 }
7412
7413 static inline void local_sub(long i, local_t *l)
7414 {
7415 - asm volatile(_ASM_SUB "%1,%0"
7416 + asm volatile(_ASM_SUB "%1,%0\n"
7417 +
7418 +#ifdef CONFIG_PAX_REFCOUNT
7419 + "jno 0f\n"
7420 + _ASM_ADD "%1,%0\n"
7421 + "int $4\n0:\n"
7422 + _ASM_EXTABLE(0b, 0b)
7423 +#endif
7424 +
7425 : "+m" (l->a.counter)
7426 : "ir" (i));
7427 }
7428 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7429 {
7430 unsigned char c;
7431
7432 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7433 + asm volatile(_ASM_SUB "%2,%0\n"
7434 +
7435 +#ifdef CONFIG_PAX_REFCOUNT
7436 + "jno 0f\n"
7437 + _ASM_ADD "%2,%0\n"
7438 + "int $4\n0:\n"
7439 + _ASM_EXTABLE(0b, 0b)
7440 +#endif
7441 +
7442 + "sete %1\n"
7443 : "+m" (l->a.counter), "=qm" (c)
7444 : "ir" (i) : "memory");
7445 return c;
7446 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7447 {
7448 unsigned char c;
7449
7450 - asm volatile(_ASM_DEC "%0; sete %1"
7451 + asm volatile(_ASM_DEC "%0\n"
7452 +
7453 +#ifdef CONFIG_PAX_REFCOUNT
7454 + "jno 0f\n"
7455 + _ASM_INC "%0\n"
7456 + "int $4\n0:\n"
7457 + _ASM_EXTABLE(0b, 0b)
7458 +#endif
7459 +
7460 + "sete %1\n"
7461 : "+m" (l->a.counter), "=qm" (c)
7462 : : "memory");
7463 return c != 0;
7464 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7465 {
7466 unsigned char c;
7467
7468 - asm volatile(_ASM_INC "%0; sete %1"
7469 + asm volatile(_ASM_INC "%0\n"
7470 +
7471 +#ifdef CONFIG_PAX_REFCOUNT
7472 + "jno 0f\n"
7473 + _ASM_DEC "%0\n"
7474 + "int $4\n0:\n"
7475 + _ASM_EXTABLE(0b, 0b)
7476 +#endif
7477 +
7478 + "sete %1\n"
7479 : "+m" (l->a.counter), "=qm" (c)
7480 : : "memory");
7481 return c != 0;
7482 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7483 {
7484 unsigned char c;
7485
7486 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7487 + asm volatile(_ASM_ADD "%2,%0\n"
7488 +
7489 +#ifdef CONFIG_PAX_REFCOUNT
7490 + "jno 0f\n"
7491 + _ASM_SUB "%2,%0\n"
7492 + "int $4\n0:\n"
7493 + _ASM_EXTABLE(0b, 0b)
7494 +#endif
7495 +
7496 + "sets %1\n"
7497 : "+m" (l->a.counter), "=qm" (c)
7498 : "ir" (i) : "memory");
7499 return c;
7500 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7501 #endif
7502 /* Modern 486+ processor */
7503 __i = i;
7504 - asm volatile(_ASM_XADD "%0, %1;"
7505 + asm volatile(_ASM_XADD "%0, %1\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 + "jno 0f\n"
7509 + _ASM_MOV "%0,%1\n"
7510 + "int $4\n0:\n"
7511 + _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514 : "+r" (i), "+m" (l->a.counter)
7515 : : "memory");
7516 return i + __i;
7517 diff -urNp linux-3.0.3/arch/x86/include/asm/mman.h linux-3.0.3/arch/x86/include/asm/mman.h
7518 --- linux-3.0.3/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7519 +++ linux-3.0.3/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7520 @@ -5,4 +5,14 @@
7521
7522 #include <asm-generic/mman.h>
7523
7524 +#ifdef __KERNEL__
7525 +#ifndef __ASSEMBLY__
7526 +#ifdef CONFIG_X86_32
7527 +#define arch_mmap_check i386_mmap_check
7528 +int i386_mmap_check(unsigned long addr, unsigned long len,
7529 + unsigned long flags);
7530 +#endif
7531 +#endif
7532 +#endif
7533 +
7534 #endif /* _ASM_X86_MMAN_H */
7535 diff -urNp linux-3.0.3/arch/x86/include/asm/mmu_context.h linux-3.0.3/arch/x86/include/asm/mmu_context.h
7536 --- linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7537 +++ linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7538 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7539
7540 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7541 {
7542 +
7543 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7544 + unsigned int i;
7545 + pgd_t *pgd;
7546 +
7547 + pax_open_kernel();
7548 + pgd = get_cpu_pgd(smp_processor_id());
7549 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7550 + set_pgd_batched(pgd+i, native_make_pgd(0));
7551 + pax_close_kernel();
7552 +#endif
7553 +
7554 #ifdef CONFIG_SMP
7555 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7556 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7557 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7558 struct task_struct *tsk)
7559 {
7560 unsigned cpu = smp_processor_id();
7561 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7562 + int tlbstate = TLBSTATE_OK;
7563 +#endif
7564
7565 if (likely(prev != next)) {
7566 #ifdef CONFIG_SMP
7567 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7568 + tlbstate = percpu_read(cpu_tlbstate.state);
7569 +#endif
7570 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7571 percpu_write(cpu_tlbstate.active_mm, next);
7572 #endif
7573 cpumask_set_cpu(cpu, mm_cpumask(next));
7574
7575 /* Re-load page tables */
7576 +#ifdef CONFIG_PAX_PER_CPU_PGD
7577 + pax_open_kernel();
7578 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7579 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7580 + pax_close_kernel();
7581 + load_cr3(get_cpu_pgd(cpu));
7582 +#else
7583 load_cr3(next->pgd);
7584 +#endif
7585
7586 /* stop flush ipis for the previous mm */
7587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7588 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7589 */
7590 if (unlikely(prev->context.ldt != next->context.ldt))
7591 load_LDT_nolock(&next->context);
7592 - }
7593 +
7594 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7595 + if (!(__supported_pte_mask & _PAGE_NX)) {
7596 + smp_mb__before_clear_bit();
7597 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7598 + smp_mb__after_clear_bit();
7599 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7600 + }
7601 +#endif
7602 +
7603 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7604 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7605 + prev->context.user_cs_limit != next->context.user_cs_limit))
7606 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7607 #ifdef CONFIG_SMP
7608 + else if (unlikely(tlbstate != TLBSTATE_OK))
7609 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7610 +#endif
7611 +#endif
7612 +
7613 + }
7614 else {
7615 +
7616 +#ifdef CONFIG_PAX_PER_CPU_PGD
7617 + pax_open_kernel();
7618 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7619 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7620 + pax_close_kernel();
7621 + load_cr3(get_cpu_pgd(cpu));
7622 +#endif
7623 +
7624 +#ifdef CONFIG_SMP
7625 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7626 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7627
7628 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7629 * tlb flush IPI delivery. We must reload CR3
7630 * to make sure to use no freed page tables.
7631 */
7632 +
7633 +#ifndef CONFIG_PAX_PER_CPU_PGD
7634 load_cr3(next->pgd);
7635 +#endif
7636 +
7637 load_LDT_nolock(&next->context);
7638 +
7639 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7640 + if (!(__supported_pte_mask & _PAGE_NX))
7641 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7642 +#endif
7643 +
7644 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7645 +#ifdef CONFIG_PAX_PAGEEXEC
7646 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7647 +#endif
7648 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7649 +#endif
7650 +
7651 }
7652 - }
7653 #endif
7654 + }
7655 }
7656
7657 #define activate_mm(prev, next) \
7658 diff -urNp linux-3.0.3/arch/x86/include/asm/mmu.h linux-3.0.3/arch/x86/include/asm/mmu.h
7659 --- linux-3.0.3/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7660 +++ linux-3.0.3/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7661 @@ -9,7 +9,7 @@
7662 * we put the segment information here.
7663 */
7664 typedef struct {
7665 - void *ldt;
7666 + struct desc_struct *ldt;
7667 int size;
7668
7669 #ifdef CONFIG_X86_64
7670 @@ -18,7 +18,19 @@ typedef struct {
7671 #endif
7672
7673 struct mutex lock;
7674 - void *vdso;
7675 + unsigned long vdso;
7676 +
7677 +#ifdef CONFIG_X86_32
7678 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7679 + unsigned long user_cs_base;
7680 + unsigned long user_cs_limit;
7681 +
7682 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7683 + cpumask_t cpu_user_cs_mask;
7684 +#endif
7685 +
7686 +#endif
7687 +#endif
7688 } mm_context_t;
7689
7690 #ifdef CONFIG_SMP
7691 diff -urNp linux-3.0.3/arch/x86/include/asm/module.h linux-3.0.3/arch/x86/include/asm/module.h
7692 --- linux-3.0.3/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7693 +++ linux-3.0.3/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7694 @@ -5,6 +5,7 @@
7695
7696 #ifdef CONFIG_X86_64
7697 /* X86_64 does not define MODULE_PROC_FAMILY */
7698 +#define MODULE_PROC_FAMILY ""
7699 #elif defined CONFIG_M386
7700 #define MODULE_PROC_FAMILY "386 "
7701 #elif defined CONFIG_M486
7702 @@ -59,8 +60,30 @@
7703 #error unknown processor family
7704 #endif
7705
7706 -#ifdef CONFIG_X86_32
7707 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7709 +#define MODULE_PAX_UDEREF "UDEREF "
7710 +#else
7711 +#define MODULE_PAX_UDEREF ""
7712 +#endif
7713 +
7714 +#ifdef CONFIG_PAX_KERNEXEC
7715 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7716 +#else
7717 +#define MODULE_PAX_KERNEXEC ""
7718 #endif
7719
7720 +#ifdef CONFIG_PAX_REFCOUNT
7721 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7722 +#else
7723 +#define MODULE_PAX_REFCOUNT ""
7724 +#endif
7725 +
7726 +#ifdef CONFIG_GRKERNSEC
7727 +#define MODULE_GRSEC "GRSECURITY "
7728 +#else
7729 +#define MODULE_GRSEC ""
7730 +#endif
7731 +
7732 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7733 +
7734 #endif /* _ASM_X86_MODULE_H */
7735 diff -urNp linux-3.0.3/arch/x86/include/asm/page_64_types.h linux-3.0.3/arch/x86/include/asm/page_64_types.h
7736 --- linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7737 +++ linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7738 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7739
7740 /* duplicated to the one in bootmem.h */
7741 extern unsigned long max_pfn;
7742 -extern unsigned long phys_base;
7743 +extern const unsigned long phys_base;
7744
7745 extern unsigned long __phys_addr(unsigned long);
7746 #define __phys_reloc_hide(x) (x)
7747 diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt.h linux-3.0.3/arch/x86/include/asm/paravirt.h
7748 --- linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7749 +++ linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7750 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7751 val);
7752 }
7753
7754 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7755 +{
7756 + pgdval_t val = native_pgd_val(pgd);
7757 +
7758 + if (sizeof(pgdval_t) > sizeof(long))
7759 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7760 + val, (u64)val >> 32);
7761 + else
7762 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7763 + val);
7764 +}
7765 +
7766 static inline void pgd_clear(pgd_t *pgdp)
7767 {
7768 set_pgd(pgdp, __pgd(0));
7769 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7770 pv_mmu_ops.set_fixmap(idx, phys, flags);
7771 }
7772
7773 +#ifdef CONFIG_PAX_KERNEXEC
7774 +static inline unsigned long pax_open_kernel(void)
7775 +{
7776 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7777 +}
7778 +
7779 +static inline unsigned long pax_close_kernel(void)
7780 +{
7781 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7782 +}
7783 +#else
7784 +static inline unsigned long pax_open_kernel(void) { return 0; }
7785 +static inline unsigned long pax_close_kernel(void) { return 0; }
7786 +#endif
7787 +
7788 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7789
7790 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7791 @@ -955,7 +982,7 @@ extern void default_banner(void);
7792
7793 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7794 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7795 -#define PARA_INDIRECT(addr) *%cs:addr
7796 +#define PARA_INDIRECT(addr) *%ss:addr
7797 #endif
7798
7799 #define INTERRUPT_RETURN \
7800 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7801 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7802 CLBR_NONE, \
7803 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7804 +
7805 +#define GET_CR0_INTO_RDI \
7806 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7807 + mov %rax,%rdi
7808 +
7809 +#define SET_RDI_INTO_CR0 \
7810 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7811 +
7812 +#define GET_CR3_INTO_RDI \
7813 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7814 + mov %rax,%rdi
7815 +
7816 +#define SET_RDI_INTO_CR3 \
7817 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7818 +
7819 #endif /* CONFIG_X86_32 */
7820
7821 #endif /* __ASSEMBLY__ */
7822 diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt_types.h linux-3.0.3/arch/x86/include/asm/paravirt_types.h
7823 --- linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7824 +++ linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7825 @@ -78,19 +78,19 @@ struct pv_init_ops {
7826 */
7827 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7828 unsigned long addr, unsigned len);
7829 -};
7830 +} __no_const;
7831
7832
7833 struct pv_lazy_ops {
7834 /* Set deferred update mode, used for batching operations. */
7835 void (*enter)(void);
7836 void (*leave)(void);
7837 -};
7838 +} __no_const;
7839
7840 struct pv_time_ops {
7841 unsigned long long (*sched_clock)(void);
7842 unsigned long (*get_tsc_khz)(void);
7843 -};
7844 +} __no_const;
7845
7846 struct pv_cpu_ops {
7847 /* hooks for various privileged instructions */
7848 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7849
7850 void (*start_context_switch)(struct task_struct *prev);
7851 void (*end_context_switch)(struct task_struct *next);
7852 -};
7853 +} __no_const;
7854
7855 struct pv_irq_ops {
7856 /*
7857 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7858 unsigned long start_eip,
7859 unsigned long start_esp);
7860 #endif
7861 -};
7862 +} __no_const;
7863
7864 struct pv_mmu_ops {
7865 unsigned long (*read_cr2)(void);
7866 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
7867 struct paravirt_callee_save make_pud;
7868
7869 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7870 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7871 #endif /* PAGETABLE_LEVELS == 4 */
7872 #endif /* PAGETABLE_LEVELS >= 3 */
7873
7874 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
7875 an mfn. We can tell which is which from the index. */
7876 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7877 phys_addr_t phys, pgprot_t flags);
7878 +
7879 +#ifdef CONFIG_PAX_KERNEXEC
7880 + unsigned long (*pax_open_kernel)(void);
7881 + unsigned long (*pax_close_kernel)(void);
7882 +#endif
7883 +
7884 };
7885
7886 struct arch_spinlock;
7887 @@ -327,7 +334,7 @@ struct pv_lock_ops {
7888 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7889 int (*spin_trylock)(struct arch_spinlock *lock);
7890 void (*spin_unlock)(struct arch_spinlock *lock);
7891 -};
7892 +} __no_const;
7893
7894 /* This contains all the paravirt structures: we get a convenient
7895 * number for each function using the offset which we use to indicate
7896 diff -urNp linux-3.0.3/arch/x86/include/asm/pgalloc.h linux-3.0.3/arch/x86/include/asm/pgalloc.h
7897 --- linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7898 +++ linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7899 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7900 pmd_t *pmd, pte_t *pte)
7901 {
7902 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7903 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7904 +}
7905 +
7906 +static inline void pmd_populate_user(struct mm_struct *mm,
7907 + pmd_t *pmd, pte_t *pte)
7908 +{
7909 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7910 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7911 }
7912
7913 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-2level.h linux-3.0.3/arch/x86/include/asm/pgtable-2level.h
7914 --- linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7915 +++ linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7916 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7917
7918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7919 {
7920 + pax_open_kernel();
7921 *pmdp = pmd;
7922 + pax_close_kernel();
7923 }
7924
7925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7926 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32.h linux-3.0.3/arch/x86/include/asm/pgtable_32.h
7927 --- linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7928 +++ linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7929 @@ -25,9 +25,6 @@
7930 struct mm_struct;
7931 struct vm_area_struct;
7932
7933 -extern pgd_t swapper_pg_dir[1024];
7934 -extern pgd_t initial_page_table[1024];
7935 -
7936 static inline void pgtable_cache_init(void) { }
7937 static inline void check_pgt_cache(void) { }
7938 void paging_init(void);
7939 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7940 # include <asm/pgtable-2level.h>
7941 #endif
7942
7943 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7944 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7945 +#ifdef CONFIG_X86_PAE
7946 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7947 +#endif
7948 +
7949 #if defined(CONFIG_HIGHPTE)
7950 #define pte_offset_map(dir, address) \
7951 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7952 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7953 /* Clear a kernel PTE and flush it from the TLB */
7954 #define kpte_clear_flush(ptep, vaddr) \
7955 do { \
7956 + pax_open_kernel(); \
7957 pte_clear(&init_mm, (vaddr), (ptep)); \
7958 + pax_close_kernel(); \
7959 __flush_tlb_one((vaddr)); \
7960 } while (0)
7961
7962 @@ -74,6 +79,9 @@ do { \
7963
7964 #endif /* !__ASSEMBLY__ */
7965
7966 +#define HAVE_ARCH_UNMAPPED_AREA
7967 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7968 +
7969 /*
7970 * kern_addr_valid() is (1) for FLATMEM and (0) for
7971 * SPARSEMEM and DISCONTIGMEM
7972 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h
7973 --- linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7974 +++ linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7975 @@ -8,7 +8,7 @@
7976 */
7977 #ifdef CONFIG_X86_PAE
7978 # include <asm/pgtable-3level_types.h>
7979 -# define PMD_SIZE (1UL << PMD_SHIFT)
7980 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7981 # define PMD_MASK (~(PMD_SIZE - 1))
7982 #else
7983 # include <asm/pgtable-2level_types.h>
7984 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7985 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7986 #endif
7987
7988 +#ifdef CONFIG_PAX_KERNEXEC
7989 +#ifndef __ASSEMBLY__
7990 +extern unsigned char MODULES_EXEC_VADDR[];
7991 +extern unsigned char MODULES_EXEC_END[];
7992 +#endif
7993 +#include <asm/boot.h>
7994 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7995 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7996 +#else
7997 +#define ktla_ktva(addr) (addr)
7998 +#define ktva_ktla(addr) (addr)
7999 +#endif
8000 +
8001 #define MODULES_VADDR VMALLOC_START
8002 #define MODULES_END VMALLOC_END
8003 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8004 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-3level.h linux-3.0.3/arch/x86/include/asm/pgtable-3level.h
8005 --- linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8006 +++ linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8007 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8008
8009 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8010 {
8011 + pax_open_kernel();
8012 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8013 + pax_close_kernel();
8014 }
8015
8016 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8017 {
8018 + pax_open_kernel();
8019 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8020 + pax_close_kernel();
8021 }
8022
8023 /*
8024 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64.h linux-3.0.3/arch/x86/include/asm/pgtable_64.h
8025 --- linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8026 +++ linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8027 @@ -16,10 +16,13 @@
8028
8029 extern pud_t level3_kernel_pgt[512];
8030 extern pud_t level3_ident_pgt[512];
8031 +extern pud_t level3_vmalloc_pgt[512];
8032 +extern pud_t level3_vmemmap_pgt[512];
8033 +extern pud_t level2_vmemmap_pgt[512];
8034 extern pmd_t level2_kernel_pgt[512];
8035 extern pmd_t level2_fixmap_pgt[512];
8036 -extern pmd_t level2_ident_pgt[512];
8037 -extern pgd_t init_level4_pgt[];
8038 +extern pmd_t level2_ident_pgt[512*2];
8039 +extern pgd_t init_level4_pgt[512];
8040
8041 #define swapper_pg_dir init_level4_pgt
8042
8043 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8044
8045 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8046 {
8047 + pax_open_kernel();
8048 *pmdp = pmd;
8049 + pax_close_kernel();
8050 }
8051
8052 static inline void native_pmd_clear(pmd_t *pmd)
8053 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8054
8055 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8056 {
8057 + pax_open_kernel();
8058 + *pgdp = pgd;
8059 + pax_close_kernel();
8060 +}
8061 +
8062 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8063 +{
8064 *pgdp = pgd;
8065 }
8066
8067 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h
8068 --- linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8069 +++ linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8070 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8071 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8072 #define MODULES_END _AC(0xffffffffff000000, UL)
8073 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8074 +#define MODULES_EXEC_VADDR MODULES_VADDR
8075 +#define MODULES_EXEC_END MODULES_END
8076 +
8077 +#define ktla_ktva(addr) (addr)
8078 +#define ktva_ktla(addr) (addr)
8079
8080 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8081 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable.h linux-3.0.3/arch/x86/include/asm/pgtable.h
8082 --- linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8083 +++ linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8084 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8085
8086 #ifndef __PAGETABLE_PUD_FOLDED
8087 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8088 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8089 #define pgd_clear(pgd) native_pgd_clear(pgd)
8090 #endif
8091
8092 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8093
8094 #define arch_end_context_switch(prev) do {} while(0)
8095
8096 +#define pax_open_kernel() native_pax_open_kernel()
8097 +#define pax_close_kernel() native_pax_close_kernel()
8098 #endif /* CONFIG_PARAVIRT */
8099
8100 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8101 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8102 +
8103 +#ifdef CONFIG_PAX_KERNEXEC
8104 +static inline unsigned long native_pax_open_kernel(void)
8105 +{
8106 + unsigned long cr0;
8107 +
8108 + preempt_disable();
8109 + barrier();
8110 + cr0 = read_cr0() ^ X86_CR0_WP;
8111 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8112 + write_cr0(cr0);
8113 + return cr0 ^ X86_CR0_WP;
8114 +}
8115 +
8116 +static inline unsigned long native_pax_close_kernel(void)
8117 +{
8118 + unsigned long cr0;
8119 +
8120 + cr0 = read_cr0() ^ X86_CR0_WP;
8121 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8122 + write_cr0(cr0);
8123 + barrier();
8124 + preempt_enable_no_resched();
8125 + return cr0 ^ X86_CR0_WP;
8126 +}
8127 +#else
8128 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8129 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8130 +#endif
8131 +
8132 /*
8133 * The following only work if pte_present() is true.
8134 * Undefined behaviour if not..
8135 */
8136 +static inline int pte_user(pte_t pte)
8137 +{
8138 + return pte_val(pte) & _PAGE_USER;
8139 +}
8140 +
8141 static inline int pte_dirty(pte_t pte)
8142 {
8143 return pte_flags(pte) & _PAGE_DIRTY;
8144 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8145 return pte_clear_flags(pte, _PAGE_RW);
8146 }
8147
8148 +static inline pte_t pte_mkread(pte_t pte)
8149 +{
8150 + return __pte(pte_val(pte) | _PAGE_USER);
8151 +}
8152 +
8153 static inline pte_t pte_mkexec(pte_t pte)
8154 {
8155 - return pte_clear_flags(pte, _PAGE_NX);
8156 +#ifdef CONFIG_X86_PAE
8157 + if (__supported_pte_mask & _PAGE_NX)
8158 + return pte_clear_flags(pte, _PAGE_NX);
8159 + else
8160 +#endif
8161 + return pte_set_flags(pte, _PAGE_USER);
8162 +}
8163 +
8164 +static inline pte_t pte_exprotect(pte_t pte)
8165 +{
8166 +#ifdef CONFIG_X86_PAE
8167 + if (__supported_pte_mask & _PAGE_NX)
8168 + return pte_set_flags(pte, _PAGE_NX);
8169 + else
8170 +#endif
8171 + return pte_clear_flags(pte, _PAGE_USER);
8172 }
8173
8174 static inline pte_t pte_mkdirty(pte_t pte)
8175 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8176 #endif
8177
8178 #ifndef __ASSEMBLY__
8179 +
8180 +#ifdef CONFIG_PAX_PER_CPU_PGD
8181 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8182 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8183 +{
8184 + return cpu_pgd[cpu];
8185 +}
8186 +#endif
8187 +
8188 #include <linux/mm_types.h>
8189
8190 static inline int pte_none(pte_t pte)
8191 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8192
8193 static inline int pgd_bad(pgd_t pgd)
8194 {
8195 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8196 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8197 }
8198
8199 static inline int pgd_none(pgd_t pgd)
8200 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8201 * pgd_offset() returns a (pgd_t *)
8202 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8203 */
8204 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8205 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8206 +
8207 +#ifdef CONFIG_PAX_PER_CPU_PGD
8208 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8209 +#endif
8210 +
8211 /*
8212 * a shortcut which implies the use of the kernel's pgd, instead
8213 * of a process's
8214 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8215 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8216 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8217
8218 +#ifdef CONFIG_X86_32
8219 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8220 +#else
8221 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8222 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8223 +
8224 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8225 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8226 +#else
8227 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8228 +#endif
8229 +
8230 +#endif
8231 +
8232 #ifndef __ASSEMBLY__
8233
8234 extern int direct_gbpages;
8235 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8236 * dst and src can be on the same page, but the range must not overlap,
8237 * and must not cross a page boundary.
8238 */
8239 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8240 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8241 {
8242 - memcpy(dst, src, count * sizeof(pgd_t));
8243 + pax_open_kernel();
8244 + while (count--)
8245 + *dst++ = *src++;
8246 + pax_close_kernel();
8247 }
8248
8249 +#ifdef CONFIG_PAX_PER_CPU_PGD
8250 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8251 +#endif
8252 +
8253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8254 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8255 +#else
8256 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8257 +#endif
8258
8259 #include <asm-generic/pgtable.h>
8260 #endif /* __ASSEMBLY__ */
8261 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_types.h linux-3.0.3/arch/x86/include/asm/pgtable_types.h
8262 --- linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8263 +++ linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8264 @@ -16,13 +16,12 @@
8265 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8266 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8267 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8268 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8269 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8270 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8271 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8272 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8273 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8274 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8275 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8276 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8277 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8278 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8279
8280 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8281 @@ -40,7 +39,6 @@
8282 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8283 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8284 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8285 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8286 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8287 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8288 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8289 @@ -57,8 +55,10 @@
8290
8291 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8292 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8293 -#else
8294 +#elif defined(CONFIG_KMEMCHECK)
8295 #define _PAGE_NX (_AT(pteval_t, 0))
8296 +#else
8297 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8298 #endif
8299
8300 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8301 @@ -96,6 +96,9 @@
8302 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8303 _PAGE_ACCESSED)
8304
8305 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8306 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8307 +
8308 #define __PAGE_KERNEL_EXEC \
8309 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8310 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8311 @@ -106,8 +109,8 @@
8312 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8313 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8314 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8315 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8316 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8317 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8318 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8319 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8320 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8322 @@ -166,8 +169,8 @@
8323 * bits are combined, this will alow user to access the high address mapped
8324 * VDSO in the presence of CONFIG_COMPAT_VDSO
8325 */
8326 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8327 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8328 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8329 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8331 #endif
8332
8333 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8334 {
8335 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8336 }
8337 +#endif
8338
8339 +#if PAGETABLE_LEVELS == 3
8340 +#include <asm-generic/pgtable-nopud.h>
8341 +#endif
8342 +
8343 +#if PAGETABLE_LEVELS == 2
8344 +#include <asm-generic/pgtable-nopmd.h>
8345 +#endif
8346 +
8347 +#ifndef __ASSEMBLY__
8348 #if PAGETABLE_LEVELS > 3
8349 typedef struct { pudval_t pud; } pud_t;
8350
8351 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8352 return pud.pud;
8353 }
8354 #else
8355 -#include <asm-generic/pgtable-nopud.h>
8356 -
8357 static inline pudval_t native_pud_val(pud_t pud)
8358 {
8359 return native_pgd_val(pud.pgd);
8360 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8361 return pmd.pmd;
8362 }
8363 #else
8364 -#include <asm-generic/pgtable-nopmd.h>
8365 -
8366 static inline pmdval_t native_pmd_val(pmd_t pmd)
8367 {
8368 return native_pgd_val(pmd.pud.pgd);
8369 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8370
8371 extern pteval_t __supported_pte_mask;
8372 extern void set_nx(void);
8373 -extern int nx_enabled;
8374
8375 #define pgprot_writecombine pgprot_writecombine
8376 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8377 diff -urNp linux-3.0.3/arch/x86/include/asm/processor.h linux-3.0.3/arch/x86/include/asm/processor.h
8378 --- linux-3.0.3/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8379 +++ linux-3.0.3/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8380 @@ -266,7 +266,7 @@ struct tss_struct {
8381
8382 } ____cacheline_aligned;
8383
8384 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8385 +extern struct tss_struct init_tss[NR_CPUS];
8386
8387 /*
8388 * Save the original ist values for checking stack pointers during debugging
8389 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define TASK_SIZE PAGE_OFFSET
8392 #define TASK_SIZE_MAX TASK_SIZE
8393 +
8394 +#ifdef CONFIG_PAX_SEGMEXEC
8395 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8396 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8397 +#else
8398 #define STACK_TOP TASK_SIZE
8399 -#define STACK_TOP_MAX STACK_TOP
8400 +#endif
8401 +
8402 +#define STACK_TOP_MAX TASK_SIZE
8403
8404 #define INIT_THREAD { \
8405 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .vm86_info = NULL, \
8408 .sysenter_cs = __KERNEL_CS, \
8409 .io_bitmap_ptr = NULL, \
8410 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8411 */
8412 #define INIT_TSS { \
8413 .x86_tss = { \
8414 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8415 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8416 .ss0 = __KERNEL_DS, \
8417 .ss1 = __KERNEL_CS, \
8418 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8419 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8420 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8421
8422 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8423 -#define KSTK_TOP(info) \
8424 -({ \
8425 - unsigned long *__ptr = (unsigned long *)(info); \
8426 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8427 -})
8428 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8429
8430 /*
8431 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8432 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8433 #define task_pt_regs(task) \
8434 ({ \
8435 struct pt_regs *__regs__; \
8436 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8437 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8438 __regs__ - 1; \
8439 })
8440
8441 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8442 /*
8443 * User space process size. 47bits minus one guard page.
8444 */
8445 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8446 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8447
8448 /* This decides where the kernel will search for a free chunk of vm
8449 * space during mmap's.
8450 */
8451 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8452 - 0xc0000000 : 0xFFFFe000)
8453 + 0xc0000000 : 0xFFFFf000)
8454
8455 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8456 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8457 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8458 #define STACK_TOP_MAX TASK_SIZE_MAX
8459
8460 #define INIT_THREAD { \
8461 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8462 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8463 }
8464
8465 #define INIT_TSS { \
8466 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8467 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8468 }
8469
8470 /*
8471 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8472 */
8473 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8474
8475 +#ifdef CONFIG_PAX_SEGMEXEC
8476 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8477 +#endif
8478 +
8479 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8480
8481 /* Get/set a process' ability to use the timestamp counter instruction */
8482 diff -urNp linux-3.0.3/arch/x86/include/asm/ptrace.h linux-3.0.3/arch/x86/include/asm/ptrace.h
8483 --- linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8484 +++ linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8485 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8486 }
8487
8488 /*
8489 - * user_mode_vm(regs) determines whether a register set came from user mode.
8490 + * user_mode(regs) determines whether a register set came from user mode.
8491 * This is true if V8086 mode was enabled OR if the register set was from
8492 * protected mode with RPL-3 CS value. This tricky test checks that with
8493 * one comparison. Many places in the kernel can bypass this full check
8494 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8495 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8496 + * be used.
8497 */
8498 -static inline int user_mode(struct pt_regs *regs)
8499 +static inline int user_mode_novm(struct pt_regs *regs)
8500 {
8501 #ifdef CONFIG_X86_32
8502 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8503 #else
8504 - return !!(regs->cs & 3);
8505 + return !!(regs->cs & SEGMENT_RPL_MASK);
8506 #endif
8507 }
8508
8509 -static inline int user_mode_vm(struct pt_regs *regs)
8510 +static inline int user_mode(struct pt_regs *regs)
8511 {
8512 #ifdef CONFIG_X86_32
8513 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8514 USER_RPL;
8515 #else
8516 - return user_mode(regs);
8517 + return user_mode_novm(regs);
8518 #endif
8519 }
8520
8521 diff -urNp linux-3.0.3/arch/x86/include/asm/reboot.h linux-3.0.3/arch/x86/include/asm/reboot.h
8522 --- linux-3.0.3/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8523 +++ linux-3.0.3/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8524 @@ -6,19 +6,19 @@
8525 struct pt_regs;
8526
8527 struct machine_ops {
8528 - void (*restart)(char *cmd);
8529 - void (*halt)(void);
8530 - void (*power_off)(void);
8531 + void (* __noreturn restart)(char *cmd);
8532 + void (* __noreturn halt)(void);
8533 + void (* __noreturn power_off)(void);
8534 void (*shutdown)(void);
8535 void (*crash_shutdown)(struct pt_regs *);
8536 - void (*emergency_restart)(void);
8537 -};
8538 + void (* __noreturn emergency_restart)(void);
8539 +} __no_const;
8540
8541 extern struct machine_ops machine_ops;
8542
8543 void native_machine_crash_shutdown(struct pt_regs *regs);
8544 void native_machine_shutdown(void);
8545 -void machine_real_restart(unsigned int type);
8546 +void machine_real_restart(unsigned int type) __noreturn;
8547 /* These must match dispatch_table in reboot_32.S */
8548 #define MRR_BIOS 0
8549 #define MRR_APM 1
8550 diff -urNp linux-3.0.3/arch/x86/include/asm/rwsem.h linux-3.0.3/arch/x86/include/asm/rwsem.h
8551 --- linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8552 +++ linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8553 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8554 {
8555 asm volatile("# beginning down_read\n\t"
8556 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8557 +
8558 +#ifdef CONFIG_PAX_REFCOUNT
8559 + "jno 0f\n"
8560 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8561 + "int $4\n0:\n"
8562 + _ASM_EXTABLE(0b, 0b)
8563 +#endif
8564 +
8565 /* adds 0x00000001 */
8566 " jns 1f\n"
8567 " call call_rwsem_down_read_failed\n"
8568 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8569 "1:\n\t"
8570 " mov %1,%2\n\t"
8571 " add %3,%2\n\t"
8572 +
8573 +#ifdef CONFIG_PAX_REFCOUNT
8574 + "jno 0f\n"
8575 + "sub %3,%2\n"
8576 + "int $4\n0:\n"
8577 + _ASM_EXTABLE(0b, 0b)
8578 +#endif
8579 +
8580 " jle 2f\n\t"
8581 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8582 " jnz 1b\n\t"
8583 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8584 long tmp;
8585 asm volatile("# beginning down_write\n\t"
8586 LOCK_PREFIX " xadd %1,(%2)\n\t"
8587 +
8588 +#ifdef CONFIG_PAX_REFCOUNT
8589 + "jno 0f\n"
8590 + "mov %1,(%2)\n"
8591 + "int $4\n0:\n"
8592 + _ASM_EXTABLE(0b, 0b)
8593 +#endif
8594 +
8595 /* adds 0xffff0001, returns the old value */
8596 " test %1,%1\n\t"
8597 /* was the count 0 before? */
8598 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8599 long tmp;
8600 asm volatile("# beginning __up_read\n\t"
8601 LOCK_PREFIX " xadd %1,(%2)\n\t"
8602 +
8603 +#ifdef CONFIG_PAX_REFCOUNT
8604 + "jno 0f\n"
8605 + "mov %1,(%2)\n"
8606 + "int $4\n0:\n"
8607 + _ASM_EXTABLE(0b, 0b)
8608 +#endif
8609 +
8610 /* subtracts 1, returns the old value */
8611 " jns 1f\n\t"
8612 " call call_rwsem_wake\n" /* expects old value in %edx */
8613 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8614 long tmp;
8615 asm volatile("# beginning __up_write\n\t"
8616 LOCK_PREFIX " xadd %1,(%2)\n\t"
8617 +
8618 +#ifdef CONFIG_PAX_REFCOUNT
8619 + "jno 0f\n"
8620 + "mov %1,(%2)\n"
8621 + "int $4\n0:\n"
8622 + _ASM_EXTABLE(0b, 0b)
8623 +#endif
8624 +
8625 /* subtracts 0xffff0001, returns the old value */
8626 " jns 1f\n\t"
8627 " call call_rwsem_wake\n" /* expects old value in %edx */
8628 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8629 {
8630 asm volatile("# beginning __downgrade_write\n\t"
8631 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8632 +
8633 +#ifdef CONFIG_PAX_REFCOUNT
8634 + "jno 0f\n"
8635 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8636 + "int $4\n0:\n"
8637 + _ASM_EXTABLE(0b, 0b)
8638 +#endif
8639 +
8640 /*
8641 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8642 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8643 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8644 */
8645 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8646 {
8647 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8648 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8649 +
8650 +#ifdef CONFIG_PAX_REFCOUNT
8651 + "jno 0f\n"
8652 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8653 + "int $4\n0:\n"
8654 + _ASM_EXTABLE(0b, 0b)
8655 +#endif
8656 +
8657 : "+m" (sem->count)
8658 : "er" (delta));
8659 }
8660 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8661 {
8662 long tmp = delta;
8663
8664 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8665 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8666 +
8667 +#ifdef CONFIG_PAX_REFCOUNT
8668 + "jno 0f\n"
8669 + "mov %0,%1\n"
8670 + "int $4\n0:\n"
8671 + _ASM_EXTABLE(0b, 0b)
8672 +#endif
8673 +
8674 : "+r" (tmp), "+m" (sem->count)
8675 : : "memory");
8676
8677 diff -urNp linux-3.0.3/arch/x86/include/asm/segment.h linux-3.0.3/arch/x86/include/asm/segment.h
8678 --- linux-3.0.3/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8679 +++ linux-3.0.3/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8680 @@ -64,8 +64,8 @@
8681 * 26 - ESPFIX small SS
8682 * 27 - per-cpu [ offset to per-cpu data area ]
8683 * 28 - stack_canary-20 [ for stack protector ]
8684 - * 29 - unused
8685 - * 30 - unused
8686 + * 29 - PCI BIOS CS
8687 + * 30 - PCI BIOS DS
8688 * 31 - TSS for double fault handler
8689 */
8690 #define GDT_ENTRY_TLS_MIN 6
8691 @@ -79,6 +79,8 @@
8692
8693 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8694
8695 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8696 +
8697 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8698
8699 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8700 @@ -104,6 +106,12 @@
8701 #define __KERNEL_STACK_CANARY 0
8702 #endif
8703
8704 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8705 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8706 +
8707 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8708 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8709 +
8710 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8711
8712 /*
8713 @@ -141,7 +149,7 @@
8714 */
8715
8716 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8717 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8718 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8719
8720
8721 #else
8722 @@ -165,6 +173,8 @@
8723 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8724 #define __USER32_DS __USER_DS
8725
8726 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8727 +
8728 #define GDT_ENTRY_TSS 8 /* needs two entries */
8729 #define GDT_ENTRY_LDT 10 /* needs two entries */
8730 #define GDT_ENTRY_TLS_MIN 12
8731 @@ -185,6 +195,7 @@
8732 #endif
8733
8734 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8735 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8736 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8737 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8738 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8739 diff -urNp linux-3.0.3/arch/x86/include/asm/smp.h linux-3.0.3/arch/x86/include/asm/smp.h
8740 --- linux-3.0.3/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8741 +++ linux-3.0.3/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8742 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8743 /* cpus sharing the last level cache: */
8744 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8745 DECLARE_PER_CPU(u16, cpu_llc_id);
8746 -DECLARE_PER_CPU(int, cpu_number);
8747 +DECLARE_PER_CPU(unsigned int, cpu_number);
8748
8749 static inline struct cpumask *cpu_sibling_mask(int cpu)
8750 {
8751 @@ -77,7 +77,7 @@ struct smp_ops {
8752
8753 void (*send_call_func_ipi)(const struct cpumask *mask);
8754 void (*send_call_func_single_ipi)(int cpu);
8755 -};
8756 +} __no_const;
8757
8758 /* Globals due to paravirt */
8759 extern void set_cpu_sibling_map(int cpu);
8760 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8761 extern int safe_smp_processor_id(void);
8762
8763 #elif defined(CONFIG_X86_64_SMP)
8764 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8765 -
8766 -#define stack_smp_processor_id() \
8767 -({ \
8768 - struct thread_info *ti; \
8769 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8770 - ti->cpu; \
8771 -})
8772 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8773 +#define stack_smp_processor_id() raw_smp_processor_id()
8774 #define safe_smp_processor_id() smp_processor_id()
8775
8776 #endif
8777 diff -urNp linux-3.0.3/arch/x86/include/asm/spinlock.h linux-3.0.3/arch/x86/include/asm/spinlock.h
8778 --- linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8779 +++ linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8780 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8781 static inline void arch_read_lock(arch_rwlock_t *rw)
8782 {
8783 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8784 +
8785 +#ifdef CONFIG_PAX_REFCOUNT
8786 + "jno 0f\n"
8787 + LOCK_PREFIX " addl $1,(%0)\n"
8788 + "int $4\n0:\n"
8789 + _ASM_EXTABLE(0b, 0b)
8790 +#endif
8791 +
8792 "jns 1f\n"
8793 "call __read_lock_failed\n\t"
8794 "1:\n"
8795 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8796 static inline void arch_write_lock(arch_rwlock_t *rw)
8797 {
8798 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8799 +
8800 +#ifdef CONFIG_PAX_REFCOUNT
8801 + "jno 0f\n"
8802 + LOCK_PREFIX " addl %1,(%0)\n"
8803 + "int $4\n0:\n"
8804 + _ASM_EXTABLE(0b, 0b)
8805 +#endif
8806 +
8807 "jz 1f\n"
8808 "call __write_lock_failed\n\t"
8809 "1:\n"
8810 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8811
8812 static inline void arch_read_unlock(arch_rwlock_t *rw)
8813 {
8814 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8815 + asm volatile(LOCK_PREFIX "incl %0\n"
8816 +
8817 +#ifdef CONFIG_PAX_REFCOUNT
8818 + "jno 0f\n"
8819 + LOCK_PREFIX "decl %0\n"
8820 + "int $4\n0:\n"
8821 + _ASM_EXTABLE(0b, 0b)
8822 +#endif
8823 +
8824 + :"+m" (rw->lock) : : "memory");
8825 }
8826
8827 static inline void arch_write_unlock(arch_rwlock_t *rw)
8828 {
8829 - asm volatile(LOCK_PREFIX "addl %1, %0"
8830 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8831 +
8832 +#ifdef CONFIG_PAX_REFCOUNT
8833 + "jno 0f\n"
8834 + LOCK_PREFIX "subl %1, %0\n"
8835 + "int $4\n0:\n"
8836 + _ASM_EXTABLE(0b, 0b)
8837 +#endif
8838 +
8839 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8840 }
8841
8842 diff -urNp linux-3.0.3/arch/x86/include/asm/stackprotector.h linux-3.0.3/arch/x86/include/asm/stackprotector.h
8843 --- linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8844 +++ linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8845 @@ -48,7 +48,7 @@
8846 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8847 */
8848 #define GDT_STACK_CANARY_INIT \
8849 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8850 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8851
8852 /*
8853 * Initialize the stackprotector canary value.
8854 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8855
8856 static inline void load_stack_canary_segment(void)
8857 {
8858 -#ifdef CONFIG_X86_32
8859 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8860 asm volatile ("mov %0, %%gs" : : "r" (0));
8861 #endif
8862 }
8863 diff -urNp linux-3.0.3/arch/x86/include/asm/stacktrace.h linux-3.0.3/arch/x86/include/asm/stacktrace.h
8864 --- linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8865 +++ linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8866 @@ -11,28 +11,20 @@
8867
8868 extern int kstack_depth_to_print;
8869
8870 -struct thread_info;
8871 +struct task_struct;
8872 struct stacktrace_ops;
8873
8874 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8875 - unsigned long *stack,
8876 - unsigned long bp,
8877 - const struct stacktrace_ops *ops,
8878 - void *data,
8879 - unsigned long *end,
8880 - int *graph);
8881 -
8882 -extern unsigned long
8883 -print_context_stack(struct thread_info *tinfo,
8884 - unsigned long *stack, unsigned long bp,
8885 - const struct stacktrace_ops *ops, void *data,
8886 - unsigned long *end, int *graph);
8887 -
8888 -extern unsigned long
8889 -print_context_stack_bp(struct thread_info *tinfo,
8890 - unsigned long *stack, unsigned long bp,
8891 - const struct stacktrace_ops *ops, void *data,
8892 - unsigned long *end, int *graph);
8893 +typedef unsigned long walk_stack_t(struct task_struct *task,
8894 + void *stack_start,
8895 + unsigned long *stack,
8896 + unsigned long bp,
8897 + const struct stacktrace_ops *ops,
8898 + void *data,
8899 + unsigned long *end,
8900 + int *graph);
8901 +
8902 +extern walk_stack_t print_context_stack;
8903 +extern walk_stack_t print_context_stack_bp;
8904
8905 /* Generic stack tracer with callbacks */
8906
8907 @@ -40,7 +32,7 @@ struct stacktrace_ops {
8908 void (*address)(void *data, unsigned long address, int reliable);
8909 /* On negative return stop dumping */
8910 int (*stack)(void *data, char *name);
8911 - walk_stack_t walk_stack;
8912 + walk_stack_t *walk_stack;
8913 };
8914
8915 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8916 diff -urNp linux-3.0.3/arch/x86/include/asm/system.h linux-3.0.3/arch/x86/include/asm/system.h
8917 --- linux-3.0.3/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8918 +++ linux-3.0.3/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8919 @@ -129,7 +129,7 @@ do { \
8920 "call __switch_to\n\t" \
8921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8922 __switch_canary \
8923 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8924 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8925 "movq %%rax,%%rdi\n\t" \
8926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8927 "jnz ret_from_fork\n\t" \
8928 @@ -140,7 +140,7 @@ do { \
8929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8931 [_tif_fork] "i" (_TIF_FORK), \
8932 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8933 + [thread_info] "m" (current_tinfo), \
8934 [current_task] "m" (current_task) \
8935 __switch_canary_iparam \
8936 : "memory", "cc" __EXTRA_CLOBBER)
8937 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8938 {
8939 unsigned long __limit;
8940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8941 - return __limit + 1;
8942 + return __limit;
8943 }
8944
8945 static inline void native_clts(void)
8946 @@ -397,12 +397,12 @@ void enable_hlt(void);
8947
8948 void cpu_idle_wait(void);
8949
8950 -extern unsigned long arch_align_stack(unsigned long sp);
8951 +#define arch_align_stack(x) ((x) & ~0xfUL)
8952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8953
8954 void default_idle(void);
8955
8956 -void stop_this_cpu(void *dummy);
8957 +void stop_this_cpu(void *dummy) __noreturn;
8958
8959 /*
8960 * Force strict CPU ordering.
8961 diff -urNp linux-3.0.3/arch/x86/include/asm/thread_info.h linux-3.0.3/arch/x86/include/asm/thread_info.h
8962 --- linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8963 +++ linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8964 @@ -10,6 +10,7 @@
8965 #include <linux/compiler.h>
8966 #include <asm/page.h>
8967 #include <asm/types.h>
8968 +#include <asm/percpu.h>
8969
8970 /*
8971 * low level task data that entry.S needs immediate access to
8972 @@ -24,7 +25,6 @@ struct exec_domain;
8973 #include <asm/atomic.h>
8974
8975 struct thread_info {
8976 - struct task_struct *task; /* main task structure */
8977 struct exec_domain *exec_domain; /* execution domain */
8978 __u32 flags; /* low level flags */
8979 __u32 status; /* thread synchronous flags */
8980 @@ -34,18 +34,12 @@ struct thread_info {
8981 mm_segment_t addr_limit;
8982 struct restart_block restart_block;
8983 void __user *sysenter_return;
8984 -#ifdef CONFIG_X86_32
8985 - unsigned long previous_esp; /* ESP of the previous stack in
8986 - case of nested (IRQ) stacks
8987 - */
8988 - __u8 supervisor_stack[0];
8989 -#endif
8990 + unsigned long lowest_stack;
8991 int uaccess_err;
8992 };
8993
8994 -#define INIT_THREAD_INFO(tsk) \
8995 +#define INIT_THREAD_INFO \
8996 { \
8997 - .task = &tsk, \
8998 .exec_domain = &default_exec_domain, \
8999 .flags = 0, \
9000 .cpu = 0, \
9001 @@ -56,7 +50,7 @@ struct thread_info {
9002 }, \
9003 }
9004
9005 -#define init_thread_info (init_thread_union.thread_info)
9006 +#define init_thread_info (init_thread_union.stack)
9007 #define init_stack (init_thread_union.stack)
9008
9009 #else /* !__ASSEMBLY__ */
9010 @@ -170,6 +164,23 @@ struct thread_info {
9011 ret; \
9012 })
9013
9014 +#ifdef __ASSEMBLY__
9015 +/* how to get the thread information struct from ASM */
9016 +#define GET_THREAD_INFO(reg) \
9017 + mov PER_CPU_VAR(current_tinfo), reg
9018 +
9019 +/* use this one if reg already contains %esp */
9020 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9021 +#else
9022 +/* how to get the thread information struct from C */
9023 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9024 +
9025 +static __always_inline struct thread_info *current_thread_info(void)
9026 +{
9027 + return percpu_read_stable(current_tinfo);
9028 +}
9029 +#endif
9030 +
9031 #ifdef CONFIG_X86_32
9032
9033 #define STACK_WARN (THREAD_SIZE/8)
9034 @@ -180,35 +191,13 @@ struct thread_info {
9035 */
9036 #ifndef __ASSEMBLY__
9037
9038 -
9039 /* how to get the current stack pointer from C */
9040 register unsigned long current_stack_pointer asm("esp") __used;
9041
9042 -/* how to get the thread information struct from C */
9043 -static inline struct thread_info *current_thread_info(void)
9044 -{
9045 - return (struct thread_info *)
9046 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9047 -}
9048 -
9049 -#else /* !__ASSEMBLY__ */
9050 -
9051 -/* how to get the thread information struct from ASM */
9052 -#define GET_THREAD_INFO(reg) \
9053 - movl $-THREAD_SIZE, reg; \
9054 - andl %esp, reg
9055 -
9056 -/* use this one if reg already contains %esp */
9057 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9058 - andl $-THREAD_SIZE, reg
9059 -
9060 #endif
9061
9062 #else /* X86_32 */
9063
9064 -#include <asm/percpu.h>
9065 -#define KERNEL_STACK_OFFSET (5*8)
9066 -
9067 /*
9068 * macros/functions for gaining access to the thread information structure
9069 * preempt_count needs to be 1 initially, until the scheduler is functional.
9070 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9071 #ifndef __ASSEMBLY__
9072 DECLARE_PER_CPU(unsigned long, kernel_stack);
9073
9074 -static inline struct thread_info *current_thread_info(void)
9075 -{
9076 - struct thread_info *ti;
9077 - ti = (void *)(percpu_read_stable(kernel_stack) +
9078 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9079 - return ti;
9080 -}
9081 -
9082 -#else /* !__ASSEMBLY__ */
9083 -
9084 -/* how to get the thread information struct from ASM */
9085 -#define GET_THREAD_INFO(reg) \
9086 - movq PER_CPU_VAR(kernel_stack),reg ; \
9087 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9088 -
9089 +/* how to get the current stack pointer from C */
9090 +register unsigned long current_stack_pointer asm("rsp") __used;
9091 #endif
9092
9093 #endif /* !X86_32 */
9094 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9095 extern void free_thread_info(struct thread_info *ti);
9096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9097 #define arch_task_cache_init arch_task_cache_init
9098 +
9099 +#define __HAVE_THREAD_FUNCTIONS
9100 +#define task_thread_info(task) (&(task)->tinfo)
9101 +#define task_stack_page(task) ((task)->stack)
9102 +#define setup_thread_stack(p, org) do {} while (0)
9103 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9104 +
9105 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9106 +extern struct task_struct *alloc_task_struct_node(int node);
9107 +extern void free_task_struct(struct task_struct *);
9108 +
9109 #endif
9110 #endif /* _ASM_X86_THREAD_INFO_H */
9111 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_32.h linux-3.0.3/arch/x86/include/asm/uaccess_32.h
9112 --- linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9113 +++ linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9114 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9115 static __always_inline unsigned long __must_check
9116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9117 {
9118 + pax_track_stack();
9119 +
9120 + if ((long)n < 0)
9121 + return n;
9122 +
9123 if (__builtin_constant_p(n)) {
9124 unsigned long ret;
9125
9126 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9127 return ret;
9128 }
9129 }
9130 + if (!__builtin_constant_p(n))
9131 + check_object_size(from, n, true);
9132 return __copy_to_user_ll(to, from, n);
9133 }
9134
9135 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9136 __copy_to_user(void __user *to, const void *from, unsigned long n)
9137 {
9138 might_fault();
9139 +
9140 return __copy_to_user_inatomic(to, from, n);
9141 }
9142
9143 static __always_inline unsigned long
9144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9145 {
9146 + if ((long)n < 0)
9147 + return n;
9148 +
9149 /* Avoid zeroing the tail if the copy fails..
9150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9151 * but as the zeroing behaviour is only significant when n is not
9152 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9153 __copy_from_user(void *to, const void __user *from, unsigned long n)
9154 {
9155 might_fault();
9156 +
9157 + pax_track_stack();
9158 +
9159 + if ((long)n < 0)
9160 + return n;
9161 +
9162 if (__builtin_constant_p(n)) {
9163 unsigned long ret;
9164
9165 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9166 return ret;
9167 }
9168 }
9169 + if (!__builtin_constant_p(n))
9170 + check_object_size(to, n, false);
9171 return __copy_from_user_ll(to, from, n);
9172 }
9173
9174 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9175 const void __user *from, unsigned long n)
9176 {
9177 might_fault();
9178 +
9179 + if ((long)n < 0)
9180 + return n;
9181 +
9182 if (__builtin_constant_p(n)) {
9183 unsigned long ret;
9184
9185 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9187 unsigned long n)
9188 {
9189 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9190 -}
9191 + if ((long)n < 0)
9192 + return n;
9193
9194 -unsigned long __must_check copy_to_user(void __user *to,
9195 - const void *from, unsigned long n);
9196 -unsigned long __must_check _copy_from_user(void *to,
9197 - const void __user *from,
9198 - unsigned long n);
9199 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9200 +}
9201
9202 +extern void copy_to_user_overflow(void)
9203 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9204 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9205 +#else
9206 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9207 +#endif
9208 +;
9209
9210 extern void copy_from_user_overflow(void)
9211 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9213 #endif
9214 ;
9215
9216 -static inline unsigned long __must_check copy_from_user(void *to,
9217 - const void __user *from,
9218 - unsigned long n)
9219 +/**
9220 + * copy_to_user: - Copy a block of data into user space.
9221 + * @to: Destination address, in user space.
9222 + * @from: Source address, in kernel space.
9223 + * @n: Number of bytes to copy.
9224 + *
9225 + * Context: User context only. This function may sleep.
9226 + *
9227 + * Copy data from kernel space to user space.
9228 + *
9229 + * Returns number of bytes that could not be copied.
9230 + * On success, this will be zero.
9231 + */
9232 +static inline unsigned long __must_check
9233 +copy_to_user(void __user *to, const void *from, unsigned long n)
9234 +{
9235 + int sz = __compiletime_object_size(from);
9236 +
9237 + if (unlikely(sz != -1 && sz < n))
9238 + copy_to_user_overflow();
9239 + else if (access_ok(VERIFY_WRITE, to, n))
9240 + n = __copy_to_user(to, from, n);
9241 + return n;
9242 +}
9243 +
9244 +/**
9245 + * copy_from_user: - Copy a block of data from user space.
9246 + * @to: Destination address, in kernel space.
9247 + * @from: Source address, in user space.
9248 + * @n: Number of bytes to copy.
9249 + *
9250 + * Context: User context only. This function may sleep.
9251 + *
9252 + * Copy data from user space to kernel space.
9253 + *
9254 + * Returns number of bytes that could not be copied.
9255 + * On success, this will be zero.
9256 + *
9257 + * If some data could not be copied, this function will pad the copied
9258 + * data to the requested size using zero bytes.
9259 + */
9260 +static inline unsigned long __must_check
9261 +copy_from_user(void *to, const void __user *from, unsigned long n)
9262 {
9263 int sz = __compiletime_object_size(to);
9264
9265 - if (likely(sz == -1 || sz >= n))
9266 - n = _copy_from_user(to, from, n);
9267 - else
9268 + if (unlikely(sz != -1 && sz < n))
9269 copy_from_user_overflow();
9270 -
9271 + else if (access_ok(VERIFY_READ, from, n))
9272 + n = __copy_from_user(to, from, n);
9273 + else if ((long)n > 0) {
9274 + if (!__builtin_constant_p(n))
9275 + check_object_size(to, n, false);
9276 + memset(to, 0, n);
9277 + }
9278 return n;
9279 }
9280
9281 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_64.h linux-3.0.3/arch/x86/include/asm/uaccess_64.h
9282 --- linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9283 +++ linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9284 @@ -10,6 +10,9 @@
9285 #include <asm/alternative.h>
9286 #include <asm/cpufeature.h>
9287 #include <asm/page.h>
9288 +#include <asm/pgtable.h>
9289 +
9290 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9291
9292 /*
9293 * Copy To/From Userspace
9294 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9295 return ret;
9296 }
9297
9298 -__must_check unsigned long
9299 -_copy_to_user(void __user *to, const void *from, unsigned len);
9300 -__must_check unsigned long
9301 -_copy_from_user(void *to, const void __user *from, unsigned len);
9302 +static __always_inline __must_check unsigned long
9303 +__copy_to_user(void __user *to, const void *from, unsigned len);
9304 +static __always_inline __must_check unsigned long
9305 +__copy_from_user(void *to, const void __user *from, unsigned len);
9306 __must_check unsigned long
9307 copy_in_user(void __user *to, const void __user *from, unsigned len);
9308
9309 static inline unsigned long __must_check copy_from_user(void *to,
9310 const void __user *from,
9311 - unsigned long n)
9312 + unsigned n)
9313 {
9314 - int sz = __compiletime_object_size(to);
9315 -
9316 might_fault();
9317 - if (likely(sz == -1 || sz >= n))
9318 - n = _copy_from_user(to, from, n);
9319 -#ifdef CONFIG_DEBUG_VM
9320 - else
9321 - WARN(1, "Buffer overflow detected!\n");
9322 -#endif
9323 +
9324 + if (access_ok(VERIFY_READ, from, n))
9325 + n = __copy_from_user(to, from, n);
9326 + else if ((int)n > 0) {
9327 + if (!__builtin_constant_p(n))
9328 + check_object_size(to, n, false);
9329 + memset(to, 0, n);
9330 + }
9331 return n;
9332 }
9333
9334 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9335 {
9336 might_fault();
9337
9338 - return _copy_to_user(dst, src, size);
9339 + if (access_ok(VERIFY_WRITE, dst, size))
9340 + size = __copy_to_user(dst, src, size);
9341 + return size;
9342 }
9343
9344 static __always_inline __must_check
9345 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9346 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 {
9348 - int ret = 0;
9349 + int sz = __compiletime_object_size(dst);
9350 + unsigned ret = 0;
9351
9352 might_fault();
9353 - if (!__builtin_constant_p(size))
9354 - return copy_user_generic(dst, (__force void *)src, size);
9355 +
9356 + pax_track_stack();
9357 +
9358 + if ((int)size < 0)
9359 + return size;
9360 +
9361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9362 + if (!__access_ok(VERIFY_READ, src, size))
9363 + return size;
9364 +#endif
9365 +
9366 + if (unlikely(sz != -1 && sz < size)) {
9367 +#ifdef CONFIG_DEBUG_VM
9368 + WARN(1, "Buffer overflow detected!\n");
9369 +#endif
9370 + return size;
9371 + }
9372 +
9373 + if (!__builtin_constant_p(size)) {
9374 + check_object_size(dst, size, false);
9375 +
9376 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9377 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9378 + src += PAX_USER_SHADOW_BASE;
9379 +#endif
9380 +
9381 + return copy_user_generic(dst, (__force const void *)src, size);
9382 + }
9383 switch (size) {
9384 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9385 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9386 ret, "b", "b", "=q", 1);
9387 return ret;
9388 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9389 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9390 ret, "w", "w", "=r", 2);
9391 return ret;
9392 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9393 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9394 ret, "l", "k", "=r", 4);
9395 return ret;
9396 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9397 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9398 ret, "q", "", "=r", 8);
9399 return ret;
9400 case 10:
9401 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9402 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9403 ret, "q", "", "=r", 10);
9404 if (unlikely(ret))
9405 return ret;
9406 __get_user_asm(*(u16 *)(8 + (char *)dst),
9407 - (u16 __user *)(8 + (char __user *)src),
9408 + (const u16 __user *)(8 + (const char __user *)src),
9409 ret, "w", "w", "=r", 2);
9410 return ret;
9411 case 16:
9412 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9413 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9414 ret, "q", "", "=r", 16);
9415 if (unlikely(ret))
9416 return ret;
9417 __get_user_asm(*(u64 *)(8 + (char *)dst),
9418 - (u64 __user *)(8 + (char __user *)src),
9419 + (const u64 __user *)(8 + (const char __user *)src),
9420 ret, "q", "", "=r", 8);
9421 return ret;
9422 default:
9423 - return copy_user_generic(dst, (__force void *)src, size);
9424 +
9425 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9426 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9427 + src += PAX_USER_SHADOW_BASE;
9428 +#endif
9429 +
9430 + return copy_user_generic(dst, (__force const void *)src, size);
9431 }
9432 }
9433
9434 static __always_inline __must_check
9435 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9436 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 {
9438 - int ret = 0;
9439 + int sz = __compiletime_object_size(src);
9440 + unsigned ret = 0;
9441
9442 might_fault();
9443 - if (!__builtin_constant_p(size))
9444 +
9445 + pax_track_stack();
9446 +
9447 + if ((int)size < 0)
9448 + return size;
9449 +
9450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9451 + if (!__access_ok(VERIFY_WRITE, dst, size))
9452 + return size;
9453 +#endif
9454 +
9455 + if (unlikely(sz != -1 && sz < size)) {
9456 +#ifdef CONFIG_DEBUG_VM
9457 + WARN(1, "Buffer overflow detected!\n");
9458 +#endif
9459 + return size;
9460 + }
9461 +
9462 + if (!__builtin_constant_p(size)) {
9463 + check_object_size(src, size, true);
9464 +
9465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9466 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9467 + dst += PAX_USER_SHADOW_BASE;
9468 +#endif
9469 +
9470 return copy_user_generic((__force void *)dst, src, size);
9471 + }
9472 switch (size) {
9473 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9474 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9475 ret, "b", "b", "iq", 1);
9476 return ret;
9477 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9478 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9479 ret, "w", "w", "ir", 2);
9480 return ret;
9481 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9482 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9483 ret, "l", "k", "ir", 4);
9484 return ret;
9485 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9486 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9487 ret, "q", "", "er", 8);
9488 return ret;
9489 case 10:
9490 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9491 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9492 ret, "q", "", "er", 10);
9493 if (unlikely(ret))
9494 return ret;
9495 asm("":::"memory");
9496 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9497 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9498 ret, "w", "w", "ir", 2);
9499 return ret;
9500 case 16:
9501 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9502 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9503 ret, "q", "", "er", 16);
9504 if (unlikely(ret))
9505 return ret;
9506 asm("":::"memory");
9507 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9508 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9509 ret, "q", "", "er", 8);
9510 return ret;
9511 default:
9512 +
9513 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9514 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9515 + dst += PAX_USER_SHADOW_BASE;
9516 +#endif
9517 +
9518 return copy_user_generic((__force void *)dst, src, size);
9519 }
9520 }
9521
9522 static __always_inline __must_check
9523 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9524 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 {
9526 - int ret = 0;
9527 + unsigned ret = 0;
9528
9529 might_fault();
9530 - if (!__builtin_constant_p(size))
9531 +
9532 + if ((int)size < 0)
9533 + return size;
9534 +
9535 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9536 + if (!__access_ok(VERIFY_READ, src, size))
9537 + return size;
9538 + if (!__access_ok(VERIFY_WRITE, dst, size))
9539 + return size;
9540 +#endif
9541 +
9542 + if (!__builtin_constant_p(size)) {
9543 +
9544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9545 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9546 + src += PAX_USER_SHADOW_BASE;
9547 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9548 + dst += PAX_USER_SHADOW_BASE;
9549 +#endif
9550 +
9551 return copy_user_generic((__force void *)dst,
9552 - (__force void *)src, size);
9553 + (__force const void *)src, size);
9554 + }
9555 switch (size) {
9556 case 1: {
9557 u8 tmp;
9558 - __get_user_asm(tmp, (u8 __user *)src,
9559 + __get_user_asm(tmp, (const u8 __user *)src,
9560 ret, "b", "b", "=q", 1);
9561 if (likely(!ret))
9562 __put_user_asm(tmp, (u8 __user *)dst,
9563 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9564 }
9565 case 2: {
9566 u16 tmp;
9567 - __get_user_asm(tmp, (u16 __user *)src,
9568 + __get_user_asm(tmp, (const u16 __user *)src,
9569 ret, "w", "w", "=r", 2);
9570 if (likely(!ret))
9571 __put_user_asm(tmp, (u16 __user *)dst,
9572 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9573
9574 case 4: {
9575 u32 tmp;
9576 - __get_user_asm(tmp, (u32 __user *)src,
9577 + __get_user_asm(tmp, (const u32 __user *)src,
9578 ret, "l", "k", "=r", 4);
9579 if (likely(!ret))
9580 __put_user_asm(tmp, (u32 __user *)dst,
9581 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9582 }
9583 case 8: {
9584 u64 tmp;
9585 - __get_user_asm(tmp, (u64 __user *)src,
9586 + __get_user_asm(tmp, (const u64 __user *)src,
9587 ret, "q", "", "=r", 8);
9588 if (likely(!ret))
9589 __put_user_asm(tmp, (u64 __user *)dst,
9590 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9591 return ret;
9592 }
9593 default:
9594 +
9595 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9596 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9597 + src += PAX_USER_SHADOW_BASE;
9598 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9599 + dst += PAX_USER_SHADOW_BASE;
9600 +#endif
9601 +
9602 return copy_user_generic((__force void *)dst,
9603 - (__force void *)src, size);
9604 + (__force const void *)src, size);
9605 }
9606 }
9607
9608 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9609 static __must_check __always_inline int
9610 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9611 {
9612 + pax_track_stack();
9613 +
9614 + if ((int)size < 0)
9615 + return size;
9616 +
9617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9618 + if (!__access_ok(VERIFY_READ, src, size))
9619 + return size;
9620 +
9621 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9622 + src += PAX_USER_SHADOW_BASE;
9623 +#endif
9624 +
9625 return copy_user_generic(dst, (__force const void *)src, size);
9626 }
9627
9628 -static __must_check __always_inline int
9629 +static __must_check __always_inline unsigned long
9630 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9631 {
9632 + if ((int)size < 0)
9633 + return size;
9634 +
9635 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9636 + if (!__access_ok(VERIFY_WRITE, dst, size))
9637 + return size;
9638 +
9639 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9640 + dst += PAX_USER_SHADOW_BASE;
9641 +#endif
9642 +
9643 return copy_user_generic((__force void *)dst, src, size);
9644 }
9645
9646 -extern long __copy_user_nocache(void *dst, const void __user *src,
9647 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9648 unsigned size, int zerorest);
9649
9650 -static inline int
9651 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9652 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 {
9654 might_sleep();
9655 +
9656 + if ((int)size < 0)
9657 + return size;
9658 +
9659 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9660 + if (!__access_ok(VERIFY_READ, src, size))
9661 + return size;
9662 +#endif
9663 +
9664 return __copy_user_nocache(dst, src, size, 1);
9665 }
9666
9667 -static inline int
9668 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9669 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 unsigned size)
9671 {
9672 + if ((int)size < 0)
9673 + return size;
9674 +
9675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9676 + if (!__access_ok(VERIFY_READ, src, size))
9677 + return size;
9678 +#endif
9679 +
9680 return __copy_user_nocache(dst, src, size, 0);
9681 }
9682
9683 -unsigned long
9684 +extern unsigned long
9685 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9686
9687 #endif /* _ASM_X86_UACCESS_64_H */
9688 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess.h linux-3.0.3/arch/x86/include/asm/uaccess.h
9689 --- linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9690 +++ linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9691 @@ -7,12 +7,15 @@
9692 #include <linux/compiler.h>
9693 #include <linux/thread_info.h>
9694 #include <linux/string.h>
9695 +#include <linux/sched.h>
9696 #include <asm/asm.h>
9697 #include <asm/page.h>
9698
9699 #define VERIFY_READ 0
9700 #define VERIFY_WRITE 1
9701
9702 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9703 +
9704 /*
9705 * The fs value determines whether argument validity checking should be
9706 * performed or not. If get_fs() == USER_DS, checking is performed, with
9707 @@ -28,7 +31,12 @@
9708
9709 #define get_ds() (KERNEL_DS)
9710 #define get_fs() (current_thread_info()->addr_limit)
9711 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9712 +void __set_fs(mm_segment_t x);
9713 +void set_fs(mm_segment_t x);
9714 +#else
9715 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9716 +#endif
9717
9718 #define segment_eq(a, b) ((a).seg == (b).seg)
9719
9720 @@ -76,7 +84,33 @@
9721 * checks that the pointer is in the user space range - after calling
9722 * this function, memory access functions may still return -EFAULT.
9723 */
9724 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9725 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726 +#define access_ok(type, addr, size) \
9727 +({ \
9728 + long __size = size; \
9729 + unsigned long __addr = (unsigned long)addr; \
9730 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9731 + unsigned long __end_ao = __addr + __size - 1; \
9732 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9733 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9734 + while(__addr_ao <= __end_ao) { \
9735 + char __c_ao; \
9736 + __addr_ao += PAGE_SIZE; \
9737 + if (__size > PAGE_SIZE) \
9738 + cond_resched(); \
9739 + if (__get_user(__c_ao, (char __user *)__addr)) \
9740 + break; \
9741 + if (type != VERIFY_WRITE) { \
9742 + __addr = __addr_ao; \
9743 + continue; \
9744 + } \
9745 + if (__put_user(__c_ao, (char __user *)__addr)) \
9746 + break; \
9747 + __addr = __addr_ao; \
9748 + } \
9749 + } \
9750 + __ret_ao; \
9751 +})
9752
9753 /*
9754 * The exception table consists of pairs of addresses: the first is the
9755 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9756 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9757 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9758
9759 -
9760 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9761 +#define __copyuser_seg "gs;"
9762 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9763 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9764 +#else
9765 +#define __copyuser_seg
9766 +#define __COPYUSER_SET_ES
9767 +#define __COPYUSER_RESTORE_ES
9768 +#endif
9769
9770 #ifdef CONFIG_X86_32
9771 #define __put_user_asm_u64(x, addr, err, errret) \
9772 - asm volatile("1: movl %%eax,0(%2)\n" \
9773 - "2: movl %%edx,4(%2)\n" \
9774 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9775 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9776 "3:\n" \
9777 ".section .fixup,\"ax\"\n" \
9778 "4: movl %3,%0\n" \
9779 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9780 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9781
9782 #define __put_user_asm_ex_u64(x, addr) \
9783 - asm volatile("1: movl %%eax,0(%1)\n" \
9784 - "2: movl %%edx,4(%1)\n" \
9785 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9786 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9787 "3:\n" \
9788 _ASM_EXTABLE(1b, 2b - 1b) \
9789 _ASM_EXTABLE(2b, 3b - 2b) \
9790 @@ -373,7 +415,7 @@ do { \
9791 } while (0)
9792
9793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9794 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9795 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9796 "2:\n" \
9797 ".section .fixup,\"ax\"\n" \
9798 "3: mov %3,%0\n" \
9799 @@ -381,7 +423,7 @@ do { \
9800 " jmp 2b\n" \
9801 ".previous\n" \
9802 _ASM_EXTABLE(1b, 3b) \
9803 - : "=r" (err), ltype(x) \
9804 + : "=r" (err), ltype (x) \
9805 : "m" (__m(addr)), "i" (errret), "0" (err))
9806
9807 #define __get_user_size_ex(x, ptr, size) \
9808 @@ -406,7 +448,7 @@ do { \
9809 } while (0)
9810
9811 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9812 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9813 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9814 "2:\n" \
9815 _ASM_EXTABLE(1b, 2b - 1b) \
9816 : ltype(x) : "m" (__m(addr)))
9817 @@ -423,13 +465,24 @@ do { \
9818 int __gu_err; \
9819 unsigned long __gu_val; \
9820 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9821 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9822 + (x) = (__typeof__(*(ptr)))__gu_val; \
9823 __gu_err; \
9824 })
9825
9826 /* FIXME: this hack is definitely wrong -AK */
9827 struct __large_struct { unsigned long buf[100]; };
9828 -#define __m(x) (*(struct __large_struct __user *)(x))
9829 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9830 +#define ____m(x) \
9831 +({ \
9832 + unsigned long ____x = (unsigned long)(x); \
9833 + if (____x < PAX_USER_SHADOW_BASE) \
9834 + ____x += PAX_USER_SHADOW_BASE; \
9835 + (void __user *)____x; \
9836 +})
9837 +#else
9838 +#define ____m(x) (x)
9839 +#endif
9840 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9841
9842 /*
9843 * Tell gcc we read from memory instead of writing: this is because
9844 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9845 * aliasing issues.
9846 */
9847 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9848 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9849 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9850 "2:\n" \
9851 ".section .fixup,\"ax\"\n" \
9852 "3: mov %3,%0\n" \
9853 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9854 ".previous\n" \
9855 _ASM_EXTABLE(1b, 3b) \
9856 : "=r"(err) \
9857 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9858 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9859
9860 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9861 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9862 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9863 "2:\n" \
9864 _ASM_EXTABLE(1b, 2b - 1b) \
9865 : : ltype(x), "m" (__m(addr)))
9866 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9867 * On error, the variable @x is set to zero.
9868 */
9869
9870 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9871 +#define __get_user(x, ptr) get_user((x), (ptr))
9872 +#else
9873 #define __get_user(x, ptr) \
9874 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9875 +#endif
9876
9877 /**
9878 * __put_user: - Write a simple value into user space, with less checking.
9879 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9880 * Returns zero on success, or -EFAULT on error.
9881 */
9882
9883 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884 +#define __put_user(x, ptr) put_user((x), (ptr))
9885 +#else
9886 #define __put_user(x, ptr) \
9887 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9888 +#endif
9889
9890 #define __get_user_unaligned __get_user
9891 #define __put_user_unaligned __put_user
9892 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9893 #define get_user_ex(x, ptr) do { \
9894 unsigned long __gue_val; \
9895 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9896 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9897 + (x) = (__typeof__(*(ptr)))__gue_val; \
9898 } while (0)
9899
9900 #ifdef CONFIG_X86_WP_WORKS_OK
9901 diff -urNp linux-3.0.3/arch/x86/include/asm/vgtod.h linux-3.0.3/arch/x86/include/asm/vgtod.h
9902 --- linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9903 +++ linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9904 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9905 int sysctl_enabled;
9906 struct timezone sys_tz;
9907 struct { /* extract of a clocksource struct */
9908 + char name[8];
9909 cycle_t (*vread)(void);
9910 cycle_t cycle_last;
9911 cycle_t mask;
9912 diff -urNp linux-3.0.3/arch/x86/include/asm/x86_init.h linux-3.0.3/arch/x86/include/asm/x86_init.h
9913 --- linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9914 +++ linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9915 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9916 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9917 void (*find_smp_config)(void);
9918 void (*get_smp_config)(unsigned int early);
9919 -};
9920 +} __no_const;
9921
9922 /**
9923 * struct x86_init_resources - platform specific resource related ops
9924 @@ -42,7 +42,7 @@ struct x86_init_resources {
9925 void (*probe_roms)(void);
9926 void (*reserve_resources)(void);
9927 char *(*memory_setup)(void);
9928 -};
9929 +} __no_const;
9930
9931 /**
9932 * struct x86_init_irqs - platform specific interrupt setup
9933 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9934 void (*pre_vector_init)(void);
9935 void (*intr_init)(void);
9936 void (*trap_init)(void);
9937 -};
9938 +} __no_const;
9939
9940 /**
9941 * struct x86_init_oem - oem platform specific customizing functions
9942 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9943 struct x86_init_oem {
9944 void (*arch_setup)(void);
9945 void (*banner)(void);
9946 -};
9947 +} __no_const;
9948
9949 /**
9950 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9951 @@ -76,7 +76,7 @@ struct x86_init_oem {
9952 */
9953 struct x86_init_mapping {
9954 void (*pagetable_reserve)(u64 start, u64 end);
9955 -};
9956 +} __no_const;
9957
9958 /**
9959 * struct x86_init_paging - platform specific paging functions
9960 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9961 struct x86_init_paging {
9962 void (*pagetable_setup_start)(pgd_t *base);
9963 void (*pagetable_setup_done)(pgd_t *base);
9964 -};
9965 +} __no_const;
9966
9967 /**
9968 * struct x86_init_timers - platform specific timer setup
9969 @@ -101,7 +101,7 @@ struct x86_init_timers {
9970 void (*tsc_pre_init)(void);
9971 void (*timer_init)(void);
9972 void (*wallclock_init)(void);
9973 -};
9974 +} __no_const;
9975
9976 /**
9977 * struct x86_init_iommu - platform specific iommu setup
9978 @@ -109,7 +109,7 @@ struct x86_init_timers {
9979 */
9980 struct x86_init_iommu {
9981 int (*iommu_init)(void);
9982 -};
9983 +} __no_const;
9984
9985 /**
9986 * struct x86_init_pci - platform specific pci init functions
9987 @@ -123,7 +123,7 @@ struct x86_init_pci {
9988 int (*init)(void);
9989 void (*init_irq)(void);
9990 void (*fixup_irqs)(void);
9991 -};
9992 +} __no_const;
9993
9994 /**
9995 * struct x86_init_ops - functions for platform specific setup
9996 @@ -139,7 +139,7 @@ struct x86_init_ops {
9997 struct x86_init_timers timers;
9998 struct x86_init_iommu iommu;
9999 struct x86_init_pci pci;
10000 -};
10001 +} __no_const;
10002
10003 /**
10004 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10005 @@ -147,7 +147,7 @@ struct x86_init_ops {
10006 */
10007 struct x86_cpuinit_ops {
10008 void (*setup_percpu_clockev)(void);
10009 -};
10010 +} __no_const;
10011
10012 /**
10013 * struct x86_platform_ops - platform specific runtime functions
10014 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10015 bool (*is_untracked_pat_range)(u64 start, u64 end);
10016 void (*nmi_init)(void);
10017 int (*i8042_detect)(void);
10018 -};
10019 +} __no_const;
10020
10021 struct pci_dev;
10022
10023 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10024 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10025 void (*teardown_msi_irq)(unsigned int irq);
10026 void (*teardown_msi_irqs)(struct pci_dev *dev);
10027 -};
10028 +} __no_const;
10029
10030 extern struct x86_init_ops x86_init;
10031 extern struct x86_cpuinit_ops x86_cpuinit;
10032 diff -urNp linux-3.0.3/arch/x86/include/asm/xsave.h linux-3.0.3/arch/x86/include/asm/xsave.h
10033 --- linux-3.0.3/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10034 +++ linux-3.0.3/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10035 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10036 {
10037 int err;
10038
10039 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10040 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10041 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10042 +#endif
10043 +
10044 /*
10045 * Clear the xsave header first, so that reserved fields are
10046 * initialized to zero.
10047 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10048 u32 lmask = mask;
10049 u32 hmask = mask >> 32;
10050
10051 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10052 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10053 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10054 +#endif
10055 +
10056 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10057 "2:\n"
10058 ".section .fixup,\"ax\"\n"
10059 diff -urNp linux-3.0.3/arch/x86/Kconfig linux-3.0.3/arch/x86/Kconfig
10060 --- linux-3.0.3/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10061 +++ linux-3.0.3/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10062 @@ -229,7 +229,7 @@ config X86_HT
10063
10064 config X86_32_LAZY_GS
10065 def_bool y
10066 - depends on X86_32 && !CC_STACKPROTECTOR
10067 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10068
10069 config ARCH_HWEIGHT_CFLAGS
10070 string
10071 @@ -1018,7 +1018,7 @@ choice
10072
10073 config NOHIGHMEM
10074 bool "off"
10075 - depends on !X86_NUMAQ
10076 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10077 ---help---
10078 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10079 However, the address space of 32-bit x86 processors is only 4
10080 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10081
10082 config HIGHMEM4G
10083 bool "4GB"
10084 - depends on !X86_NUMAQ
10085 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10086 ---help---
10087 Select this if you have a 32-bit processor and between 1 and 4
10088 gigabytes of physical RAM.
10089 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10090 hex
10091 default 0xB0000000 if VMSPLIT_3G_OPT
10092 default 0x80000000 if VMSPLIT_2G
10093 - default 0x78000000 if VMSPLIT_2G_OPT
10094 + default 0x70000000 if VMSPLIT_2G_OPT
10095 default 0x40000000 if VMSPLIT_1G
10096 default 0xC0000000
10097 depends on X86_32
10098 @@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10099
10100 config EFI
10101 bool "EFI runtime service support"
10102 - depends on ACPI
10103 + depends on ACPI && !PAX_KERNEXEC
10104 ---help---
10105 This enables the kernel to use EFI runtime services that are
10106 available (such as the EFI variable services).
10107 @@ -1483,6 +1483,7 @@ config SECCOMP
10108
10109 config CC_STACKPROTECTOR
10110 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10111 + depends on X86_64 || !PAX_MEMORY_UDEREF
10112 ---help---
10113 This option turns on the -fstack-protector GCC feature. This
10114 feature puts, at the beginning of functions, a canary value on
10115 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10116 config PHYSICAL_START
10117 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10118 default "0x1000000"
10119 + range 0x400000 0x40000000
10120 ---help---
10121 This gives the physical address where the kernel is loaded.
10122
10123 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10124 config PHYSICAL_ALIGN
10125 hex "Alignment value to which kernel should be aligned" if X86_32
10126 default "0x1000000"
10127 + range 0x400000 0x1000000 if PAX_KERNEXEC
10128 range 0x2000 0x1000000
10129 ---help---
10130 This value puts the alignment restrictions on physical address
10131 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10132 Say N if you want to disable CPU hotplug.
10133
10134 config COMPAT_VDSO
10135 - def_bool y
10136 + def_bool n
10137 prompt "Compat VDSO support"
10138 depends on X86_32 || IA32_EMULATION
10139 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10140 ---help---
10141 Map the 32-bit VDSO to the predictable old-style address too.
10142
10143 diff -urNp linux-3.0.3/arch/x86/Kconfig.cpu linux-3.0.3/arch/x86/Kconfig.cpu
10144 --- linux-3.0.3/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10145 +++ linux-3.0.3/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10146 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10147
10148 config X86_F00F_BUG
10149 def_bool y
10150 - depends on M586MMX || M586TSC || M586 || M486 || M386
10151 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10152
10153 config X86_INVD_BUG
10154 def_bool y
10155 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10156
10157 config X86_ALIGNMENT_16
10158 def_bool y
10159 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10160 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161
10162 config X86_INTEL_USERCOPY
10163 def_bool y
10164 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10165 # generates cmov.
10166 config X86_CMOV
10167 def_bool y
10168 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10169 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170
10171 config X86_MINIMUM_CPU_FAMILY
10172 int
10173 diff -urNp linux-3.0.3/arch/x86/Kconfig.debug linux-3.0.3/arch/x86/Kconfig.debug
10174 --- linux-3.0.3/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10175 +++ linux-3.0.3/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10176 @@ -81,7 +81,7 @@ config X86_PTDUMP
10177 config DEBUG_RODATA
10178 bool "Write protect kernel read-only data structures"
10179 default y
10180 - depends on DEBUG_KERNEL
10181 + depends on DEBUG_KERNEL && BROKEN
10182 ---help---
10183 Mark the kernel read-only data as write-protected in the pagetables,
10184 in order to catch accidental (and incorrect) writes to such const
10185 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10186
10187 config DEBUG_SET_MODULE_RONX
10188 bool "Set loadable kernel module data as NX and text as RO"
10189 - depends on MODULES
10190 + depends on MODULES && BROKEN
10191 ---help---
10192 This option helps catch unintended modifications to loadable
10193 kernel module's text and read-only data. It also prevents execution
10194 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile
10195 --- linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10196 +++ linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10197 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10198 $(call cc-option, -fno-stack-protector) \
10199 $(call cc-option, -mpreferred-stack-boundary=2)
10200 KBUILD_CFLAGS += $(call cc-option, -m32)
10201 +ifdef CONSTIFY_PLUGIN
10202 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10203 +endif
10204 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10205 GCOV_PROFILE := n
10206
10207 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S
10208 --- linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10209 +++ linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10210 @@ -108,6 +108,9 @@ wakeup_code:
10211 /* Do any other stuff... */
10212
10213 #ifndef CONFIG_64BIT
10214 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10215 + call verify_cpu
10216 +
10217 /* This could also be done in C code... */
10218 movl pmode_cr3, %eax
10219 movl %eax, %cr3
10220 @@ -131,6 +134,7 @@ wakeup_code:
10221 movl pmode_cr0, %eax
10222 movl %eax, %cr0
10223 jmp pmode_return
10224 +# include "../../verify_cpu.S"
10225 #else
10226 pushw $0
10227 pushw trampoline_segment
10228 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/sleep.c linux-3.0.3/arch/x86/kernel/acpi/sleep.c
10229 --- linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10230 +++ linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10231 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10232 header->trampoline_segment = trampoline_address() >> 4;
10233 #ifdef CONFIG_SMP
10234 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10235 +
10236 + pax_open_kernel();
10237 early_gdt_descr.address =
10238 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10239 + pax_close_kernel();
10240 +
10241 initial_gs = per_cpu_offset(smp_processor_id());
10242 #endif
10243 initial_code = (unsigned long)wakeup_long64;
10244 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S
10245 --- linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10246 +++ linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10247 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10248 # and restore the stack ... but you need gdt for this to work
10249 movl saved_context_esp, %esp
10250
10251 - movl %cs:saved_magic, %eax
10252 - cmpl $0x12345678, %eax
10253 + cmpl $0x12345678, saved_magic
10254 jne bogus_magic
10255
10256 # jump to place where we left off
10257 - movl saved_eip, %eax
10258 - jmp *%eax
10259 + jmp *(saved_eip)
10260
10261 bogus_magic:
10262 jmp bogus_magic
10263 diff -urNp linux-3.0.3/arch/x86/kernel/alternative.c linux-3.0.3/arch/x86/kernel/alternative.c
10264 --- linux-3.0.3/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10265 +++ linux-3.0.3/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10266 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10267 if (!*poff || ptr < text || ptr >= text_end)
10268 continue;
10269 /* turn DS segment override prefix into lock prefix */
10270 - if (*ptr == 0x3e)
10271 + if (*ktla_ktva(ptr) == 0x3e)
10272 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10273 };
10274 mutex_unlock(&text_mutex);
10275 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10276 if (!*poff || ptr < text || ptr >= text_end)
10277 continue;
10278 /* turn lock prefix into DS segment override prefix */
10279 - if (*ptr == 0xf0)
10280 + if (*ktla_ktva(ptr) == 0xf0)
10281 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10282 };
10283 mutex_unlock(&text_mutex);
10284 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10285
10286 BUG_ON(p->len > MAX_PATCH_LEN);
10287 /* prep the buffer with the original instructions */
10288 - memcpy(insnbuf, p->instr, p->len);
10289 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10290 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10291 (unsigned long)p->instr, p->len);
10292
10293 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10294 if (smp_alt_once)
10295 free_init_pages("SMP alternatives",
10296 (unsigned long)__smp_locks,
10297 - (unsigned long)__smp_locks_end);
10298 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10299
10300 restart_nmi();
10301 }
10302 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10303 * instructions. And on the local CPU you need to be protected again NMI or MCE
10304 * handlers seeing an inconsistent instruction while you patch.
10305 */
10306 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10307 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10308 size_t len)
10309 {
10310 unsigned long flags;
10311 local_irq_save(flags);
10312 - memcpy(addr, opcode, len);
10313 +
10314 + pax_open_kernel();
10315 + memcpy(ktla_ktva(addr), opcode, len);
10316 sync_core();
10317 + pax_close_kernel();
10318 +
10319 local_irq_restore(flags);
10320 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10321 that causes hangs on some VIA CPUs. */
10322 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10323 */
10324 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10325 {
10326 - unsigned long flags;
10327 - char *vaddr;
10328 + unsigned char *vaddr = ktla_ktva(addr);
10329 struct page *pages[2];
10330 - int i;
10331 + size_t i;
10332
10333 if (!core_kernel_text((unsigned long)addr)) {
10334 - pages[0] = vmalloc_to_page(addr);
10335 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10336 + pages[0] = vmalloc_to_page(vaddr);
10337 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10338 } else {
10339 - pages[0] = virt_to_page(addr);
10340 + pages[0] = virt_to_page(vaddr);
10341 WARN_ON(!PageReserved(pages[0]));
10342 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10343 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10344 }
10345 BUG_ON(!pages[0]);
10346 - local_irq_save(flags);
10347 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10348 - if (pages[1])
10349 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10350 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10351 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10352 - clear_fixmap(FIX_TEXT_POKE0);
10353 - if (pages[1])
10354 - clear_fixmap(FIX_TEXT_POKE1);
10355 - local_flush_tlb();
10356 - sync_core();
10357 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10358 - that causes hangs on some VIA CPUs. */
10359 + text_poke_early(addr, opcode, len);
10360 for (i = 0; i < len; i++)
10361 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10362 - local_irq_restore(flags);
10363 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10364 return addr;
10365 }
10366
10367 diff -urNp linux-3.0.3/arch/x86/kernel/apic/apic.c linux-3.0.3/arch/x86/kernel/apic/apic.c
10368 --- linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10369 +++ linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10370 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10371 /*
10372 * Debug level, exported for io_apic.c
10373 */
10374 -unsigned int apic_verbosity;
10375 +int apic_verbosity;
10376
10377 int pic_mode;
10378
10379 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10380 apic_write(APIC_ESR, 0);
10381 v1 = apic_read(APIC_ESR);
10382 ack_APIC_irq();
10383 - atomic_inc(&irq_err_count);
10384 + atomic_inc_unchecked(&irq_err_count);
10385
10386 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10387 smp_processor_id(), v0 , v1);
10388 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10389 u16 *bios_cpu_apicid;
10390 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10391
10392 + pax_track_stack();
10393 +
10394 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10395 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10396
10397 diff -urNp linux-3.0.3/arch/x86/kernel/apic/io_apic.c linux-3.0.3/arch/x86/kernel/apic/io_apic.c
10398 --- linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10399 +++ linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10400 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10401 }
10402 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10403
10404 -void lock_vector_lock(void)
10405 +void lock_vector_lock(void) __acquires(vector_lock)
10406 {
10407 /* Used to the online set of cpus does not change
10408 * during assign_irq_vector.
10409 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10410 raw_spin_lock(&vector_lock);
10411 }
10412
10413 -void unlock_vector_lock(void)
10414 +void unlock_vector_lock(void) __releases(vector_lock)
10415 {
10416 raw_spin_unlock(&vector_lock);
10417 }
10418 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10419 ack_APIC_irq();
10420 }
10421
10422 -atomic_t irq_mis_count;
10423 +atomic_unchecked_t irq_mis_count;
10424
10425 /*
10426 * IO-APIC versions below 0x20 don't support EOI register.
10427 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10428 * at the cpu.
10429 */
10430 if (!(v & (1 << (i & 0x1f)))) {
10431 - atomic_inc(&irq_mis_count);
10432 + atomic_inc_unchecked(&irq_mis_count);
10433
10434 eoi_ioapic_irq(irq, cfg);
10435 }
10436 diff -urNp linux-3.0.3/arch/x86/kernel/apm_32.c linux-3.0.3/arch/x86/kernel/apm_32.c
10437 --- linux-3.0.3/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10438 +++ linux-3.0.3/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10439 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10440 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10441 * even though they are called in protected mode.
10442 */
10443 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10444 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10445 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10446
10447 static const char driver_version[] = "1.16ac"; /* no spaces */
10448 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10449 BUG_ON(cpu != 0);
10450 gdt = get_cpu_gdt_table(cpu);
10451 save_desc_40 = gdt[0x40 / 8];
10452 +
10453 + pax_open_kernel();
10454 gdt[0x40 / 8] = bad_bios_desc;
10455 + pax_close_kernel();
10456
10457 apm_irq_save(flags);
10458 APM_DO_SAVE_SEGS;
10459 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10460 &call->esi);
10461 APM_DO_RESTORE_SEGS;
10462 apm_irq_restore(flags);
10463 +
10464 + pax_open_kernel();
10465 gdt[0x40 / 8] = save_desc_40;
10466 + pax_close_kernel();
10467 +
10468 put_cpu();
10469
10470 return call->eax & 0xff;
10471 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10472 BUG_ON(cpu != 0);
10473 gdt = get_cpu_gdt_table(cpu);
10474 save_desc_40 = gdt[0x40 / 8];
10475 +
10476 + pax_open_kernel();
10477 gdt[0x40 / 8] = bad_bios_desc;
10478 + pax_close_kernel();
10479
10480 apm_irq_save(flags);
10481 APM_DO_SAVE_SEGS;
10482 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10483 &call->eax);
10484 APM_DO_RESTORE_SEGS;
10485 apm_irq_restore(flags);
10486 +
10487 + pax_open_kernel();
10488 gdt[0x40 / 8] = save_desc_40;
10489 + pax_close_kernel();
10490 +
10491 put_cpu();
10492 return error;
10493 }
10494 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10495 * code to that CPU.
10496 */
10497 gdt = get_cpu_gdt_table(0);
10498 +
10499 + pax_open_kernel();
10500 set_desc_base(&gdt[APM_CS >> 3],
10501 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10502 set_desc_base(&gdt[APM_CS_16 >> 3],
10503 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10504 set_desc_base(&gdt[APM_DS >> 3],
10505 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10506 + pax_close_kernel();
10507
10508 proc_create("apm", 0, NULL, &apm_file_ops);
10509
10510 diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets_64.c linux-3.0.3/arch/x86/kernel/asm-offsets_64.c
10511 --- linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10512 +++ linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10513 @@ -69,6 +69,7 @@ int main(void)
10514 BLANK();
10515 #undef ENTRY
10516
10517 + DEFINE(TSS_size, sizeof(struct tss_struct));
10518 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10519 BLANK();
10520
10521 diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets.c linux-3.0.3/arch/x86/kernel/asm-offsets.c
10522 --- linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10523 +++ linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10524 @@ -33,6 +33,8 @@ void common(void) {
10525 OFFSET(TI_status, thread_info, status);
10526 OFFSET(TI_addr_limit, thread_info, addr_limit);
10527 OFFSET(TI_preempt_count, thread_info, preempt_count);
10528 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10529 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10530
10531 BLANK();
10532 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10533 @@ -53,8 +55,26 @@ void common(void) {
10534 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10535 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10536 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10537 +
10538 +#ifdef CONFIG_PAX_KERNEXEC
10539 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10540 +#endif
10541 +
10542 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10543 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10544 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10545 +#ifdef CONFIG_X86_64
10546 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10547 +#endif
10548 #endif
10549
10550 +#endif
10551 +
10552 + BLANK();
10553 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10554 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10555 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10556 +
10557 #ifdef CONFIG_XEN
10558 BLANK();
10559 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10560 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/amd.c linux-3.0.3/arch/x86/kernel/cpu/amd.c
10561 --- linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10562 +++ linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10563 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10564 unsigned int size)
10565 {
10566 /* AMD errata T13 (order #21922) */
10567 - if ((c->x86 == 6)) {
10568 + if (c->x86 == 6) {
10569 /* Duron Rev A0 */
10570 if (c->x86_model == 3 && c->x86_mask == 0)
10571 size = 64;
10572 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/common.c linux-3.0.3/arch/x86/kernel/cpu/common.c
10573 --- linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10574 +++ linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10575 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10576
10577 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10578
10579 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10580 -#ifdef CONFIG_X86_64
10581 - /*
10582 - * We need valid kernel segments for data and code in long mode too
10583 - * IRET will check the segment types kkeil 2000/10/28
10584 - * Also sysret mandates a special GDT layout
10585 - *
10586 - * TLS descriptors are currently at a different place compared to i386.
10587 - * Hopefully nobody expects them at a fixed place (Wine?)
10588 - */
10589 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10590 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10591 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10592 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10593 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10594 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10595 -#else
10596 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10597 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10598 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10599 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10600 - /*
10601 - * Segments used for calling PnP BIOS have byte granularity.
10602 - * They code segments and data segments have fixed 64k limits,
10603 - * the transfer segment sizes are set at run time.
10604 - */
10605 - /* 32-bit code */
10606 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10607 - /* 16-bit code */
10608 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10609 - /* 16-bit data */
10610 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10611 - /* 16-bit data */
10612 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10613 - /* 16-bit data */
10614 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10615 - /*
10616 - * The APM segments have byte granularity and their bases
10617 - * are set at run time. All have 64k limits.
10618 - */
10619 - /* 32-bit code */
10620 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10621 - /* 16-bit code */
10622 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10623 - /* data */
10624 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10625 -
10626 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10627 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628 - GDT_STACK_CANARY_INIT
10629 -#endif
10630 -} };
10631 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10632 -
10633 static int __init x86_xsave_setup(char *s)
10634 {
10635 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10636 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10637 {
10638 struct desc_ptr gdt_descr;
10639
10640 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10641 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10642 gdt_descr.size = GDT_SIZE - 1;
10643 load_gdt(&gdt_descr);
10644 /* Reload the per-cpu base */
10645 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10646 /* Filter out anything that depends on CPUID levels we don't have */
10647 filter_cpuid_features(c, true);
10648
10649 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10650 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10651 +#endif
10652 +
10653 /* If the model name is still unset, do table lookup. */
10654 if (!c->x86_model_id[0]) {
10655 const char *p;
10656 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10657 }
10658 __setup("clearcpuid=", setup_disablecpuid);
10659
10660 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10661 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10662 +
10663 #ifdef CONFIG_X86_64
10664 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10665
10666 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10667 EXPORT_PER_CPU_SYMBOL(current_task);
10668
10669 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10670 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10671 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10672 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10673
10674 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10675 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10676 {
10677 memset(regs, 0, sizeof(struct pt_regs));
10678 regs->fs = __KERNEL_PERCPU;
10679 - regs->gs = __KERNEL_STACK_CANARY;
10680 + savesegment(gs, regs->gs);
10681
10682 return regs;
10683 }
10684 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10685 int i;
10686
10687 cpu = stack_smp_processor_id();
10688 - t = &per_cpu(init_tss, cpu);
10689 + t = init_tss + cpu;
10690 oist = &per_cpu(orig_ist, cpu);
10691
10692 #ifdef CONFIG_NUMA
10693 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10694 switch_to_new_gdt(cpu);
10695 loadsegment(fs, 0);
10696
10697 - load_idt((const struct desc_ptr *)&idt_descr);
10698 + load_idt(&idt_descr);
10699
10700 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10701 syscall_init();
10702 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10703 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10704 barrier();
10705
10706 - x86_configure_nx();
10707 if (cpu != 0)
10708 enable_x2apic();
10709
10710 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10711 {
10712 int cpu = smp_processor_id();
10713 struct task_struct *curr = current;
10714 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10715 + struct tss_struct *t = init_tss + cpu;
10716 struct thread_struct *thread = &curr->thread;
10717
10718 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10719 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/intel.c linux-3.0.3/arch/x86/kernel/cpu/intel.c
10720 --- linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:44:40.000000000 -0400
10721 +++ linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-26 19:49:56.000000000 -0400
10722 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10723 * Update the IDT descriptor and reload the IDT so that
10724 * it uses the read-only mapped virtual address.
10725 */
10726 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10727 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10728 load_idt(&idt_descr);
10729 }
10730 #endif
10731 @@ -466,7 +466,7 @@ static void __cpuinit init_intel(struct
10732
10733 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
10734 if ((epb & 0xF) == 0) {
10735 - printk_once(KERN_WARNING, "x86: updated energy_perf_bias"
10736 + printk_once(KERN_WARNING "x86: updated energy_perf_bias"
10737 " to 'normal' from 'performance'\n"
10738 "You can view and update epb via utility,"
10739 " such as x86_energy_perf_policy(8)\n");
10740 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/Makefile linux-3.0.3/arch/x86/kernel/cpu/Makefile
10741 --- linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10742 +++ linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10743 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10744 CFLAGS_REMOVE_perf_event.o = -pg
10745 endif
10746
10747 -# Make sure load_percpu_segment has no stackprotector
10748 -nostackp := $(call cc-option, -fno-stack-protector)
10749 -CFLAGS_common.o := $(nostackp)
10750 -
10751 obj-y := intel_cacheinfo.o scattered.o topology.o
10752 obj-y += proc.o capflags.o powerflags.o common.o
10753 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10754 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c
10755 --- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10756 +++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10757 @@ -46,6 +46,7 @@
10758 #include <asm/ipi.h>
10759 #include <asm/mce.h>
10760 #include <asm/msr.h>
10761 +#include <asm/local.h>
10762
10763 #include "mce-internal.h"
10764
10765 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10766 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10767 m->cs, m->ip);
10768
10769 - if (m->cs == __KERNEL_CS)
10770 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10771 print_symbol("{%s}", m->ip);
10772 pr_cont("\n");
10773 }
10774 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10775
10776 #define PANIC_TIMEOUT 5 /* 5 seconds */
10777
10778 -static atomic_t mce_paniced;
10779 +static atomic_unchecked_t mce_paniced;
10780
10781 static int fake_panic;
10782 -static atomic_t mce_fake_paniced;
10783 +static atomic_unchecked_t mce_fake_paniced;
10784
10785 /* Panic in progress. Enable interrupts and wait for final IPI */
10786 static void wait_for_panic(void)
10787 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10788 /*
10789 * Make sure only one CPU runs in machine check panic
10790 */
10791 - if (atomic_inc_return(&mce_paniced) > 1)
10792 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10793 wait_for_panic();
10794 barrier();
10795
10796 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10797 console_verbose();
10798 } else {
10799 /* Don't log too much for fake panic */
10800 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10801 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10802 return;
10803 }
10804 /* First print corrected ones that are still unlogged */
10805 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10806 * might have been modified by someone else.
10807 */
10808 rmb();
10809 - if (atomic_read(&mce_paniced))
10810 + if (atomic_read_unchecked(&mce_paniced))
10811 wait_for_panic();
10812 if (!monarch_timeout)
10813 goto out;
10814 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10815 */
10816
10817 static DEFINE_SPINLOCK(mce_state_lock);
10818 -static int open_count; /* #times opened */
10819 +static local_t open_count; /* #times opened */
10820 static int open_exclu; /* already open exclusive? */
10821
10822 static int mce_open(struct inode *inode, struct file *file)
10823 {
10824 spin_lock(&mce_state_lock);
10825
10826 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10827 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10828 spin_unlock(&mce_state_lock);
10829
10830 return -EBUSY;
10831 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10832
10833 if (file->f_flags & O_EXCL)
10834 open_exclu = 1;
10835 - open_count++;
10836 + local_inc(&open_count);
10837
10838 spin_unlock(&mce_state_lock);
10839
10840 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10841 {
10842 spin_lock(&mce_state_lock);
10843
10844 - open_count--;
10845 + local_dec(&open_count);
10846 open_exclu = 0;
10847
10848 spin_unlock(&mce_state_lock);
10849 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10850 static void mce_reset(void)
10851 {
10852 cpu_missing = 0;
10853 - atomic_set(&mce_fake_paniced, 0);
10854 + atomic_set_unchecked(&mce_fake_paniced, 0);
10855 atomic_set(&mce_executing, 0);
10856 atomic_set(&mce_callin, 0);
10857 atomic_set(&global_nwo, 0);
10858 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c
10859 --- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10860 +++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10861 @@ -215,7 +215,9 @@ static int inject_init(void)
10862 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10863 return -ENOMEM;
10864 printk(KERN_INFO "Machine check injector initialized\n");
10865 - mce_chrdev_ops.write = mce_write;
10866 + pax_open_kernel();
10867 + *(void **)&mce_chrdev_ops.write = mce_write;
10868 + pax_close_kernel();
10869 register_die_notifier(&mce_raise_nb);
10870 return 0;
10871 }
10872 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c
10873 --- linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-07-21 22:17:23.000000000 -0400
10874 +++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-08-23 21:47:55.000000000 -0400
10875 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10876 u64 size_or_mask, size_and_mask;
10877 static bool mtrr_aps_delayed_init;
10878
10879 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10880 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10881
10882 const struct mtrr_ops *mtrr_if;
10883
10884 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h
10885 --- linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10886 +++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10887 @@ -25,7 +25,7 @@ struct mtrr_ops {
10888 int (*validate_add_page)(unsigned long base, unsigned long size,
10889 unsigned int type);
10890 int (*have_wrcomb)(void);
10891 -};
10892 +} __do_const;
10893
10894 extern int generic_get_free_region(unsigned long base, unsigned long size,
10895 int replace_reg);
10896 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/perf_event.c linux-3.0.3/arch/x86/kernel/cpu/perf_event.c
10897 --- linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10898 +++ linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10899 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10900 int i, j, w, wmax, num = 0;
10901 struct hw_perf_event *hwc;
10902
10903 + pax_track_stack();
10904 +
10905 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10906
10907 for (i = 0; i < n; i++) {
10908 @@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10909 break;
10910
10911 perf_callchain_store(entry, frame.return_address);
10912 - fp = frame.next_frame;
10913 + fp = (__force const void __user *)frame.next_frame;
10914 }
10915 }
10916
10917 diff -urNp linux-3.0.3/arch/x86/kernel/crash.c linux-3.0.3/arch/x86/kernel/crash.c
10918 --- linux-3.0.3/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10919 +++ linux-3.0.3/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10920 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10921 regs = args->regs;
10922
10923 #ifdef CONFIG_X86_32
10924 - if (!user_mode_vm(regs)) {
10925 + if (!user_mode(regs)) {
10926 crash_fixup_ss_esp(&fixed_regs, regs);
10927 regs = &fixed_regs;
10928 }
10929 diff -urNp linux-3.0.3/arch/x86/kernel/doublefault_32.c linux-3.0.3/arch/x86/kernel/doublefault_32.c
10930 --- linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10931 +++ linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10932 @@ -11,7 +11,7 @@
10933
10934 #define DOUBLEFAULT_STACKSIZE (1024)
10935 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10936 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10937 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10938
10939 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10940
10941 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10942 unsigned long gdt, tss;
10943
10944 store_gdt(&gdt_desc);
10945 - gdt = gdt_desc.address;
10946 + gdt = (unsigned long)gdt_desc.address;
10947
10948 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10949
10950 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10951 /* 0x2 bit is always set */
10952 .flags = X86_EFLAGS_SF | 0x2,
10953 .sp = STACK_START,
10954 - .es = __USER_DS,
10955 + .es = __KERNEL_DS,
10956 .cs = __KERNEL_CS,
10957 .ss = __KERNEL_DS,
10958 - .ds = __USER_DS,
10959 + .ds = __KERNEL_DS,
10960 .fs = __KERNEL_PERCPU,
10961
10962 .__cr3 = __pa_nodebug(swapper_pg_dir),
10963 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_32.c linux-3.0.3/arch/x86/kernel/dumpstack_32.c
10964 --- linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10965 +++ linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10966 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10967 bp = stack_frame(task, regs);
10968
10969 for (;;) {
10970 - struct thread_info *context;
10971 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10972
10973 - context = (struct thread_info *)
10974 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10975 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10976 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10977
10978 - stack = (unsigned long *)context->previous_esp;
10979 - if (!stack)
10980 + if (stack_start == task_stack_page(task))
10981 break;
10982 + stack = *(unsigned long **)stack_start;
10983 if (ops->stack(data, "IRQ") < 0)
10984 break;
10985 touch_nmi_watchdog();
10986 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10987 * When in-kernel, we also print out the stack and code at the
10988 * time of the fault..
10989 */
10990 - if (!user_mode_vm(regs)) {
10991 + if (!user_mode(regs)) {
10992 unsigned int code_prologue = code_bytes * 43 / 64;
10993 unsigned int code_len = code_bytes;
10994 unsigned char c;
10995 u8 *ip;
10996 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10997
10998 printk(KERN_EMERG "Stack:\n");
10999 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11000
11001 printk(KERN_EMERG "Code: ");
11002
11003 - ip = (u8 *)regs->ip - code_prologue;
11004 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11005 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11006 /* try starting at IP */
11007 - ip = (u8 *)regs->ip;
11008 + ip = (u8 *)regs->ip + cs_base;
11009 code_len = code_len - code_prologue + 1;
11010 }
11011 for (i = 0; i < code_len; i++, ip++) {
11012 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11013 printk(" Bad EIP value.");
11014 break;
11015 }
11016 - if (ip == (u8 *)regs->ip)
11017 + if (ip == (u8 *)regs->ip + cs_base)
11018 printk("<%02x> ", c);
11019 else
11020 printk("%02x ", c);
11021 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11022 {
11023 unsigned short ud2;
11024
11025 + ip = ktla_ktva(ip);
11026 if (ip < PAGE_OFFSET)
11027 return 0;
11028 if (probe_kernel_address((unsigned short *)ip, ud2))
11029 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_64.c linux-3.0.3/arch/x86/kernel/dumpstack_64.c
11030 --- linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11031 +++ linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11032 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11033 unsigned long *irq_stack_end =
11034 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11035 unsigned used = 0;
11036 - struct thread_info *tinfo;
11037 int graph = 0;
11038 unsigned long dummy;
11039 + void *stack_start;
11040
11041 if (!task)
11042 task = current;
11043 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11044 * current stack address. If the stacks consist of nested
11045 * exceptions
11046 */
11047 - tinfo = task_thread_info(task);
11048 for (;;) {
11049 char *id;
11050 unsigned long *estack_end;
11051 +
11052 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11053 &used, &id);
11054
11055 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11056 if (ops->stack(data, id) < 0)
11057 break;
11058
11059 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11060 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11061 data, estack_end, &graph);
11062 ops->stack(data, "<EOE>");
11063 /*
11064 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11065 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11066 if (ops->stack(data, "IRQ") < 0)
11067 break;
11068 - bp = ops->walk_stack(tinfo, stack, bp,
11069 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11070 ops, data, irq_stack_end, &graph);
11071 /*
11072 * We link to the next stack (which would be
11073 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11074 /*
11075 * This handles the process stack:
11076 */
11077 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11078 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11079 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11080 put_cpu();
11081 }
11082 EXPORT_SYMBOL(dump_trace);
11083 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack.c linux-3.0.3/arch/x86/kernel/dumpstack.c
11084 --- linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11085 +++ linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11086 @@ -2,6 +2,9 @@
11087 * Copyright (C) 1991, 1992 Linus Torvalds
11088 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11089 */
11090 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11091 +#define __INCLUDED_BY_HIDESYM 1
11092 +#endif
11093 #include <linux/kallsyms.h>
11094 #include <linux/kprobes.h>
11095 #include <linux/uaccess.h>
11096 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11097 static void
11098 print_ftrace_graph_addr(unsigned long addr, void *data,
11099 const struct stacktrace_ops *ops,
11100 - struct thread_info *tinfo, int *graph)
11101 + struct task_struct *task, int *graph)
11102 {
11103 - struct task_struct *task = tinfo->task;
11104 unsigned long ret_addr;
11105 int index = task->curr_ret_stack;
11106
11107 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11108 static inline void
11109 print_ftrace_graph_addr(unsigned long addr, void *data,
11110 const struct stacktrace_ops *ops,
11111 - struct thread_info *tinfo, int *graph)
11112 + struct task_struct *task, int *graph)
11113 { }
11114 #endif
11115
11116 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11117 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11118 */
11119
11120 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11121 - void *p, unsigned int size, void *end)
11122 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11123 {
11124 - void *t = tinfo;
11125 if (end) {
11126 if (p < end && p >= (end-THREAD_SIZE))
11127 return 1;
11128 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11129 }
11130
11131 unsigned long
11132 -print_context_stack(struct thread_info *tinfo,
11133 +print_context_stack(struct task_struct *task, void *stack_start,
11134 unsigned long *stack, unsigned long bp,
11135 const struct stacktrace_ops *ops, void *data,
11136 unsigned long *end, int *graph)
11137 {
11138 struct stack_frame *frame = (struct stack_frame *)bp;
11139
11140 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11141 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11142 unsigned long addr;
11143
11144 addr = *stack;
11145 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11146 } else {
11147 ops->address(data, addr, 0);
11148 }
11149 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11150 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11151 }
11152 stack++;
11153 }
11154 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11155 EXPORT_SYMBOL_GPL(print_context_stack);
11156
11157 unsigned long
11158 -print_context_stack_bp(struct thread_info *tinfo,
11159 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11160 unsigned long *stack, unsigned long bp,
11161 const struct stacktrace_ops *ops, void *data,
11162 unsigned long *end, int *graph)
11163 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11164 struct stack_frame *frame = (struct stack_frame *)bp;
11165 unsigned long *ret_addr = &frame->return_address;
11166
11167 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11168 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11169 unsigned long addr = *ret_addr;
11170
11171 if (!__kernel_text_address(addr))
11172 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11173 ops->address(data, addr, 1);
11174 frame = frame->next_frame;
11175 ret_addr = &frame->return_address;
11176 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11177 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11178 }
11179
11180 return (unsigned long)frame;
11181 @@ -186,7 +186,7 @@ void dump_stack(void)
11182
11183 bp = stack_frame(current, NULL);
11184 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11185 - current->pid, current->comm, print_tainted(),
11186 + task_pid_nr(current), current->comm, print_tainted(),
11187 init_utsname()->release,
11188 (int)strcspn(init_utsname()->version, " "),
11189 init_utsname()->version);
11190 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11191 }
11192 EXPORT_SYMBOL_GPL(oops_begin);
11193
11194 +extern void gr_handle_kernel_exploit(void);
11195 +
11196 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11197 {
11198 if (regs && kexec_should_crash(current))
11199 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11200 panic("Fatal exception in interrupt");
11201 if (panic_on_oops)
11202 panic("Fatal exception");
11203 - do_exit(signr);
11204 +
11205 + gr_handle_kernel_exploit();
11206 +
11207 + do_group_exit(signr);
11208 }
11209
11210 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11211 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11212
11213 show_registers(regs);
11214 #ifdef CONFIG_X86_32
11215 - if (user_mode_vm(regs)) {
11216 + if (user_mode(regs)) {
11217 sp = regs->sp;
11218 ss = regs->ss & 0xffff;
11219 } else {
11220 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11221 unsigned long flags = oops_begin();
11222 int sig = SIGSEGV;
11223
11224 - if (!user_mode_vm(regs))
11225 + if (!user_mode(regs))
11226 report_bug(regs->ip, regs);
11227
11228 if (__die(str, regs, err))
11229 diff -urNp linux-3.0.3/arch/x86/kernel/early_printk.c linux-3.0.3/arch/x86/kernel/early_printk.c
11230 --- linux-3.0.3/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11231 +++ linux-3.0.3/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11232 @@ -7,6 +7,7 @@
11233 #include <linux/pci_regs.h>
11234 #include <linux/pci_ids.h>
11235 #include <linux/errno.h>
11236 +#include <linux/sched.h>
11237 #include <asm/io.h>
11238 #include <asm/processor.h>
11239 #include <asm/fcntl.h>
11240 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11241 int n;
11242 va_list ap;
11243
11244 + pax_track_stack();
11245 +
11246 va_start(ap, fmt);
11247 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11248 early_console->write(early_console, buf, n);
11249 diff -urNp linux-3.0.3/arch/x86/kernel/entry_32.S linux-3.0.3/arch/x86/kernel/entry_32.S
11250 --- linux-3.0.3/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11251 +++ linux-3.0.3/arch/x86/kernel/entry_32.S 2011-08-23 21:48:14.000000000 -0400
11252 @@ -185,13 +185,146 @@
11253 /*CFI_REL_OFFSET gs, PT_GS*/
11254 .endm
11255 .macro SET_KERNEL_GS reg
11256 +
11257 +#ifdef CONFIG_CC_STACKPROTECTOR
11258 movl $(__KERNEL_STACK_CANARY), \reg
11259 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11260 + movl $(__USER_DS), \reg
11261 +#else
11262 + xorl \reg, \reg
11263 +#endif
11264 +
11265 movl \reg, %gs
11266 .endm
11267
11268 #endif /* CONFIG_X86_32_LAZY_GS */
11269
11270 -.macro SAVE_ALL
11271 +.macro pax_enter_kernel
11272 +#ifdef CONFIG_PAX_KERNEXEC
11273 + call pax_enter_kernel
11274 +#endif
11275 +.endm
11276 +
11277 +.macro pax_exit_kernel
11278 +#ifdef CONFIG_PAX_KERNEXEC
11279 + call pax_exit_kernel
11280 +#endif
11281 +.endm
11282 +
11283 +#ifdef CONFIG_PAX_KERNEXEC
11284 +ENTRY(pax_enter_kernel)
11285 +#ifdef CONFIG_PARAVIRT
11286 + pushl %eax
11287 + pushl %ecx
11288 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11289 + mov %eax, %esi
11290 +#else
11291 + mov %cr0, %esi
11292 +#endif
11293 + bts $16, %esi
11294 + jnc 1f
11295 + mov %cs, %esi
11296 + cmp $__KERNEL_CS, %esi
11297 + jz 3f
11298 + ljmp $__KERNEL_CS, $3f
11299 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11300 +2:
11301 +#ifdef CONFIG_PARAVIRT
11302 + mov %esi, %eax
11303 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11304 +#else
11305 + mov %esi, %cr0
11306 +#endif
11307 +3:
11308 +#ifdef CONFIG_PARAVIRT
11309 + popl %ecx
11310 + popl %eax
11311 +#endif
11312 + ret
11313 +ENDPROC(pax_enter_kernel)
11314 +
11315 +ENTRY(pax_exit_kernel)
11316 +#ifdef CONFIG_PARAVIRT
11317 + pushl %eax
11318 + pushl %ecx
11319 +#endif
11320 + mov %cs, %esi
11321 + cmp $__KERNEXEC_KERNEL_CS, %esi
11322 + jnz 2f
11323 +#ifdef CONFIG_PARAVIRT
11324 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11325 + mov %eax, %esi
11326 +#else
11327 + mov %cr0, %esi
11328 +#endif
11329 + btr $16, %esi
11330 + ljmp $__KERNEL_CS, $1f
11331 +1:
11332 +#ifdef CONFIG_PARAVIRT
11333 + mov %esi, %eax
11334 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11335 +#else
11336 + mov %esi, %cr0
11337 +#endif
11338 +2:
11339 +#ifdef CONFIG_PARAVIRT
11340 + popl %ecx
11341 + popl %eax
11342 +#endif
11343 + ret
11344 +ENDPROC(pax_exit_kernel)
11345 +#endif
11346 +
11347 +.macro pax_erase_kstack
11348 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11349 + call pax_erase_kstack
11350 +#endif
11351 +.endm
11352 +
11353 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11354 +/*
11355 + * ebp: thread_info
11356 + * ecx, edx: can be clobbered
11357 + */
11358 +ENTRY(pax_erase_kstack)
11359 + pushl %edi
11360 + pushl %eax
11361 +
11362 + mov TI_lowest_stack(%ebp), %edi
11363 + mov $-0xBEEF, %eax
11364 + std
11365 +
11366 +1: mov %edi, %ecx
11367 + and $THREAD_SIZE_asm - 1, %ecx
11368 + shr $2, %ecx
11369 + repne scasl
11370 + jecxz 2f
11371 +
11372 + cmp $2*16, %ecx
11373 + jc 2f
11374 +
11375 + mov $2*16, %ecx
11376 + repe scasl
11377 + jecxz 2f
11378 + jne 1b
11379 +
11380 +2: cld
11381 + mov %esp, %ecx
11382 + sub %edi, %ecx
11383 + shr $2, %ecx
11384 + rep stosl
11385 +
11386 + mov TI_task_thread_sp0(%ebp), %edi
11387 + sub $128, %edi
11388 + mov %edi, TI_lowest_stack(%ebp)
11389 +
11390 + popl %eax
11391 + popl %edi
11392 + ret
11393 +ENDPROC(pax_erase_kstack)
11394 +#endif
11395 +
11396 +.macro __SAVE_ALL _DS
11397 cld
11398 PUSH_GS
11399 pushl_cfi %fs
11400 @@ -214,7 +347,7 @@
11401 CFI_REL_OFFSET ecx, 0
11402 pushl_cfi %ebx
11403 CFI_REL_OFFSET ebx, 0
11404 - movl $(__USER_DS), %edx
11405 + movl $\_DS, %edx
11406 movl %edx, %ds
11407 movl %edx, %es
11408 movl $(__KERNEL_PERCPU), %edx
11409 @@ -222,6 +355,15 @@
11410 SET_KERNEL_GS %edx
11411 .endm
11412
11413 +.macro SAVE_ALL
11414 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11415 + __SAVE_ALL __KERNEL_DS
11416 + pax_enter_kernel
11417 +#else
11418 + __SAVE_ALL __USER_DS
11419 +#endif
11420 +.endm
11421 +
11422 .macro RESTORE_INT_REGS
11423 popl_cfi %ebx
11424 CFI_RESTORE ebx
11425 @@ -332,7 +474,15 @@ check_userspace:
11426 movb PT_CS(%esp), %al
11427 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11428 cmpl $USER_RPL, %eax
11429 +
11430 +#ifdef CONFIG_PAX_KERNEXEC
11431 + jae resume_userspace
11432 +
11433 + PAX_EXIT_KERNEL
11434 + jmp resume_kernel
11435 +#else
11436 jb resume_kernel # not returning to v8086 or userspace
11437 +#endif
11438
11439 ENTRY(resume_userspace)
11440 LOCKDEP_SYS_EXIT
11441 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11442 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11443 # int/exception return?
11444 jne work_pending
11445 - jmp restore_all
11446 + jmp restore_all_pax
11447 END(ret_from_exception)
11448
11449 #ifdef CONFIG_PREEMPT
11450 @@ -394,23 +544,34 @@ sysenter_past_esp:
11451 /*CFI_REL_OFFSET cs, 0*/
11452 /*
11453 * Push current_thread_info()->sysenter_return to the stack.
11454 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11455 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11456 */
11457 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11458 + pushl_cfi $0
11459 CFI_REL_OFFSET eip, 0
11460
11461 pushl_cfi %eax
11462 SAVE_ALL
11463 + GET_THREAD_INFO(%ebp)
11464 + movl TI_sysenter_return(%ebp),%ebp
11465 + movl %ebp,PT_EIP(%esp)
11466 ENABLE_INTERRUPTS(CLBR_NONE)
11467
11468 /*
11469 * Load the potential sixth argument from user stack.
11470 * Careful about security.
11471 */
11472 + movl PT_OLDESP(%esp),%ebp
11473 +
11474 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11475 + mov PT_OLDSS(%esp),%ds
11476 +1: movl %ds:(%ebp),%ebp
11477 + push %ss
11478 + pop %ds
11479 +#else
11480 cmpl $__PAGE_OFFSET-3,%ebp
11481 jae syscall_fault
11482 1: movl (%ebp),%ebp
11483 +#endif
11484 +
11485 movl %ebp,PT_EBP(%esp)
11486 .section __ex_table,"a"
11487 .align 4
11488 @@ -433,12 +594,23 @@ sysenter_do_call:
11489 testl $_TIF_ALLWORK_MASK, %ecx
11490 jne sysexit_audit
11491 sysenter_exit:
11492 +
11493 +#ifdef CONFIG_PAX_RANDKSTACK
11494 + pushl_cfi %eax
11495 + call pax_randomize_kstack
11496 + popl_cfi %eax
11497 +#endif
11498 +
11499 + pax_erase_kstack
11500 +
11501 /* if something modifies registers it must also disable sysexit */
11502 movl PT_EIP(%esp), %edx
11503 movl PT_OLDESP(%esp), %ecx
11504 xorl %ebp,%ebp
11505 TRACE_IRQS_ON
11506 1: mov PT_FS(%esp), %fs
11507 +2: mov PT_DS(%esp), %ds
11508 +3: mov PT_ES(%esp), %es
11509 PTGS_TO_GS
11510 ENABLE_INTERRUPTS_SYSEXIT
11511
11512 @@ -455,6 +627,9 @@ sysenter_audit:
11513 movl %eax,%edx /* 2nd arg: syscall number */
11514 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11515 call audit_syscall_entry
11516 +
11517 + pax_erase_kstack
11518 +
11519 pushl_cfi %ebx
11520 movl PT_EAX(%esp),%eax /* reload syscall number */
11521 jmp sysenter_do_call
11522 @@ -481,11 +656,17 @@ sysexit_audit:
11523
11524 CFI_ENDPROC
11525 .pushsection .fixup,"ax"
11526 -2: movl $0,PT_FS(%esp)
11527 +4: movl $0,PT_FS(%esp)
11528 + jmp 1b
11529 +5: movl $0,PT_DS(%esp)
11530 + jmp 1b
11531 +6: movl $0,PT_ES(%esp)
11532 jmp 1b
11533 .section __ex_table,"a"
11534 .align 4
11535 - .long 1b,2b
11536 + .long 1b,4b
11537 + .long 2b,5b
11538 + .long 3b,6b
11539 .popsection
11540 PTGS_TO_GS_EX
11541 ENDPROC(ia32_sysenter_target)
11542 @@ -518,6 +699,14 @@ syscall_exit:
11543 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11544 jne syscall_exit_work
11545
11546 +restore_all_pax:
11547 +
11548 +#ifdef CONFIG_PAX_RANDKSTACK
11549 + call pax_randomize_kstack
11550 +#endif
11551 +
11552 + pax_erase_kstack
11553 +
11554 restore_all:
11555 TRACE_IRQS_IRET
11556 restore_all_notrace:
11557 @@ -577,14 +766,34 @@ ldt_ss:
11558 * compensating for the offset by changing to the ESPFIX segment with
11559 * a base address that matches for the difference.
11560 */
11561 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11562 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11563 mov %esp, %edx /* load kernel esp */
11564 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11565 mov %dx, %ax /* eax: new kernel esp */
11566 sub %eax, %edx /* offset (low word is 0) */
11567 +#ifdef CONFIG_SMP
11568 + movl PER_CPU_VAR(cpu_number), %ebx
11569 + shll $PAGE_SHIFT_asm, %ebx
11570 + addl $cpu_gdt_table, %ebx
11571 +#else
11572 + movl $cpu_gdt_table, %ebx
11573 +#endif
11574 shr $16, %edx
11575 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11576 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11577 +
11578 +#ifdef CONFIG_PAX_KERNEXEC
11579 + mov %cr0, %esi
11580 + btr $16, %esi
11581 + mov %esi, %cr0
11582 +#endif
11583 +
11584 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11585 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11586 +
11587 +#ifdef CONFIG_PAX_KERNEXEC
11588 + bts $16, %esi
11589 + mov %esi, %cr0
11590 +#endif
11591 +
11592 pushl_cfi $__ESPFIX_SS
11593 pushl_cfi %eax /* new kernel esp */
11594 /* Disable interrupts, but do not irqtrace this section: we
11595 @@ -613,29 +822,23 @@ work_resched:
11596 movl TI_flags(%ebp), %ecx
11597 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11598 # than syscall tracing?
11599 - jz restore_all
11600 + jz restore_all_pax
11601 testb $_TIF_NEED_RESCHED, %cl
11602 jnz work_resched
11603
11604 work_notifysig: # deal with pending signals and
11605 # notify-resume requests
11606 + movl %esp, %eax
11607 #ifdef CONFIG_VM86
11608 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11609 - movl %esp, %eax
11610 - jne work_notifysig_v86 # returning to kernel-space or
11611 + jz 1f # returning to kernel-space or
11612 # vm86-space
11613 - xorl %edx, %edx
11614 - call do_notify_resume
11615 - jmp resume_userspace_sig
11616
11617 - ALIGN
11618 -work_notifysig_v86:
11619 pushl_cfi %ecx # save ti_flags for do_notify_resume
11620 call save_v86_state # %eax contains pt_regs pointer
11621 popl_cfi %ecx
11622 movl %eax, %esp
11623 -#else
11624 - movl %esp, %eax
11625 +1:
11626 #endif
11627 xorl %edx, %edx
11628 call do_notify_resume
11629 @@ -648,6 +851,9 @@ syscall_trace_entry:
11630 movl $-ENOSYS,PT_EAX(%esp)
11631 movl %esp, %eax
11632 call syscall_trace_enter
11633 +
11634 + pax_erase_kstack
11635 +
11636 /* What it returned is what we'll actually use. */
11637 cmpl $(nr_syscalls), %eax
11638 jnae syscall_call
11639 @@ -670,6 +876,10 @@ END(syscall_exit_work)
11640
11641 RING0_INT_FRAME # can't unwind into user space anyway
11642 syscall_fault:
11643 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11644 + push %ss
11645 + pop %ds
11646 +#endif
11647 GET_THREAD_INFO(%ebp)
11648 movl $-EFAULT,PT_EAX(%esp)
11649 jmp resume_userspace
11650 @@ -752,6 +962,36 @@ ptregs_clone:
11651 CFI_ENDPROC
11652 ENDPROC(ptregs_clone)
11653
11654 + ALIGN;
11655 +ENTRY(kernel_execve)
11656 + CFI_STARTPROC
11657 + pushl_cfi %ebp
11658 + sub $PT_OLDSS+4,%esp
11659 + pushl_cfi %edi
11660 + pushl_cfi %ecx
11661 + pushl_cfi %eax
11662 + lea 3*4(%esp),%edi
11663 + mov $PT_OLDSS/4+1,%ecx
11664 + xorl %eax,%eax
11665 + rep stosl
11666 + popl_cfi %eax
11667 + popl_cfi %ecx
11668 + popl_cfi %edi
11669 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11670 + pushl_cfi %esp
11671 + call sys_execve
11672 + add $4,%esp
11673 + CFI_ADJUST_CFA_OFFSET -4
11674 + GET_THREAD_INFO(%ebp)
11675 + test %eax,%eax
11676 + jz syscall_exit
11677 + add $PT_OLDSS+4,%esp
11678 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11679 + popl_cfi %ebp
11680 + ret
11681 + CFI_ENDPROC
11682 +ENDPROC(kernel_execve)
11683 +
11684 .macro FIXUP_ESPFIX_STACK
11685 /*
11686 * Switch back for ESPFIX stack to the normal zerobased stack
11687 @@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11688 * normal stack and adjusts ESP with the matching offset.
11689 */
11690 /* fixup the stack */
11691 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11692 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11693 +#ifdef CONFIG_SMP
11694 + movl PER_CPU_VAR(cpu_number), %ebx
11695 + shll $PAGE_SHIFT_asm, %ebx
11696 + addl $cpu_gdt_table, %ebx
11697 +#else
11698 + movl $cpu_gdt_table, %ebx
11699 +#endif
11700 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11701 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11702 shl $16, %eax
11703 addl %esp, %eax /* the adjusted stack pointer */
11704 pushl_cfi $__KERNEL_DS
11705 @@ -1213,7 +1460,6 @@ return_to_handler:
11706 jmp *%ecx
11707 #endif
11708
11709 -.section .rodata,"a"
11710 #include "syscall_table_32.S"
11711
11712 syscall_table_size=(.-sys_call_table)
11713 @@ -1259,9 +1505,12 @@ error_code:
11714 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11715 REG_TO_PTGS %ecx
11716 SET_KERNEL_GS %ecx
11717 - movl $(__USER_DS), %ecx
11718 + movl $(__KERNEL_DS), %ecx
11719 movl %ecx, %ds
11720 movl %ecx, %es
11721 +
11722 + pax_enter_kernel
11723 +
11724 TRACE_IRQS_OFF
11725 movl %esp,%eax # pt_regs pointer
11726 call *%edi
11727 @@ -1346,6 +1595,9 @@ nmi_stack_correct:
11728 xorl %edx,%edx # zero error code
11729 movl %esp,%eax # pt_regs pointer
11730 call do_nmi
11731 +
11732 + pax_exit_kernel
11733 +
11734 jmp restore_all_notrace
11735 CFI_ENDPROC
11736
11737 @@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11738 FIXUP_ESPFIX_STACK # %eax == %esp
11739 xorl %edx,%edx # zero error code
11740 call do_nmi
11741 +
11742 + pax_exit_kernel
11743 +
11744 RESTORE_REGS
11745 lss 12+4(%esp), %esp # back to espfix stack
11746 CFI_ADJUST_CFA_OFFSET -24
11747 diff -urNp linux-3.0.3/arch/x86/kernel/entry_64.S linux-3.0.3/arch/x86/kernel/entry_64.S
11748 --- linux-3.0.3/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11749 +++ linux-3.0.3/arch/x86/kernel/entry_64.S 2011-08-26 19:49:56.000000000 -0400
11750 @@ -53,6 +53,7 @@
11751 #include <asm/paravirt.h>
11752 #include <asm/ftrace.h>
11753 #include <asm/percpu.h>
11754 +#include <asm/pgtable.h>
11755
11756 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11757 #include <linux/elf-em.h>
11758 @@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11759 ENDPROC(native_usergs_sysret64)
11760 #endif /* CONFIG_PARAVIRT */
11761
11762 + .macro ljmpq sel, off
11763 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11764 + .byte 0x48; ljmp *1234f(%rip)
11765 + .pushsection .rodata
11766 + .align 16
11767 + 1234: .quad \off; .word \sel
11768 + .popsection
11769 +#else
11770 + pushq $\sel
11771 + pushq $\off
11772 + lretq
11773 +#endif
11774 + .endm
11775 +
11776 + .macro pax_enter_kernel
11777 +#ifdef CONFIG_PAX_KERNEXEC
11778 + call pax_enter_kernel
11779 +#endif
11780 + .endm
11781 +
11782 + .macro pax_exit_kernel
11783 +#ifdef CONFIG_PAX_KERNEXEC
11784 + call pax_exit_kernel
11785 +#endif
11786 + .endm
11787 +
11788 +#ifdef CONFIG_PAX_KERNEXEC
11789 +ENTRY(pax_enter_kernel)
11790 + pushq %rdi
11791 +
11792 +#ifdef CONFIG_PARAVIRT
11793 + PV_SAVE_REGS(CLBR_RDI)
11794 +#endif
11795 +
11796 + GET_CR0_INTO_RDI
11797 + bts $16,%rdi
11798 + jnc 1f
11799 + mov %cs,%edi
11800 + cmp $__KERNEL_CS,%edi
11801 + jz 3f
11802 + ljmpq __KERNEL_CS,3f
11803 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11804 +2: SET_RDI_INTO_CR0
11805 +3:
11806 +
11807 +#ifdef CONFIG_PARAVIRT
11808 + PV_RESTORE_REGS(CLBR_RDI)
11809 +#endif
11810 +
11811 + popq %rdi
11812 + retq
11813 +ENDPROC(pax_enter_kernel)
11814 +
11815 +ENTRY(pax_exit_kernel)
11816 + pushq %rdi
11817 +
11818 +#ifdef CONFIG_PARAVIRT
11819 + PV_SAVE_REGS(CLBR_RDI)
11820 +#endif
11821 +
11822 + mov %cs,%rdi
11823 + cmp $__KERNEXEC_KERNEL_CS,%edi
11824 + jnz 2f
11825 + GET_CR0_INTO_RDI
11826 + btr $16,%rdi
11827 + ljmpq __KERNEL_CS,1f
11828 +1: SET_RDI_INTO_CR0
11829 +2:
11830 +
11831 +#ifdef CONFIG_PARAVIRT
11832 + PV_RESTORE_REGS(CLBR_RDI);
11833 +#endif
11834 +
11835 + popq %rdi
11836 + retq
11837 +ENDPROC(pax_exit_kernel)
11838 +#endif
11839 +
11840 + .macro pax_enter_kernel_user
11841 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11842 + call pax_enter_kernel_user
11843 +#endif
11844 + .endm
11845 +
11846 + .macro pax_exit_kernel_user
11847 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11848 + call pax_exit_kernel_user
11849 +#endif
11850 +#ifdef CONFIG_PAX_RANDKSTACK
11851 + push %rax
11852 + call pax_randomize_kstack
11853 + pop %rax
11854 +#endif
11855 + .endm
11856 +
11857 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11858 +ENTRY(pax_enter_kernel_user)
11859 + pushq %rdi
11860 + pushq %rbx
11861 +
11862 +#ifdef CONFIG_PARAVIRT
11863 + PV_SAVE_REGS(CLBR_RDI)
11864 +#endif
11865 +
11866 + GET_CR3_INTO_RDI
11867 + mov %rdi,%rbx
11868 + add $__START_KERNEL_map,%rbx
11869 + sub phys_base(%rip),%rbx
11870 +
11871 +#ifdef CONFIG_PARAVIRT
11872 + pushq %rdi
11873 + cmpl $0, pv_info+PARAVIRT_enabled
11874 + jz 1f
11875 + i = 0
11876 + .rept USER_PGD_PTRS
11877 + mov i*8(%rbx),%rsi
11878 + mov $0,%sil
11879 + lea i*8(%rbx),%rdi
11880 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11881 + i = i + 1
11882 + .endr
11883 + jmp 2f
11884 +1:
11885 +#endif
11886 +
11887 + i = 0
11888 + .rept USER_PGD_PTRS
11889 + movb $0,i*8(%rbx)
11890 + i = i + 1
11891 + .endr
11892 +
11893 +#ifdef CONFIG_PARAVIRT
11894 +2: popq %rdi
11895 +#endif
11896 + SET_RDI_INTO_CR3
11897 +
11898 +#ifdef CONFIG_PAX_KERNEXEC
11899 + GET_CR0_INTO_RDI
11900 + bts $16,%rdi
11901 + SET_RDI_INTO_CR0
11902 +#endif
11903 +
11904 +#ifdef CONFIG_PARAVIRT
11905 + PV_RESTORE_REGS(CLBR_RDI)
11906 +#endif
11907 +
11908 + popq %rbx
11909 + popq %rdi
11910 + retq
11911 +ENDPROC(pax_enter_kernel_user)
11912 +
11913 +ENTRY(pax_exit_kernel_user)
11914 + push %rdi
11915 +
11916 +#ifdef CONFIG_PARAVIRT
11917 + pushq %rbx
11918 + PV_SAVE_REGS(CLBR_RDI)
11919 +#endif
11920 +
11921 +#ifdef CONFIG_PAX_KERNEXEC
11922 + GET_CR0_INTO_RDI
11923 + btr $16,%rdi
11924 + SET_RDI_INTO_CR0
11925 +#endif
11926 +
11927 + GET_CR3_INTO_RDI
11928 + add $__START_KERNEL_map,%rdi
11929 + sub phys_base(%rip),%rdi
11930 +
11931 +#ifdef CONFIG_PARAVIRT
11932 + cmpl $0, pv_info+PARAVIRT_enabled
11933 + jz 1f
11934 + mov %rdi,%rbx
11935 + i = 0
11936 + .rept USER_PGD_PTRS
11937 + mov i*8(%rbx),%rsi
11938 + mov $0x67,%sil
11939 + lea i*8(%rbx),%rdi
11940 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11941 + i = i + 1
11942 + .endr
11943 + jmp 2f
11944 +1:
11945 +#endif
11946 +
11947 + i = 0
11948 + .rept USER_PGD_PTRS
11949 + movb $0x67,i*8(%rdi)
11950 + i = i + 1
11951 + .endr
11952 +
11953 +#ifdef CONFIG_PARAVIRT
11954 +2: PV_RESTORE_REGS(CLBR_RDI)
11955 + popq %rbx
11956 +#endif
11957 +
11958 + popq %rdi
11959 + retq
11960 +ENDPROC(pax_exit_kernel_user)
11961 +#endif
11962 +
11963 + .macro pax_erase_kstack
11964 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11965 + call pax_erase_kstack
11966 +#endif
11967 + .endm
11968 +
11969 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11970 +/*
11971 + * r10: thread_info
11972 + * rcx, rdx: can be clobbered
11973 + */
11974 +ENTRY(pax_erase_kstack)
11975 + pushq %rdi
11976 + pushq %rax
11977 + pushq %r10
11978 +
11979 + GET_THREAD_INFO(%r10)
11980 + mov TI_lowest_stack(%r10), %rdi
11981 + mov $-0xBEEF, %rax
11982 + std
11983 +
11984 +1: mov %edi, %ecx
11985 + and $THREAD_SIZE_asm - 1, %ecx
11986 + shr $3, %ecx
11987 + repne scasq
11988 + jecxz 2f
11989 +
11990 + cmp $2*8, %ecx
11991 + jc 2f
11992 +
11993 + mov $2*8, %ecx
11994 + repe scasq
11995 + jecxz 2f
11996 + jne 1b
11997 +
11998 +2: cld
11999 + mov %esp, %ecx
12000 + sub %edi, %ecx
12001 +
12002 + cmp $THREAD_SIZE_asm, %rcx
12003 + jb 3f
12004 + ud2
12005 +3:
12006 +
12007 + shr $3, %ecx
12008 + rep stosq
12009 +
12010 + mov TI_task_thread_sp0(%r10), %rdi
12011 + sub $256, %rdi
12012 + mov %rdi, TI_lowest_stack(%r10)
12013 +
12014 + popq %r10
12015 + popq %rax
12016 + popq %rdi
12017 + ret
12018 +ENDPROC(pax_erase_kstack)
12019 +#endif
12020
12021 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12022 #ifdef CONFIG_TRACE_IRQFLAGS
12023 @@ -318,7 +577,7 @@ ENTRY(save_args)
12024 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12025 movq_cfi rbp, 8 /* push %rbp */
12026 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12027 - testl $3, CS(%rdi)
12028 + testb $3, CS(%rdi)
12029 je 1f
12030 SWAPGS
12031 /*
12032 @@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12033
12034 RESTORE_REST
12035
12036 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12037 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12038 je int_ret_from_sys_call
12039
12040 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12041 @@ -455,7 +714,7 @@ END(ret_from_fork)
12042 ENTRY(system_call)
12043 CFI_STARTPROC simple
12044 CFI_SIGNAL_FRAME
12045 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12046 + CFI_DEF_CFA rsp,0
12047 CFI_REGISTER rip,rcx
12048 /*CFI_REGISTER rflags,r11*/
12049 SWAPGS_UNSAFE_STACK
12050 @@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12051
12052 movq %rsp,PER_CPU_VAR(old_rsp)
12053 movq PER_CPU_VAR(kernel_stack),%rsp
12054 + pax_enter_kernel_user
12055 /*
12056 * No need to follow this irqs off/on section - it's straight
12057 * and short:
12058 */
12059 ENABLE_INTERRUPTS(CLBR_NONE)
12060 - SAVE_ARGS 8,1
12061 + SAVE_ARGS 8*6,1
12062 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12063 movq %rcx,RIP-ARGOFFSET(%rsp)
12064 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12065 @@ -502,6 +762,8 @@ sysret_check:
12066 andl %edi,%edx
12067 jnz sysret_careful
12068 CFI_REMEMBER_STATE
12069 + pax_exit_kernel_user
12070 + pax_erase_kstack
12071 /*
12072 * sysretq will re-enable interrupts:
12073 */
12074 @@ -560,6 +822,9 @@ auditsys:
12075 movq %rax,%rsi /* 2nd arg: syscall number */
12076 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12077 call audit_syscall_entry
12078 +
12079 + pax_erase_kstack
12080 +
12081 LOAD_ARGS 0 /* reload call-clobbered registers */
12082 jmp system_call_fastpath
12083
12084 @@ -590,6 +855,9 @@ tracesys:
12085 FIXUP_TOP_OF_STACK %rdi
12086 movq %rsp,%rdi
12087 call syscall_trace_enter
12088 +
12089 + pax_erase_kstack
12090 +
12091 /*
12092 * Reload arg registers from stack in case ptrace changed them.
12093 * We don't reload %rax because syscall_trace_enter() returned
12094 @@ -611,7 +879,7 @@ tracesys:
12095 GLOBAL(int_ret_from_sys_call)
12096 DISABLE_INTERRUPTS(CLBR_NONE)
12097 TRACE_IRQS_OFF
12098 - testl $3,CS-ARGOFFSET(%rsp)
12099 + testb $3,CS-ARGOFFSET(%rsp)
12100 je retint_restore_args
12101 movl $_TIF_ALLWORK_MASK,%edi
12102 /* edi: mask to check */
12103 @@ -793,6 +1061,16 @@ END(interrupt)
12104 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12105 call save_args
12106 PARTIAL_FRAME 0
12107 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12108 + testb $3, CS(%rdi)
12109 + jnz 1f
12110 + pax_enter_kernel
12111 + jmp 2f
12112 +1: pax_enter_kernel_user
12113 +2:
12114 +#else
12115 + pax_enter_kernel
12116 +#endif
12117 call \func
12118 .endm
12119
12120 @@ -825,7 +1103,7 @@ ret_from_intr:
12121 CFI_ADJUST_CFA_OFFSET -8
12122 exit_intr:
12123 GET_THREAD_INFO(%rcx)
12124 - testl $3,CS-ARGOFFSET(%rsp)
12125 + testb $3,CS-ARGOFFSET(%rsp)
12126 je retint_kernel
12127
12128 /* Interrupt came from user space */
12129 @@ -847,12 +1125,15 @@ retint_swapgs: /* return to user-space
12130 * The iretq could re-enable interrupts:
12131 */
12132 DISABLE_INTERRUPTS(CLBR_ANY)
12133 + pax_exit_kernel_user
12134 + pax_erase_kstack
12135 TRACE_IRQS_IRETQ
12136 SWAPGS
12137 jmp restore_args
12138
12139 retint_restore_args: /* return to kernel space */
12140 DISABLE_INTERRUPTS(CLBR_ANY)
12141 + pax_exit_kernel
12142 /*
12143 * The iretq could re-enable interrupts:
12144 */
12145 @@ -1027,6 +1308,16 @@ ENTRY(\sym)
12146 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12147 call error_entry
12148 DEFAULT_FRAME 0
12149 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12150 + testb $3, CS(%rsp)
12151 + jnz 1f
12152 + pax_enter_kernel
12153 + jmp 2f
12154 +1: pax_enter_kernel_user
12155 +2:
12156 +#else
12157 + pax_enter_kernel
12158 +#endif
12159 movq %rsp,%rdi /* pt_regs pointer */
12160 xorl %esi,%esi /* no error code */
12161 call \do_sym
12162 @@ -1044,6 +1335,16 @@ ENTRY(\sym)
12163 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12164 call save_paranoid
12165 TRACE_IRQS_OFF
12166 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12167 + testb $3, CS(%rsp)
12168 + jnz 1f
12169 + pax_enter_kernel
12170 + jmp 2f
12171 +1: pax_enter_kernel_user
12172 +2:
12173 +#else
12174 + pax_enter_kernel
12175 +#endif
12176 movq %rsp,%rdi /* pt_regs pointer */
12177 xorl %esi,%esi /* no error code */
12178 call \do_sym
12179 @@ -1052,7 +1353,7 @@ ENTRY(\sym)
12180 END(\sym)
12181 .endm
12182
12183 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12184 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12185 .macro paranoidzeroentry_ist sym do_sym ist
12186 ENTRY(\sym)
12187 INTR_FRAME
12188 @@ -1062,8 +1363,24 @@ ENTRY(\sym)
12189 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12190 call save_paranoid
12191 TRACE_IRQS_OFF
12192 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12193 + testb $3, CS(%rsp)
12194 + jnz 1f
12195 + pax_enter_kernel
12196 + jmp 2f
12197 +1: pax_enter_kernel_user
12198 +2:
12199 +#else
12200 + pax_enter_kernel
12201 +#endif
12202 movq %rsp,%rdi /* pt_regs pointer */
12203 xorl %esi,%esi /* no error code */
12204 +#ifdef CONFIG_SMP
12205 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12206 + lea init_tss(%r12), %r12
12207 +#else
12208 + lea init_tss(%rip), %r12
12209 +#endif
12210 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12211 call \do_sym
12212 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12213 @@ -1080,6 +1397,16 @@ ENTRY(\sym)
12214 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12215 call error_entry
12216 DEFAULT_FRAME 0
12217 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12218 + testb $3, CS(%rsp)
12219 + jnz 1f
12220 + pax_enter_kernel
12221 + jmp 2f
12222 +1: pax_enter_kernel_user
12223 +2:
12224 +#else
12225 + pax_enter_kernel
12226 +#endif
12227 movq %rsp,%rdi /* pt_regs pointer */
12228 movq ORIG_RAX(%rsp),%rsi /* get error code */
12229 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12230 @@ -1099,6 +1426,16 @@ ENTRY(\sym)
12231 call save_paranoid
12232 DEFAULT_FRAME 0
12233 TRACE_IRQS_OFF
12234 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12235 + testb $3, CS(%rsp)
12236 + jnz 1f
12237 + pax_enter_kernel
12238 + jmp 2f
12239 +1: pax_enter_kernel_user
12240 +2:
12241 +#else
12242 + pax_enter_kernel
12243 +#endif
12244 movq %rsp,%rdi /* pt_regs pointer */
12245 movq ORIG_RAX(%rsp),%rsi /* get error code */
12246 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12247 @@ -1361,14 +1698,27 @@ ENTRY(paranoid_exit)
12248 TRACE_IRQS_OFF
12249 testl %ebx,%ebx /* swapgs needed? */
12250 jnz paranoid_restore
12251 - testl $3,CS(%rsp)
12252 + testb $3,CS(%rsp)
12253 jnz paranoid_userspace
12254 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12255 + pax_exit_kernel
12256 + TRACE_IRQS_IRETQ 0
12257 + SWAPGS_UNSAFE_STACK
12258 + RESTORE_ALL 8
12259 + jmp irq_return
12260 +#endif
12261 paranoid_swapgs:
12262 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12263 + pax_exit_kernel_user
12264 +#else
12265 + pax_exit_kernel
12266 +#endif
12267 TRACE_IRQS_IRETQ 0
12268 SWAPGS_UNSAFE_STACK
12269 RESTORE_ALL 8
12270 jmp irq_return
12271 paranoid_restore:
12272 + pax_exit_kernel
12273 TRACE_IRQS_IRETQ 0
12274 RESTORE_ALL 8
12275 jmp irq_return
12276 @@ -1426,7 +1776,7 @@ ENTRY(error_entry)
12277 movq_cfi r14, R14+8
12278 movq_cfi r15, R15+8
12279 xorl %ebx,%ebx
12280 - testl $3,CS+8(%rsp)
12281 + testb $3,CS+8(%rsp)
12282 je error_kernelspace
12283 error_swapgs:
12284 SWAPGS
12285 @@ -1490,6 +1840,16 @@ ENTRY(nmi)
12286 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12287 call save_paranoid
12288 DEFAULT_FRAME 0
12289 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12290 + testb $3, CS(%rsp)
12291 + jnz 1f
12292 + pax_enter_kernel
12293 + jmp 2f
12294 +1: pax_enter_kernel_user
12295 +2:
12296 +#else
12297 + pax_enter_kernel
12298 +#endif
12299 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12300 movq %rsp,%rdi
12301 movq $-1,%rsi
12302 @@ -1500,11 +1860,25 @@ ENTRY(nmi)
12303 DISABLE_INTERRUPTS(CLBR_NONE)
12304 testl %ebx,%ebx /* swapgs needed? */
12305 jnz nmi_restore
12306 - testl $3,CS(%rsp)
12307 + testb $3,CS(%rsp)
12308 jnz nmi_userspace
12309 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12310 + pax_exit_kernel
12311 + SWAPGS_UNSAFE_STACK
12312 + RESTORE_ALL 8
12313 + jmp irq_return
12314 +#endif
12315 nmi_swapgs:
12316 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12317 + pax_exit_kernel_user
12318 +#else
12319 + pax_exit_kernel
12320 +#endif
12321 SWAPGS_UNSAFE_STACK
12322 + RESTORE_ALL 8
12323 + jmp irq_return
12324 nmi_restore:
12325 + pax_exit_kernel
12326 RESTORE_ALL 8
12327 jmp irq_return
12328 nmi_userspace:
12329 diff -urNp linux-3.0.3/arch/x86/kernel/ftrace.c linux-3.0.3/arch/x86/kernel/ftrace.c
12330 --- linux-3.0.3/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12331 +++ linux-3.0.3/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12332 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12333 static const void *mod_code_newcode; /* holds the text to write to the IP */
12334
12335 static unsigned nmi_wait_count;
12336 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12337 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12338
12339 int ftrace_arch_read_dyn_info(char *buf, int size)
12340 {
12341 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12342
12343 r = snprintf(buf, size, "%u %u",
12344 nmi_wait_count,
12345 - atomic_read(&nmi_update_count));
12346 + atomic_read_unchecked(&nmi_update_count));
12347 return r;
12348 }
12349
12350 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12351
12352 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12353 smp_rmb();
12354 + pax_open_kernel();
12355 ftrace_mod_code();
12356 - atomic_inc(&nmi_update_count);
12357 + pax_close_kernel();
12358 + atomic_inc_unchecked(&nmi_update_count);
12359 }
12360 /* Must have previous changes seen before executions */
12361 smp_mb();
12362 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12363 {
12364 unsigned char replaced[MCOUNT_INSN_SIZE];
12365
12366 + ip = ktla_ktva(ip);
12367 +
12368 /*
12369 * Note: Due to modules and __init, code can
12370 * disappear and change, we need to protect against faulting
12371 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12372 unsigned char old[MCOUNT_INSN_SIZE], *new;
12373 int ret;
12374
12375 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12376 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12377 new = ftrace_call_replace(ip, (unsigned long)func);
12378 ret = ftrace_modify_code(ip, old, new);
12379
12380 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12381 {
12382 unsigned char code[MCOUNT_INSN_SIZE];
12383
12384 + ip = ktla_ktva(ip);
12385 +
12386 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12387 return -EFAULT;
12388
12389 diff -urNp linux-3.0.3/arch/x86/kernel/head32.c linux-3.0.3/arch/x86/kernel/head32.c
12390 --- linux-3.0.3/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12391 +++ linux-3.0.3/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12392 @@ -19,6 +19,7 @@
12393 #include <asm/io_apic.h>
12394 #include <asm/bios_ebda.h>
12395 #include <asm/tlbflush.h>
12396 +#include <asm/boot.h>
12397
12398 static void __init i386_default_early_setup(void)
12399 {
12400 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12401 {
12402 memblock_init();
12403
12404 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12405 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12406
12407 #ifdef CONFIG_BLK_DEV_INITRD
12408 /* Reserve INITRD */
12409 diff -urNp linux-3.0.3/arch/x86/kernel/head_32.S linux-3.0.3/arch/x86/kernel/head_32.S
12410 --- linux-3.0.3/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12411 +++ linux-3.0.3/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12412 @@ -25,6 +25,12 @@
12413 /* Physical address */
12414 #define pa(X) ((X) - __PAGE_OFFSET)
12415
12416 +#ifdef CONFIG_PAX_KERNEXEC
12417 +#define ta(X) (X)
12418 +#else
12419 +#define ta(X) ((X) - __PAGE_OFFSET)
12420 +#endif
12421 +
12422 /*
12423 * References to members of the new_cpu_data structure.
12424 */
12425 @@ -54,11 +60,7 @@
12426 * and small than max_low_pfn, otherwise will waste some page table entries
12427 */
12428
12429 -#if PTRS_PER_PMD > 1
12430 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12431 -#else
12432 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12433 -#endif
12434 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12435
12436 /* Number of possible pages in the lowmem region */
12437 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12438 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12439 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12440
12441 /*
12442 + * Real beginning of normal "text" segment
12443 + */
12444 +ENTRY(stext)
12445 +ENTRY(_stext)
12446 +
12447 +/*
12448 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12449 * %esi points to the real-mode code as a 32-bit pointer.
12450 * CS and DS must be 4 GB flat segments, but we don't depend on
12451 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12452 * can.
12453 */
12454 __HEAD
12455 +
12456 +#ifdef CONFIG_PAX_KERNEXEC
12457 + jmp startup_32
12458 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12459 +.fill PAGE_SIZE-5,1,0xcc
12460 +#endif
12461 +
12462 ENTRY(startup_32)
12463 movl pa(stack_start),%ecx
12464
12465 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12466 2:
12467 leal -__PAGE_OFFSET(%ecx),%esp
12468
12469 +#ifdef CONFIG_SMP
12470 + movl $pa(cpu_gdt_table),%edi
12471 + movl $__per_cpu_load,%eax
12472 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12473 + rorl $16,%eax
12474 + movb %al,__KERNEL_PERCPU + 4(%edi)
12475 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12476 + movl $__per_cpu_end - 1,%eax
12477 + subl $__per_cpu_start,%eax
12478 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12479 +#endif
12480 +
12481 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12482 + movl $NR_CPUS,%ecx
12483 + movl $pa(cpu_gdt_table),%edi
12484 +1:
12485 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12486 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12487 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12488 + addl $PAGE_SIZE_asm,%edi
12489 + loop 1b
12490 +#endif
12491 +
12492 +#ifdef CONFIG_PAX_KERNEXEC
12493 + movl $pa(boot_gdt),%edi
12494 + movl $__LOAD_PHYSICAL_ADDR,%eax
12495 + movw %ax,__BOOT_CS + 2(%edi)
12496 + rorl $16,%eax
12497 + movb %al,__BOOT_CS + 4(%edi)
12498 + movb %ah,__BOOT_CS + 7(%edi)
12499 + rorl $16,%eax
12500 +
12501 + ljmp $(__BOOT_CS),$1f
12502 +1:
12503 +
12504 + movl $NR_CPUS,%ecx
12505 + movl $pa(cpu_gdt_table),%edi
12506 + addl $__PAGE_OFFSET,%eax
12507 +1:
12508 + movw %ax,__KERNEL_CS + 2(%edi)
12509 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12510 + rorl $16,%eax
12511 + movb %al,__KERNEL_CS + 4(%edi)
12512 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12513 + movb %ah,__KERNEL_CS + 7(%edi)
12514 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12515 + rorl $16,%eax
12516 + addl $PAGE_SIZE_asm,%edi
12517 + loop 1b
12518 +#endif
12519 +
12520 /*
12521 * Clear BSS first so that there are no surprises...
12522 */
12523 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12524 movl %eax, pa(max_pfn_mapped)
12525
12526 /* Do early initialization of the fixmap area */
12527 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12528 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12529 +#ifdef CONFIG_COMPAT_VDSO
12530 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12531 +#else
12532 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12533 +#endif
12534 #else /* Not PAE */
12535
12536 page_pde_offset = (__PAGE_OFFSET >> 20);
12537 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12538 movl %eax, pa(max_pfn_mapped)
12539
12540 /* Do early initialization of the fixmap area */
12541 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12542 - movl %eax,pa(initial_page_table+0xffc)
12543 +#ifdef CONFIG_COMPAT_VDSO
12544 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12545 +#else
12546 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12547 +#endif
12548 #endif
12549
12550 #ifdef CONFIG_PARAVIRT
12551 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12552 cmpl $num_subarch_entries, %eax
12553 jae bad_subarch
12554
12555 - movl pa(subarch_entries)(,%eax,4), %eax
12556 - subl $__PAGE_OFFSET, %eax
12557 - jmp *%eax
12558 + jmp *pa(subarch_entries)(,%eax,4)
12559
12560 bad_subarch:
12561 WEAK(lguest_entry)
12562 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12563 __INITDATA
12564
12565 subarch_entries:
12566 - .long default_entry /* normal x86/PC */
12567 - .long lguest_entry /* lguest hypervisor */
12568 - .long xen_entry /* Xen hypervisor */
12569 - .long default_entry /* Moorestown MID */
12570 + .long ta(default_entry) /* normal x86/PC */
12571 + .long ta(lguest_entry) /* lguest hypervisor */
12572 + .long ta(xen_entry) /* Xen hypervisor */
12573 + .long ta(default_entry) /* Moorestown MID */
12574 num_subarch_entries = (. - subarch_entries) / 4
12575 .previous
12576 #else
12577 @@ -312,6 +382,7 @@ default_entry:
12578 orl %edx,%eax
12579 movl %eax,%cr4
12580
12581 +#ifdef CONFIG_X86_PAE
12582 testb $X86_CR4_PAE, %al # check if PAE is enabled
12583 jz 6f
12584
12585 @@ -340,6 +411,9 @@ default_entry:
12586 /* Make changes effective */
12587 wrmsr
12588
12589 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12590 +#endif
12591 +
12592 6:
12593
12594 /*
12595 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12596 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12597 movl %eax,%ss # after changing gdt.
12598
12599 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12600 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12601 movl %eax,%ds
12602 movl %eax,%es
12603
12604 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12605 */
12606 cmpb $0,ready
12607 jne 1f
12608 - movl $gdt_page,%eax
12609 + movl $cpu_gdt_table,%eax
12610 movl $stack_canary,%ecx
12611 +#ifdef CONFIG_SMP
12612 + addl $__per_cpu_load,%ecx
12613 +#endif
12614 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12615 shrl $16, %ecx
12616 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12617 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12618 1:
12619 -#endif
12620 movl $(__KERNEL_STACK_CANARY),%eax
12621 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12622 + movl $(__USER_DS),%eax
12623 +#else
12624 + xorl %eax,%eax
12625 +#endif
12626 movl %eax,%gs
12627
12628 xorl %eax,%eax # Clear LDT
12629 @@ -558,22 +639,22 @@ early_page_fault:
12630 jmp early_fault
12631
12632 early_fault:
12633 - cld
12634 #ifdef CONFIG_PRINTK
12635 + cmpl $1,%ss:early_recursion_flag
12636 + je hlt_loop
12637 + incl %ss:early_recursion_flag
12638 + cld
12639 pusha
12640 movl $(__KERNEL_DS),%eax
12641 movl %eax,%ds
12642 movl %eax,%es
12643 - cmpl $2,early_recursion_flag
12644 - je hlt_loop
12645 - incl early_recursion_flag
12646 movl %cr2,%eax
12647 pushl %eax
12648 pushl %edx /* trapno */
12649 pushl $fault_msg
12650 call printk
12651 +; call dump_stack
12652 #endif
12653 - call dump_stack
12654 hlt_loop:
12655 hlt
12656 jmp hlt_loop
12657 @@ -581,8 +662,11 @@ hlt_loop:
12658 /* This is the default interrupt "handler" :-) */
12659 ALIGN
12660 ignore_int:
12661 - cld
12662 #ifdef CONFIG_PRINTK
12663 + cmpl $2,%ss:early_recursion_flag
12664 + je hlt_loop
12665 + incl %ss:early_recursion_flag
12666 + cld
12667 pushl %eax
12668 pushl %ecx
12669 pushl %edx
12670 @@ -591,9 +675,6 @@ ignore_int:
12671 movl $(__KERNEL_DS),%eax
12672 movl %eax,%ds
12673 movl %eax,%es
12674 - cmpl $2,early_recursion_flag
12675 - je hlt_loop
12676 - incl early_recursion_flag
12677 pushl 16(%esp)
12678 pushl 24(%esp)
12679 pushl 32(%esp)
12680 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12681 /*
12682 * BSS section
12683 */
12684 -__PAGE_ALIGNED_BSS
12685 - .align PAGE_SIZE
12686 #ifdef CONFIG_X86_PAE
12687 +.section .initial_pg_pmd,"a",@progbits
12688 initial_pg_pmd:
12689 .fill 1024*KPMDS,4,0
12690 #else
12691 +.section .initial_page_table,"a",@progbits
12692 ENTRY(initial_page_table)
12693 .fill 1024,4,0
12694 #endif
12695 +.section .initial_pg_fixmap,"a",@progbits
12696 initial_pg_fixmap:
12697 .fill 1024,4,0
12698 +.section .empty_zero_page,"a",@progbits
12699 ENTRY(empty_zero_page)
12700 .fill 4096,1,0
12701 +.section .swapper_pg_dir,"a",@progbits
12702 ENTRY(swapper_pg_dir)
12703 +#ifdef CONFIG_X86_PAE
12704 + .fill 4,8,0
12705 +#else
12706 .fill 1024,4,0
12707 +#endif
12708 +
12709 +/*
12710 + * The IDT has to be page-aligned to simplify the Pentium
12711 + * F0 0F bug workaround.. We have a special link segment
12712 + * for this.
12713 + */
12714 +.section .idt,"a",@progbits
12715 +ENTRY(idt_table)
12716 + .fill 256,8,0
12717
12718 /*
12719 * This starts the data section.
12720 */
12721 #ifdef CONFIG_X86_PAE
12722 -__PAGE_ALIGNED_DATA
12723 - /* Page-aligned for the benefit of paravirt? */
12724 - .align PAGE_SIZE
12725 +.section .initial_page_table,"a",@progbits
12726 ENTRY(initial_page_table)
12727 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12728 # if KPMDS == 3
12729 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12730 # error "Kernel PMDs should be 1, 2 or 3"
12731 # endif
12732 .align PAGE_SIZE /* needs to be page-sized too */
12733 +
12734 +#ifdef CONFIG_PAX_PER_CPU_PGD
12735 +ENTRY(cpu_pgd)
12736 + .rept NR_CPUS
12737 + .fill 4,8,0
12738 + .endr
12739 +#endif
12740 +
12741 #endif
12742
12743 .data
12744 .balign 4
12745 ENTRY(stack_start)
12746 - .long init_thread_union+THREAD_SIZE
12747 + .long init_thread_union+THREAD_SIZE-8
12748 +
12749 +ready: .byte 0
12750
12751 +.section .rodata,"a",@progbits
12752 early_recursion_flag:
12753 .long 0
12754
12755 -ready: .byte 0
12756 -
12757 int_msg:
12758 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12759
12760 @@ -707,7 +811,7 @@ fault_msg:
12761 .word 0 # 32 bit align gdt_desc.address
12762 boot_gdt_descr:
12763 .word __BOOT_DS+7
12764 - .long boot_gdt - __PAGE_OFFSET
12765 + .long pa(boot_gdt)
12766
12767 .word 0 # 32-bit align idt_desc.address
12768 idt_descr:
12769 @@ -718,7 +822,7 @@ idt_descr:
12770 .word 0 # 32 bit align gdt_desc.address
12771 ENTRY(early_gdt_descr)
12772 .word GDT_ENTRIES*8-1
12773 - .long gdt_page /* Overwritten for secondary CPUs */
12774 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12775
12776 /*
12777 * The boot_gdt must mirror the equivalent in setup.S and is
12778 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12779 .align L1_CACHE_BYTES
12780 ENTRY(boot_gdt)
12781 .fill GDT_ENTRY_BOOT_CS,8,0
12782 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12783 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12784 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12785 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12786 +
12787 + .align PAGE_SIZE_asm
12788 +ENTRY(cpu_gdt_table)
12789 + .rept NR_CPUS
12790 + .quad 0x0000000000000000 /* NULL descriptor */
12791 + .quad 0x0000000000000000 /* 0x0b reserved */
12792 + .quad 0x0000000000000000 /* 0x13 reserved */
12793 + .quad 0x0000000000000000 /* 0x1b reserved */
12794 +
12795 +#ifdef CONFIG_PAX_KERNEXEC
12796 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12797 +#else
12798 + .quad 0x0000000000000000 /* 0x20 unused */
12799 +#endif
12800 +
12801 + .quad 0x0000000000000000 /* 0x28 unused */
12802 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12803 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12804 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12805 + .quad 0x0000000000000000 /* 0x4b reserved */
12806 + .quad 0x0000000000000000 /* 0x53 reserved */
12807 + .quad 0x0000000000000000 /* 0x5b reserved */
12808 +
12809 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12810 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12811 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12812 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12813 +
12814 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12815 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12816 +
12817 + /*
12818 + * Segments used for calling PnP BIOS have byte granularity.
12819 + * The code segments and data segments have fixed 64k limits,
12820 + * the transfer segment sizes are set at run time.
12821 + */
12822 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12823 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12824 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12825 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12826 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12827 +
12828 + /*
12829 + * The APM segments have byte granularity and their bases
12830 + * are set at run time. All have 64k limits.
12831 + */
12832 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12833 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12834 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12835 +
12836 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12837 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12838 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12839 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12840 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12841 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12842 +
12843 + /* Be sure this is zeroed to avoid false validations in Xen */
12844 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12845 + .endr
12846 diff -urNp linux-3.0.3/arch/x86/kernel/head_64.S linux-3.0.3/arch/x86/kernel/head_64.S
12847 --- linux-3.0.3/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12848 +++ linux-3.0.3/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12849 @@ -19,6 +19,7 @@
12850 #include <asm/cache.h>
12851 #include <asm/processor-flags.h>
12852 #include <asm/percpu.h>
12853 +#include <asm/cpufeature.h>
12854
12855 #ifdef CONFIG_PARAVIRT
12856 #include <asm/asm-offsets.h>
12857 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12858 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12859 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12860 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12861 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12862 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12863 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12864 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12865
12866 .text
12867 __HEAD
12868 @@ -85,35 +90,22 @@ startup_64:
12869 */
12870 addq %rbp, init_level4_pgt + 0(%rip)
12871 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12872 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12873 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12874 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12875
12876 addq %rbp, level3_ident_pgt + 0(%rip)
12877 +#ifndef CONFIG_XEN
12878 + addq %rbp, level3_ident_pgt + 8(%rip)
12879 +#endif
12880
12881 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12882 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12883 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12884
12885 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12886 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12887 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12888
12889 - /* Add an Identity mapping if I am above 1G */
12890 - leaq _text(%rip), %rdi
12891 - andq $PMD_PAGE_MASK, %rdi
12892 -
12893 - movq %rdi, %rax
12894 - shrq $PUD_SHIFT, %rax
12895 - andq $(PTRS_PER_PUD - 1), %rax
12896 - jz ident_complete
12897 -
12898 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12899 - leaq level3_ident_pgt(%rip), %rbx
12900 - movq %rdx, 0(%rbx, %rax, 8)
12901 -
12902 - movq %rdi, %rax
12903 - shrq $PMD_SHIFT, %rax
12904 - andq $(PTRS_PER_PMD - 1), %rax
12905 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12906 - leaq level2_spare_pgt(%rip), %rbx
12907 - movq %rdx, 0(%rbx, %rax, 8)
12908 -ident_complete:
12909 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12910 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12911
12912 /*
12913 * Fixup the kernel text+data virtual addresses. Note that
12914 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12915 * after the boot processor executes this code.
12916 */
12917
12918 - /* Enable PAE mode and PGE */
12919 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12920 + /* Enable PAE mode and PSE/PGE */
12921 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12922 movq %rax, %cr4
12923
12924 /* Setup early boot stage 4 level pagetables. */
12925 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12926 movl $MSR_EFER, %ecx
12927 rdmsr
12928 btsl $_EFER_SCE, %eax /* Enable System Call */
12929 - btl $20,%edi /* No Execute supported? */
12930 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12931 jnc 1f
12932 btsl $_EFER_NX, %eax
12933 + leaq init_level4_pgt(%rip), %rdi
12934 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12935 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12936 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12937 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12938 1: wrmsr /* Make changes effective */
12939
12940 /* Setup cr0 */
12941 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12942 bad_address:
12943 jmp bad_address
12944
12945 - .section ".init.text","ax"
12946 + __INIT
12947 #ifdef CONFIG_EARLY_PRINTK
12948 .globl early_idt_handlers
12949 early_idt_handlers:
12950 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12951 #endif /* EARLY_PRINTK */
12952 1: hlt
12953 jmp 1b
12954 + .previous
12955
12956 #ifdef CONFIG_EARLY_PRINTK
12957 + __INITDATA
12958 early_recursion_flag:
12959 .long 0
12960 + .previous
12961
12962 + .section .rodata,"a",@progbits
12963 early_idt_msg:
12964 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12965 early_idt_ripmsg:
12966 .asciz "RIP %s\n"
12967 -#endif /* CONFIG_EARLY_PRINTK */
12968 .previous
12969 +#endif /* CONFIG_EARLY_PRINTK */
12970
12971 + .section .rodata,"a",@progbits
12972 #define NEXT_PAGE(name) \
12973 .balign PAGE_SIZE; \
12974 ENTRY(name)
12975 @@ -338,7 +340,6 @@ ENTRY(name)
12976 i = i + 1 ; \
12977 .endr
12978
12979 - .data
12980 /*
12981 * This default setting generates an ident mapping at address 0x100000
12982 * and a mapping for the kernel that precisely maps virtual address
12983 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12984 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12985 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12986 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12987 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
12988 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12989 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12990 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12991 .org init_level4_pgt + L4_START_KERNEL*8, 0
12992 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12993 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12994
12995 +#ifdef CONFIG_PAX_PER_CPU_PGD
12996 +NEXT_PAGE(cpu_pgd)
12997 + .rept NR_CPUS
12998 + .fill 512,8,0
12999 + .endr
13000 +#endif
13001 +
13002 NEXT_PAGE(level3_ident_pgt)
13003 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13004 +#ifdef CONFIG_XEN
13005 .fill 511,8,0
13006 +#else
13007 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13008 + .fill 510,8,0
13009 +#endif
13010 +
13011 +NEXT_PAGE(level3_vmalloc_pgt)
13012 + .fill 512,8,0
13013 +
13014 +NEXT_PAGE(level3_vmemmap_pgt)
13015 + .fill L3_VMEMMAP_START,8,0
13016 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13017
13018 NEXT_PAGE(level3_kernel_pgt)
13019 .fill L3_START_KERNEL,8,0
13020 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13021 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13022 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13023
13024 +NEXT_PAGE(level2_vmemmap_pgt)
13025 + .fill 512,8,0
13026 +
13027 NEXT_PAGE(level2_fixmap_pgt)
13028 - .fill 506,8,0
13029 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13030 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13031 - .fill 5,8,0
13032 + .fill 507,8,0
13033 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13034 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13035 + .fill 4,8,0
13036
13037 -NEXT_PAGE(level1_fixmap_pgt)
13038 +NEXT_PAGE(level1_vsyscall_pgt)
13039 .fill 512,8,0
13040
13041 -NEXT_PAGE(level2_ident_pgt)
13042 - /* Since I easily can, map the first 1G.
13043 + /* Since I easily can, map the first 2G.
13044 * Don't set NX because code runs from these pages.
13045 */
13046 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13047 +NEXT_PAGE(level2_ident_pgt)
13048 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13049
13050 NEXT_PAGE(level2_kernel_pgt)
13051 /*
13052 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13053 * If you want to increase this then increase MODULES_VADDR
13054 * too.)
13055 */
13056 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13057 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13058 -
13059 -NEXT_PAGE(level2_spare_pgt)
13060 - .fill 512, 8, 0
13061 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13062
13063 #undef PMDS
13064 #undef NEXT_PAGE
13065
13066 - .data
13067 + .align PAGE_SIZE
13068 +ENTRY(cpu_gdt_table)
13069 + .rept NR_CPUS
13070 + .quad 0x0000000000000000 /* NULL descriptor */
13071 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13072 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13073 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13074 + .quad 0x00cffb000000ffff /* __USER32_CS */
13075 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13076 + .quad 0x00affb000000ffff /* __USER_CS */
13077 +
13078 +#ifdef CONFIG_PAX_KERNEXEC
13079 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13080 +#else
13081 + .quad 0x0 /* unused */
13082 +#endif
13083 +
13084 + .quad 0,0 /* TSS */
13085 + .quad 0,0 /* LDT */
13086 + .quad 0,0,0 /* three TLS descriptors */
13087 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13088 + /* asm/segment.h:GDT_ENTRIES must match this */
13089 +
13090 + /* zero the remaining page */
13091 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13092 + .endr
13093 +
13094 .align 16
13095 .globl early_gdt_descr
13096 early_gdt_descr:
13097 .word GDT_ENTRIES*8-1
13098 early_gdt_descr_base:
13099 - .quad INIT_PER_CPU_VAR(gdt_page)
13100 + .quad cpu_gdt_table
13101
13102 ENTRY(phys_base)
13103 /* This must match the first entry in level2_kernel_pgt */
13104 .quad 0x0000000000000000
13105
13106 #include "../../x86/xen/xen-head.S"
13107 -
13108 - .section .bss, "aw", @nobits
13109 +
13110 + .section .rodata,"a",@progbits
13111 .align L1_CACHE_BYTES
13112 ENTRY(idt_table)
13113 - .skip IDT_ENTRIES * 16
13114 + .fill 512,8,0
13115
13116 __PAGE_ALIGNED_BSS
13117 .align PAGE_SIZE
13118 diff -urNp linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c
13119 --- linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13120 +++ linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13121 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13122 EXPORT_SYMBOL(cmpxchg8b_emu);
13123 #endif
13124
13125 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13126 +
13127 /* Networking helper routines. */
13128 EXPORT_SYMBOL(csum_partial_copy_generic);
13129 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13130 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13131
13132 EXPORT_SYMBOL(__get_user_1);
13133 EXPORT_SYMBOL(__get_user_2);
13134 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13135
13136 EXPORT_SYMBOL(csum_partial);
13137 EXPORT_SYMBOL(empty_zero_page);
13138 +
13139 +#ifdef CONFIG_PAX_KERNEXEC
13140 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13141 +#endif
13142 diff -urNp linux-3.0.3/arch/x86/kernel/i8259.c linux-3.0.3/arch/x86/kernel/i8259.c
13143 --- linux-3.0.3/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13144 +++ linux-3.0.3/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13145 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13146 "spurious 8259A interrupt: IRQ%d.\n", irq);
13147 spurious_irq_mask |= irqmask;
13148 }
13149 - atomic_inc(&irq_err_count);
13150 + atomic_inc_unchecked(&irq_err_count);
13151 /*
13152 * Theoretically we do not have to handle this IRQ,
13153 * but in Linux this does not cause problems and is
13154 diff -urNp linux-3.0.3/arch/x86/kernel/init_task.c linux-3.0.3/arch/x86/kernel/init_task.c
13155 --- linux-3.0.3/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13156 +++ linux-3.0.3/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13157 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13158 * way process stacks are handled. This is done by having a special
13159 * "init_task" linker map entry..
13160 */
13161 -union thread_union init_thread_union __init_task_data =
13162 - { INIT_THREAD_INFO(init_task) };
13163 +union thread_union init_thread_union __init_task_data;
13164
13165 /*
13166 * Initial task structure.
13167 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13168 * section. Since TSS's are completely CPU-local, we want them
13169 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13170 */
13171 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13172 -
13173 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13174 +EXPORT_SYMBOL(init_tss);
13175 diff -urNp linux-3.0.3/arch/x86/kernel/ioport.c linux-3.0.3/arch/x86/kernel/ioport.c
13176 --- linux-3.0.3/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13177 +++ linux-3.0.3/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13178 @@ -6,6 +6,7 @@
13179 #include <linux/sched.h>
13180 #include <linux/kernel.h>
13181 #include <linux/capability.h>
13182 +#include <linux/security.h>
13183 #include <linux/errno.h>
13184 #include <linux/types.h>
13185 #include <linux/ioport.h>
13186 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13187
13188 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13189 return -EINVAL;
13190 +#ifdef CONFIG_GRKERNSEC_IO
13191 + if (turn_on && grsec_disable_privio) {
13192 + gr_handle_ioperm();
13193 + return -EPERM;
13194 + }
13195 +#endif
13196 if (turn_on && !capable(CAP_SYS_RAWIO))
13197 return -EPERM;
13198
13199 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13200 * because the ->io_bitmap_max value must match the bitmap
13201 * contents:
13202 */
13203 - tss = &per_cpu(init_tss, get_cpu());
13204 + tss = init_tss + get_cpu();
13205
13206 if (turn_on)
13207 bitmap_clear(t->io_bitmap_ptr, from, num);
13208 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13209 return -EINVAL;
13210 /* Trying to gain more privileges? */
13211 if (level > old) {
13212 +#ifdef CONFIG_GRKERNSEC_IO
13213 + if (grsec_disable_privio) {
13214 + gr_handle_iopl();
13215 + return -EPERM;
13216 + }
13217 +#endif
13218 if (!capable(CAP_SYS_RAWIO))
13219 return -EPERM;
13220 }
13221 diff -urNp linux-3.0.3/arch/x86/kernel/irq_32.c linux-3.0.3/arch/x86/kernel/irq_32.c
13222 --- linux-3.0.3/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13223 +++ linux-3.0.3/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13224 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13225 __asm__ __volatile__("andl %%esp,%0" :
13226 "=r" (sp) : "0" (THREAD_SIZE - 1));
13227
13228 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13229 + return sp < STACK_WARN;
13230 }
13231
13232 static void print_stack_overflow(void)
13233 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13234 * per-CPU IRQ handling contexts (thread information and stack)
13235 */
13236 union irq_ctx {
13237 - struct thread_info tinfo;
13238 - u32 stack[THREAD_SIZE/sizeof(u32)];
13239 + unsigned long previous_esp;
13240 + u32 stack[THREAD_SIZE/sizeof(u32)];
13241 } __attribute__((aligned(THREAD_SIZE)));
13242
13243 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13244 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13245 static inline int
13246 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13247 {
13248 - union irq_ctx *curctx, *irqctx;
13249 + union irq_ctx *irqctx;
13250 u32 *isp, arg1, arg2;
13251
13252 - curctx = (union irq_ctx *) current_thread_info();
13253 irqctx = __this_cpu_read(hardirq_ctx);
13254
13255 /*
13256 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13257 * handler) we can't do that and just have to keep using the
13258 * current stack (which is the irq stack already after all)
13259 */
13260 - if (unlikely(curctx == irqctx))
13261 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13262 return 0;
13263
13264 /* build the stack frame on the IRQ stack */
13265 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13266 - irqctx->tinfo.task = curctx->tinfo.task;
13267 - irqctx->tinfo.previous_esp = current_stack_pointer;
13268 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13269 + irqctx->previous_esp = current_stack_pointer;
13270
13271 - /*
13272 - * Copy the softirq bits in preempt_count so that the
13273 - * softirq checks work in the hardirq context.
13274 - */
13275 - irqctx->tinfo.preempt_count =
13276 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13277 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13279 + __set_fs(MAKE_MM_SEG(0));
13280 +#endif
13281
13282 if (unlikely(overflow))
13283 call_on_stack(print_stack_overflow, isp);
13284 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13285 : "0" (irq), "1" (desc), "2" (isp),
13286 "D" (desc->handle_irq)
13287 : "memory", "cc", "ecx");
13288 +
13289 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13290 + __set_fs(current_thread_info()->addr_limit);
13291 +#endif
13292 +
13293 return 1;
13294 }
13295
13296 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13297 */
13298 void __cpuinit irq_ctx_init(int cpu)
13299 {
13300 - union irq_ctx *irqctx;
13301 -
13302 if (per_cpu(hardirq_ctx, cpu))
13303 return;
13304
13305 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13306 - THREAD_FLAGS,
13307 - THREAD_ORDER));
13308 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13309 - irqctx->tinfo.cpu = cpu;
13310 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13311 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13312 -
13313 - per_cpu(hardirq_ctx, cpu) = irqctx;
13314 -
13315 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13316 - THREAD_FLAGS,
13317 - THREAD_ORDER));
13318 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13319 - irqctx->tinfo.cpu = cpu;
13320 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13321 -
13322 - per_cpu(softirq_ctx, cpu) = irqctx;
13323 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13324 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13325
13326 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13327 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13328 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13329 asmlinkage void do_softirq(void)
13330 {
13331 unsigned long flags;
13332 - struct thread_info *curctx;
13333 union irq_ctx *irqctx;
13334 u32 *isp;
13335
13336 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13337 local_irq_save(flags);
13338
13339 if (local_softirq_pending()) {
13340 - curctx = current_thread_info();
13341 irqctx = __this_cpu_read(softirq_ctx);
13342 - irqctx->tinfo.task = curctx->task;
13343 - irqctx->tinfo.previous_esp = current_stack_pointer;
13344 + irqctx->previous_esp = current_stack_pointer;
13345
13346 /* build the stack frame on the softirq stack */
13347 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13348 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13349 +
13350 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13351 + __set_fs(MAKE_MM_SEG(0));
13352 +#endif
13353
13354 call_on_stack(__do_softirq, isp);
13355 +
13356 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13357 + __set_fs(current_thread_info()->addr_limit);
13358 +#endif
13359 +
13360 /*
13361 * Shouldn't happen, we returned above if in_interrupt():
13362 */
13363 diff -urNp linux-3.0.3/arch/x86/kernel/irq.c linux-3.0.3/arch/x86/kernel/irq.c
13364 --- linux-3.0.3/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13365 +++ linux-3.0.3/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13366 @@ -17,7 +17,7 @@
13367 #include <asm/mce.h>
13368 #include <asm/hw_irq.h>
13369
13370 -atomic_t irq_err_count;
13371 +atomic_unchecked_t irq_err_count;
13372
13373 /* Function pointer for generic interrupt vector handling */
13374 void (*x86_platform_ipi_callback)(void) = NULL;
13375 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13376 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13377 seq_printf(p, " Machine check polls\n");
13378 #endif
13379 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13380 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13381 #if defined(CONFIG_X86_IO_APIC)
13382 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13383 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13384 #endif
13385 return 0;
13386 }
13387 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13388
13389 u64 arch_irq_stat(void)
13390 {
13391 - u64 sum = atomic_read(&irq_err_count);
13392 + u64 sum = atomic_read_unchecked(&irq_err_count);
13393
13394 #ifdef CONFIG_X86_IO_APIC
13395 - sum += atomic_read(&irq_mis_count);
13396 + sum += atomic_read_unchecked(&irq_mis_count);
13397 #endif
13398 return sum;
13399 }
13400 diff -urNp linux-3.0.3/arch/x86/kernel/kgdb.c linux-3.0.3/arch/x86/kernel/kgdb.c
13401 --- linux-3.0.3/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13402 +++ linux-3.0.3/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13403 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13404 #ifdef CONFIG_X86_32
13405 switch (regno) {
13406 case GDB_SS:
13407 - if (!user_mode_vm(regs))
13408 + if (!user_mode(regs))
13409 *(unsigned long *)mem = __KERNEL_DS;
13410 break;
13411 case GDB_SP:
13412 - if (!user_mode_vm(regs))
13413 + if (!user_mode(regs))
13414 *(unsigned long *)mem = kernel_stack_pointer(regs);
13415 break;
13416 case GDB_GS:
13417 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13418 case 'k':
13419 /* clear the trace bit */
13420 linux_regs->flags &= ~X86_EFLAGS_TF;
13421 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13422 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13423
13424 /* set the trace bit if we're stepping */
13425 if (remcomInBuffer[0] == 's') {
13426 linux_regs->flags |= X86_EFLAGS_TF;
13427 - atomic_set(&kgdb_cpu_doing_single_step,
13428 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13429 raw_smp_processor_id());
13430 }
13431
13432 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13433 return NOTIFY_DONE;
13434
13435 case DIE_DEBUG:
13436 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13437 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13438 if (user_mode(regs))
13439 return single_step_cont(regs, args);
13440 break;
13441 diff -urNp linux-3.0.3/arch/x86/kernel/kprobes.c linux-3.0.3/arch/x86/kernel/kprobes.c
13442 --- linux-3.0.3/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13443 +++ linux-3.0.3/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13444 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13445 } __attribute__((packed)) *insn;
13446
13447 insn = (struct __arch_relative_insn *)from;
13448 +
13449 + pax_open_kernel();
13450 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13451 insn->op = op;
13452 + pax_close_kernel();
13453 }
13454
13455 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13456 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13457 kprobe_opcode_t opcode;
13458 kprobe_opcode_t *orig_opcodes = opcodes;
13459
13460 - if (search_exception_tables((unsigned long)opcodes))
13461 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13462 return 0; /* Page fault may occur on this address. */
13463
13464 retry:
13465 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13466 }
13467 }
13468 insn_get_length(&insn);
13469 + pax_open_kernel();
13470 memcpy(dest, insn.kaddr, insn.length);
13471 + pax_close_kernel();
13472
13473 #ifdef CONFIG_X86_64
13474 if (insn_rip_relative(&insn)) {
13475 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13476 (u8 *) dest;
13477 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13478 disp = (u8 *) dest + insn_offset_displacement(&insn);
13479 + pax_open_kernel();
13480 *(s32 *) disp = (s32) newdisp;
13481 + pax_close_kernel();
13482 }
13483 #endif
13484 return insn.length;
13485 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13486 */
13487 __copy_instruction(p->ainsn.insn, p->addr, 0);
13488
13489 - if (can_boost(p->addr))
13490 + if (can_boost(ktla_ktva(p->addr)))
13491 p->ainsn.boostable = 0;
13492 else
13493 p->ainsn.boostable = -1;
13494
13495 - p->opcode = *p->addr;
13496 + p->opcode = *(ktla_ktva(p->addr));
13497 }
13498
13499 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13500 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13501 * nor set current_kprobe, because it doesn't use single
13502 * stepping.
13503 */
13504 - regs->ip = (unsigned long)p->ainsn.insn;
13505 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13506 preempt_enable_no_resched();
13507 return;
13508 }
13509 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13510 if (p->opcode == BREAKPOINT_INSTRUCTION)
13511 regs->ip = (unsigned long)p->addr;
13512 else
13513 - regs->ip = (unsigned long)p->ainsn.insn;
13514 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13515 }
13516
13517 /*
13518 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13519 setup_singlestep(p, regs, kcb, 0);
13520 return 1;
13521 }
13522 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13523 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13524 /*
13525 * The breakpoint instruction was removed right
13526 * after we hit it. Another cpu has removed
13527 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13528 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13529 {
13530 unsigned long *tos = stack_addr(regs);
13531 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13532 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13533 unsigned long orig_ip = (unsigned long)p->addr;
13534 kprobe_opcode_t *insn = p->ainsn.insn;
13535
13536 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13537 struct die_args *args = data;
13538 int ret = NOTIFY_DONE;
13539
13540 - if (args->regs && user_mode_vm(args->regs))
13541 + if (args->regs && user_mode(args->regs))
13542 return ret;
13543
13544 switch (val) {
13545 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13546 * Verify if the address gap is in 2GB range, because this uses
13547 * a relative jump.
13548 */
13549 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13550 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13551 if (abs(rel) > 0x7fffffff)
13552 return -ERANGE;
13553
13554 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13555 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13556
13557 /* Set probe function call */
13558 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13559 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13560
13561 /* Set returning jmp instruction at the tail of out-of-line buffer */
13562 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13563 - (u8 *)op->kp.addr + op->optinsn.size);
13564 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13565
13566 flush_icache_range((unsigned long) buf,
13567 (unsigned long) buf + TMPL_END_IDX +
13568 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13569 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13570
13571 /* Backup instructions which will be replaced by jump address */
13572 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13573 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13574 RELATIVE_ADDR_SIZE);
13575
13576 insn_buf[0] = RELATIVEJUMP_OPCODE;
13577 diff -urNp linux-3.0.3/arch/x86/kernel/kvm.c linux-3.0.3/arch/x86/kernel/kvm.c
13578 --- linux-3.0.3/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13579 +++ linux-3.0.3/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13580 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13581 pv_mmu_ops.set_pud = kvm_set_pud;
13582 #if PAGETABLE_LEVELS == 4
13583 pv_mmu_ops.set_pgd = kvm_set_pgd;
13584 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13585 #endif
13586 #endif
13587 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13588 diff -urNp linux-3.0.3/arch/x86/kernel/ldt.c linux-3.0.3/arch/x86/kernel/ldt.c
13589 --- linux-3.0.3/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13590 +++ linux-3.0.3/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13591 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13592 if (reload) {
13593 #ifdef CONFIG_SMP
13594 preempt_disable();
13595 - load_LDT(pc);
13596 + load_LDT_nolock(pc);
13597 if (!cpumask_equal(mm_cpumask(current->mm),
13598 cpumask_of(smp_processor_id())))
13599 smp_call_function(flush_ldt, current->mm, 1);
13600 preempt_enable();
13601 #else
13602 - load_LDT(pc);
13603 + load_LDT_nolock(pc);
13604 #endif
13605 }
13606 if (oldsize) {
13607 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13608 return err;
13609
13610 for (i = 0; i < old->size; i++)
13611 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13612 + write_ldt_entry(new->ldt, i, old->ldt + i);
13613 return 0;
13614 }
13615
13616 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13617 retval = copy_ldt(&mm->context, &old_mm->context);
13618 mutex_unlock(&old_mm->context.lock);
13619 }
13620 +
13621 + if (tsk == current) {
13622 + mm->context.vdso = 0;
13623 +
13624 +#ifdef CONFIG_X86_32
13625 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13626 + mm->context.user_cs_base = 0UL;
13627 + mm->context.user_cs_limit = ~0UL;
13628 +
13629 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13630 + cpus_clear(mm->context.cpu_user_cs_mask);
13631 +#endif
13632 +
13633 +#endif
13634 +#endif
13635 +
13636 + }
13637 +
13638 return retval;
13639 }
13640
13641 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13642 }
13643 }
13644
13645 +#ifdef CONFIG_PAX_SEGMEXEC
13646 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13647 + error = -EINVAL;
13648 + goto out_unlock;
13649 + }
13650 +#endif
13651 +
13652 fill_ldt(&ldt, &ldt_info);
13653 if (oldmode)
13654 ldt.avl = 0;
13655 diff -urNp linux-3.0.3/arch/x86/kernel/machine_kexec_32.c linux-3.0.3/arch/x86/kernel/machine_kexec_32.c
13656 --- linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13657 +++ linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13658 @@ -27,7 +27,7 @@
13659 #include <asm/cacheflush.h>
13660 #include <asm/debugreg.h>
13661
13662 -static void set_idt(void *newidt, __u16 limit)
13663 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13664 {
13665 struct desc_ptr curidt;
13666
13667 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13668 }
13669
13670
13671 -static void set_gdt(void *newgdt, __u16 limit)
13672 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13673 {
13674 struct desc_ptr curgdt;
13675
13676 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13677 }
13678
13679 control_page = page_address(image->control_code_page);
13680 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13681 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13682
13683 relocate_kernel_ptr = control_page;
13684 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13685 diff -urNp linux-3.0.3/arch/x86/kernel/microcode_intel.c linux-3.0.3/arch/x86/kernel/microcode_intel.c
13686 --- linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13687 +++ linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13688 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13689
13690 static int get_ucode_user(void *to, const void *from, size_t n)
13691 {
13692 - return copy_from_user(to, from, n);
13693 + return copy_from_user(to, (__force const void __user *)from, n);
13694 }
13695
13696 static enum ucode_state
13697 request_microcode_user(int cpu, const void __user *buf, size_t size)
13698 {
13699 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13700 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13701 }
13702
13703 static void microcode_fini_cpu(int cpu)
13704 diff -urNp linux-3.0.3/arch/x86/kernel/module.c linux-3.0.3/arch/x86/kernel/module.c
13705 --- linux-3.0.3/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13706 +++ linux-3.0.3/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13707 @@ -36,21 +36,66 @@
13708 #define DEBUGP(fmt...)
13709 #endif
13710
13711 -void *module_alloc(unsigned long size)
13712 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13713 {
13714 if (PAGE_ALIGN(size) > MODULES_LEN)
13715 return NULL;
13716 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13717 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13718 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13719 -1, __builtin_return_address(0));
13720 }
13721
13722 +void *module_alloc(unsigned long size)
13723 +{
13724 +
13725 +#ifdef CONFIG_PAX_KERNEXEC
13726 + return __module_alloc(size, PAGE_KERNEL);
13727 +#else
13728 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13729 +#endif
13730 +
13731 +}
13732 +
13733 /* Free memory returned from module_alloc */
13734 void module_free(struct module *mod, void *module_region)
13735 {
13736 vfree(module_region);
13737 }
13738
13739 +#ifdef CONFIG_PAX_KERNEXEC
13740 +#ifdef CONFIG_X86_32
13741 +void *module_alloc_exec(unsigned long size)
13742 +{
13743 + struct vm_struct *area;
13744 +
13745 + if (size == 0)
13746 + return NULL;
13747 +
13748 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13749 + return area ? area->addr : NULL;
13750 +}
13751 +EXPORT_SYMBOL(module_alloc_exec);
13752 +
13753 +void module_free_exec(struct module *mod, void *module_region)
13754 +{
13755 + vunmap(module_region);
13756 +}
13757 +EXPORT_SYMBOL(module_free_exec);
13758 +#else
13759 +void module_free_exec(struct module *mod, void *module_region)
13760 +{
13761 + module_free(mod, module_region);
13762 +}
13763 +EXPORT_SYMBOL(module_free_exec);
13764 +
13765 +void *module_alloc_exec(unsigned long size)
13766 +{
13767 + return __module_alloc(size, PAGE_KERNEL_RX);
13768 +}
13769 +EXPORT_SYMBOL(module_alloc_exec);
13770 +#endif
13771 +#endif
13772 +
13773 /* We don't need anything special. */
13774 int module_frob_arch_sections(Elf_Ehdr *hdr,
13775 Elf_Shdr *sechdrs,
13776 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13777 unsigned int i;
13778 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13779 Elf32_Sym *sym;
13780 - uint32_t *location;
13781 + uint32_t *plocation, location;
13782
13783 DEBUGP("Applying relocate section %u to %u\n", relsec,
13784 sechdrs[relsec].sh_info);
13785 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13786 /* This is where to make the change */
13787 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13788 - + rel[i].r_offset;
13789 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13790 + location = (uint32_t)plocation;
13791 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13792 + plocation = ktla_ktva((void *)plocation);
13793 /* This is the symbol it is referring to. Note that all
13794 undefined symbols have been resolved. */
13795 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13796 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13797 switch (ELF32_R_TYPE(rel[i].r_info)) {
13798 case R_386_32:
13799 /* We add the value into the location given */
13800 - *location += sym->st_value;
13801 + pax_open_kernel();
13802 + *plocation += sym->st_value;
13803 + pax_close_kernel();
13804 break;
13805 case R_386_PC32:
13806 /* Add the value, subtract its postition */
13807 - *location += sym->st_value - (uint32_t)location;
13808 + pax_open_kernel();
13809 + *plocation += sym->st_value - location;
13810 + pax_close_kernel();
13811 break;
13812 default:
13813 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13814 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13815 case R_X86_64_NONE:
13816 break;
13817 case R_X86_64_64:
13818 + pax_open_kernel();
13819 *(u64 *)loc = val;
13820 + pax_close_kernel();
13821 break;
13822 case R_X86_64_32:
13823 + pax_open_kernel();
13824 *(u32 *)loc = val;
13825 + pax_close_kernel();
13826 if (val != *(u32 *)loc)
13827 goto overflow;
13828 break;
13829 case R_X86_64_32S:
13830 + pax_open_kernel();
13831 *(s32 *)loc = val;
13832 + pax_close_kernel();
13833 if ((s64)val != *(s32 *)loc)
13834 goto overflow;
13835 break;
13836 case R_X86_64_PC32:
13837 val -= (u64)loc;
13838 + pax_open_kernel();
13839 *(u32 *)loc = val;
13840 + pax_close_kernel();
13841 +
13842 #if 0
13843 if ((s64)val != *(s32 *)loc)
13844 goto overflow;
13845 diff -urNp linux-3.0.3/arch/x86/kernel/paravirt.c linux-3.0.3/arch/x86/kernel/paravirt.c
13846 --- linux-3.0.3/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13847 +++ linux-3.0.3/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13848 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13849 {
13850 return x;
13851 }
13852 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13853 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13854 +#endif
13855
13856 void __init default_banner(void)
13857 {
13858 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13859 * corresponding structure. */
13860 static void *get_call_destination(u8 type)
13861 {
13862 - struct paravirt_patch_template tmpl = {
13863 + const struct paravirt_patch_template tmpl = {
13864 .pv_init_ops = pv_init_ops,
13865 .pv_time_ops = pv_time_ops,
13866 .pv_cpu_ops = pv_cpu_ops,
13867 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13868 .pv_lock_ops = pv_lock_ops,
13869 #endif
13870 };
13871 +
13872 + pax_track_stack();
13873 +
13874 return *((void **)&tmpl + type);
13875 }
13876
13877 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13878 if (opfunc == NULL)
13879 /* If there's no function, patch it with a ud2a (BUG) */
13880 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13881 - else if (opfunc == _paravirt_nop)
13882 + else if (opfunc == (void *)_paravirt_nop)
13883 /* If the operation is a nop, then nop the callsite */
13884 ret = paravirt_patch_nop();
13885
13886 /* identity functions just return their single argument */
13887 - else if (opfunc == _paravirt_ident_32)
13888 + else if (opfunc == (void *)_paravirt_ident_32)
13889 ret = paravirt_patch_ident_32(insnbuf, len);
13890 - else if (opfunc == _paravirt_ident_64)
13891 + else if (opfunc == (void *)_paravirt_ident_64)
13892 ret = paravirt_patch_ident_64(insnbuf, len);
13893 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13894 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13895 + ret = paravirt_patch_ident_64(insnbuf, len);
13896 +#endif
13897
13898 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13899 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13900 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13901 if (insn_len > len || start == NULL)
13902 insn_len = len;
13903 else
13904 - memcpy(insnbuf, start, insn_len);
13905 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13906
13907 return insn_len;
13908 }
13909 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13910 preempt_enable();
13911 }
13912
13913 -struct pv_info pv_info = {
13914 +struct pv_info pv_info __read_only = {
13915 .name = "bare hardware",
13916 .paravirt_enabled = 0,
13917 .kernel_rpl = 0,
13918 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13919 };
13920
13921 -struct pv_init_ops pv_init_ops = {
13922 +struct pv_init_ops pv_init_ops __read_only = {
13923 .patch = native_patch,
13924 };
13925
13926 -struct pv_time_ops pv_time_ops = {
13927 +struct pv_time_ops pv_time_ops __read_only = {
13928 .sched_clock = native_sched_clock,
13929 };
13930
13931 -struct pv_irq_ops pv_irq_ops = {
13932 +struct pv_irq_ops pv_irq_ops __read_only = {
13933 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13934 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13935 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13936 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13937 #endif
13938 };
13939
13940 -struct pv_cpu_ops pv_cpu_ops = {
13941 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13942 .cpuid = native_cpuid,
13943 .get_debugreg = native_get_debugreg,
13944 .set_debugreg = native_set_debugreg,
13945 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13946 .end_context_switch = paravirt_nop,
13947 };
13948
13949 -struct pv_apic_ops pv_apic_ops = {
13950 +struct pv_apic_ops pv_apic_ops __read_only = {
13951 #ifdef CONFIG_X86_LOCAL_APIC
13952 .startup_ipi_hook = paravirt_nop,
13953 #endif
13954 };
13955
13956 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13957 +#ifdef CONFIG_X86_32
13958 +#ifdef CONFIG_X86_PAE
13959 +/* 64-bit pagetable entries */
13960 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13961 +#else
13962 /* 32-bit pagetable entries */
13963 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13964 +#endif
13965 #else
13966 /* 64-bit pagetable entries */
13967 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13968 #endif
13969
13970 -struct pv_mmu_ops pv_mmu_ops = {
13971 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13972
13973 .read_cr2 = native_read_cr2,
13974 .write_cr2 = native_write_cr2,
13975 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13976 .make_pud = PTE_IDENT,
13977
13978 .set_pgd = native_set_pgd,
13979 + .set_pgd_batched = native_set_pgd_batched,
13980 #endif
13981 #endif /* PAGETABLE_LEVELS >= 3 */
13982
13983 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13984 },
13985
13986 .set_fixmap = native_set_fixmap,
13987 +
13988 +#ifdef CONFIG_PAX_KERNEXEC
13989 + .pax_open_kernel = native_pax_open_kernel,
13990 + .pax_close_kernel = native_pax_close_kernel,
13991 +#endif
13992 +
13993 };
13994
13995 EXPORT_SYMBOL_GPL(pv_time_ops);
13996 diff -urNp linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c
13997 --- linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13998 +++ linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13999 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14000 arch_spin_lock(lock);
14001 }
14002
14003 -struct pv_lock_ops pv_lock_ops = {
14004 +struct pv_lock_ops pv_lock_ops __read_only = {
14005 #ifdef CONFIG_SMP
14006 .spin_is_locked = __ticket_spin_is_locked,
14007 .spin_is_contended = __ticket_spin_is_contended,
14008 diff -urNp linux-3.0.3/arch/x86/kernel/pci-iommu_table.c linux-3.0.3/arch/x86/kernel/pci-iommu_table.c
14009 --- linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14010 +++ linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14011 @@ -2,7 +2,7 @@
14012 #include <asm/iommu_table.h>
14013 #include <linux/string.h>
14014 #include <linux/kallsyms.h>
14015 -
14016 +#include <linux/sched.h>
14017
14018 #define DEBUG 1
14019
14020 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14021 {
14022 struct iommu_table_entry *p, *q, *x;
14023
14024 + pax_track_stack();
14025 +
14026 /* Simple cyclic dependency checker. */
14027 for (p = start; p < finish; p++) {
14028 q = find_dependents_of(start, finish, p);
14029 diff -urNp linux-3.0.3/arch/x86/kernel/process_32.c linux-3.0.3/arch/x86/kernel/process_32.c
14030 --- linux-3.0.3/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14031 +++ linux-3.0.3/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14032 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14033 unsigned long thread_saved_pc(struct task_struct *tsk)
14034 {
14035 return ((unsigned long *)tsk->thread.sp)[3];
14036 +//XXX return tsk->thread.eip;
14037 }
14038
14039 #ifndef CONFIG_SMP
14040 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14041 unsigned long sp;
14042 unsigned short ss, gs;
14043
14044 - if (user_mode_vm(regs)) {
14045 + if (user_mode(regs)) {
14046 sp = regs->sp;
14047 ss = regs->ss & 0xffff;
14048 - gs = get_user_gs(regs);
14049 } else {
14050 sp = kernel_stack_pointer(regs);
14051 savesegment(ss, ss);
14052 - savesegment(gs, gs);
14053 }
14054 + gs = get_user_gs(regs);
14055
14056 show_regs_common();
14057
14058 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14059 struct task_struct *tsk;
14060 int err;
14061
14062 - childregs = task_pt_regs(p);
14063 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14064 *childregs = *regs;
14065 childregs->ax = 0;
14066 childregs->sp = sp;
14067
14068 p->thread.sp = (unsigned long) childregs;
14069 p->thread.sp0 = (unsigned long) (childregs+1);
14070 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14071
14072 p->thread.ip = (unsigned long) ret_from_fork;
14073
14074 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14075 struct thread_struct *prev = &prev_p->thread,
14076 *next = &next_p->thread;
14077 int cpu = smp_processor_id();
14078 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14079 + struct tss_struct *tss = init_tss + cpu;
14080 bool preload_fpu;
14081
14082 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14083 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14084 */
14085 lazy_save_gs(prev->gs);
14086
14087 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14088 + __set_fs(task_thread_info(next_p)->addr_limit);
14089 +#endif
14090 +
14091 /*
14092 * Load the per-thread Thread-Local Storage descriptor.
14093 */
14094 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14095 */
14096 arch_end_context_switch(next_p);
14097
14098 + percpu_write(current_task, next_p);
14099 + percpu_write(current_tinfo, &next_p->tinfo);
14100 +
14101 if (preload_fpu)
14102 __math_state_restore();
14103
14104 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14105 if (prev->gs | next->gs)
14106 lazy_load_gs(next->gs);
14107
14108 - percpu_write(current_task, next_p);
14109 -
14110 return prev_p;
14111 }
14112
14113 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14114 } while (count++ < 16);
14115 return 0;
14116 }
14117 -
14118 diff -urNp linux-3.0.3/arch/x86/kernel/process_64.c linux-3.0.3/arch/x86/kernel/process_64.c
14119 --- linux-3.0.3/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14120 +++ linux-3.0.3/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14121 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14122 void exit_idle(void)
14123 {
14124 /* idle loop has pid 0 */
14125 - if (current->pid)
14126 + if (task_pid_nr(current))
14127 return;
14128 __exit_idle();
14129 }
14130 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14131 struct pt_regs *childregs;
14132 struct task_struct *me = current;
14133
14134 - childregs = ((struct pt_regs *)
14135 - (THREAD_SIZE + task_stack_page(p))) - 1;
14136 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14137 *childregs = *regs;
14138
14139 childregs->ax = 0;
14140 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14141 p->thread.sp = (unsigned long) childregs;
14142 p->thread.sp0 = (unsigned long) (childregs+1);
14143 p->thread.usersp = me->thread.usersp;
14144 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14145
14146 set_tsk_thread_flag(p, TIF_FORK);
14147
14148 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14149 struct thread_struct *prev = &prev_p->thread;
14150 struct thread_struct *next = &next_p->thread;
14151 int cpu = smp_processor_id();
14152 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14153 + struct tss_struct *tss = init_tss + cpu;
14154 unsigned fsindex, gsindex;
14155 bool preload_fpu;
14156
14157 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14158 prev->usersp = percpu_read(old_rsp);
14159 percpu_write(old_rsp, next->usersp);
14160 percpu_write(current_task, next_p);
14161 + percpu_write(current_tinfo, &next_p->tinfo);
14162
14163 - percpu_write(kernel_stack,
14164 - (unsigned long)task_stack_page(next_p) +
14165 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14166 + percpu_write(kernel_stack, next->sp0);
14167
14168 /*
14169 * Now maybe reload the debug registers and handle I/O bitmaps
14170 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14171 if (!p || p == current || p->state == TASK_RUNNING)
14172 return 0;
14173 stack = (unsigned long)task_stack_page(p);
14174 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14175 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14176 return 0;
14177 fp = *(u64 *)(p->thread.sp);
14178 do {
14179 - if (fp < (unsigned long)stack ||
14180 - fp >= (unsigned long)stack+THREAD_SIZE)
14181 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14182 return 0;
14183 ip = *(u64 *)(fp+8);
14184 if (!in_sched_functions(ip))
14185 diff -urNp linux-3.0.3/arch/x86/kernel/process.c linux-3.0.3/arch/x86/kernel/process.c
14186 --- linux-3.0.3/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14187 +++ linux-3.0.3/arch/x86/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
14188 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14189
14190 void free_thread_info(struct thread_info *ti)
14191 {
14192 - free_thread_xstate(ti->task);
14193 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14194 }
14195
14196 +static struct kmem_cache *task_struct_cachep;
14197 +
14198 void arch_task_cache_init(void)
14199 {
14200 - task_xstate_cachep =
14201 - kmem_cache_create("task_xstate", xstate_size,
14202 + /* create a slab on which task_structs can be allocated */
14203 + task_struct_cachep =
14204 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14205 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14206 +
14207 + task_xstate_cachep =
14208 + kmem_cache_create("task_xstate", xstate_size,
14209 __alignof__(union thread_xstate),
14210 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14211 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14212 +}
14213 +
14214 +struct task_struct *alloc_task_struct_node(int node)
14215 +{
14216 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14217 +}
14218 +
14219 +void free_task_struct(struct task_struct *task)
14220 +{
14221 + free_thread_xstate(task);
14222 + kmem_cache_free(task_struct_cachep, task);
14223 }
14224
14225 /*
14226 @@ -70,7 +87,7 @@ void exit_thread(void)
14227 unsigned long *bp = t->io_bitmap_ptr;
14228
14229 if (bp) {
14230 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14231 + struct tss_struct *tss = init_tss + get_cpu();
14232
14233 t->io_bitmap_ptr = NULL;
14234 clear_thread_flag(TIF_IO_BITMAP);
14235 @@ -106,7 +123,7 @@ void show_regs_common(void)
14236
14237 printk(KERN_CONT "\n");
14238 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14239 - current->pid, current->comm, print_tainted(),
14240 + task_pid_nr(current), current->comm, print_tainted(),
14241 init_utsname()->release,
14242 (int)strcspn(init_utsname()->version, " "),
14243 init_utsname()->version);
14244 @@ -120,6 +137,9 @@ void flush_thread(void)
14245 {
14246 struct task_struct *tsk = current;
14247
14248 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14249 + loadsegment(gs, 0);
14250 +#endif
14251 flush_ptrace_hw_breakpoint(tsk);
14252 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14253 /*
14254 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14255 regs.di = (unsigned long) arg;
14256
14257 #ifdef CONFIG_X86_32
14258 - regs.ds = __USER_DS;
14259 - regs.es = __USER_DS;
14260 + regs.ds = __KERNEL_DS;
14261 + regs.es = __KERNEL_DS;
14262 regs.fs = __KERNEL_PERCPU;
14263 - regs.gs = __KERNEL_STACK_CANARY;
14264 + savesegment(gs, regs.gs);
14265 #else
14266 regs.ss = __KERNEL_DS;
14267 #endif
14268 @@ -403,7 +423,7 @@ void default_idle(void)
14269 EXPORT_SYMBOL(default_idle);
14270 #endif
14271
14272 -void stop_this_cpu(void *dummy)
14273 +__noreturn void stop_this_cpu(void *dummy)
14274 {
14275 local_irq_disable();
14276 /*
14277 @@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14278 }
14279 early_param("idle", idle_setup);
14280
14281 -unsigned long arch_align_stack(unsigned long sp)
14282 +#ifdef CONFIG_PAX_RANDKSTACK
14283 +asmlinkage void pax_randomize_kstack(void)
14284 {
14285 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14286 - sp -= get_random_int() % 8192;
14287 - return sp & ~0xf;
14288 -}
14289 + struct thread_struct *thread = &current->thread;
14290 + unsigned long time;
14291
14292 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14293 -{
14294 - unsigned long range_end = mm->brk + 0x02000000;
14295 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14296 -}
14297 + if (!randomize_va_space)
14298 + return;
14299 +
14300 + rdtscl(time);
14301 +
14302 + /* P4 seems to return a 0 LSB, ignore it */
14303 +#ifdef CONFIG_MPENTIUM4
14304 + time &= 0x3EUL;
14305 + time <<= 2;
14306 +#elif defined(CONFIG_X86_64)
14307 + time &= 0xFUL;
14308 + time <<= 4;
14309 +#else
14310 + time &= 0x1FUL;
14311 + time <<= 3;
14312 +#endif
14313 +
14314 + thread->sp0 ^= time;
14315 + load_sp0(init_tss + smp_processor_id(), thread);
14316
14317 +#ifdef CONFIG_X86_64
14318 + percpu_write(kernel_stack, thread->sp0);
14319 +#endif
14320 +}
14321 +#endif
14322 diff -urNp linux-3.0.3/arch/x86/kernel/ptrace.c linux-3.0.3/arch/x86/kernel/ptrace.c
14323 --- linux-3.0.3/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14324 +++ linux-3.0.3/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14325 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14326 unsigned long addr, unsigned long data)
14327 {
14328 int ret;
14329 - unsigned long __user *datap = (unsigned long __user *)data;
14330 + unsigned long __user *datap = (__force unsigned long __user *)data;
14331
14332 switch (request) {
14333 /* read the word at location addr in the USER area. */
14334 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14335 if ((int) addr < 0)
14336 return -EIO;
14337 ret = do_get_thread_area(child, addr,
14338 - (struct user_desc __user *)data);
14339 + (__force struct user_desc __user *) data);
14340 break;
14341
14342 case PTRACE_SET_THREAD_AREA:
14343 if ((int) addr < 0)
14344 return -EIO;
14345 ret = do_set_thread_area(child, addr,
14346 - (struct user_desc __user *)data, 0);
14347 + (__force struct user_desc __user *) data, 0);
14348 break;
14349 #endif
14350
14351 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14352 memset(info, 0, sizeof(*info));
14353 info->si_signo = SIGTRAP;
14354 info->si_code = si_code;
14355 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14356 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14357 }
14358
14359 void user_single_step_siginfo(struct task_struct *tsk,
14360 diff -urNp linux-3.0.3/arch/x86/kernel/pvclock.c linux-3.0.3/arch/x86/kernel/pvclock.c
14361 --- linux-3.0.3/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14362 +++ linux-3.0.3/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14363 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14364 return pv_tsc_khz;
14365 }
14366
14367 -static atomic64_t last_value = ATOMIC64_INIT(0);
14368 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14369
14370 void pvclock_resume(void)
14371 {
14372 - atomic64_set(&last_value, 0);
14373 + atomic64_set_unchecked(&last_value, 0);
14374 }
14375
14376 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14377 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14378 * updating at the same time, and one of them could be slightly behind,
14379 * making the assumption that last_value always go forward fail to hold.
14380 */
14381 - last = atomic64_read(&last_value);
14382 + last = atomic64_read_unchecked(&last_value);
14383 do {
14384 if (ret < last)
14385 return last;
14386 - last = atomic64_cmpxchg(&last_value, last, ret);
14387 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14388 } while (unlikely(last != ret));
14389
14390 return ret;
14391 diff -urNp linux-3.0.3/arch/x86/kernel/reboot.c linux-3.0.3/arch/x86/kernel/reboot.c
14392 --- linux-3.0.3/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14393 +++ linux-3.0.3/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14394 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14395 EXPORT_SYMBOL(pm_power_off);
14396
14397 static const struct desc_ptr no_idt = {};
14398 -static int reboot_mode;
14399 +static unsigned short reboot_mode;
14400 enum reboot_type reboot_type = BOOT_ACPI;
14401 int reboot_force;
14402
14403 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14404 extern const unsigned char machine_real_restart_asm[];
14405 extern const u64 machine_real_restart_gdt[3];
14406
14407 -void machine_real_restart(unsigned int type)
14408 +__noreturn void machine_real_restart(unsigned int type)
14409 {
14410 void *restart_va;
14411 unsigned long restart_pa;
14412 - void (*restart_lowmem)(unsigned int);
14413 + void (* __noreturn restart_lowmem)(unsigned int);
14414 u64 *lowmem_gdt;
14415
14416 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14417 + struct desc_struct *gdt;
14418 +#endif
14419 +
14420 local_irq_disable();
14421
14422 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14423 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14424 boot)". This seems like a fairly standard thing that gets set by
14425 REBOOT.COM programs, and the previous reset routine did this
14426 too. */
14427 - *((unsigned short *)0x472) = reboot_mode;
14428 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14429
14430 /* Patch the GDT in the low memory trampoline */
14431 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14432
14433 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14434 restart_pa = virt_to_phys(restart_va);
14435 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14436 + restart_lowmem = (void *)restart_pa;
14437
14438 /* GDT[0]: GDT self-pointer */
14439 lowmem_gdt[0] =
14440 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14441 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14442
14443 /* Jump to the identity-mapped low memory code */
14444 +
14445 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14446 + gdt = get_cpu_gdt_table(smp_processor_id());
14447 + pax_open_kernel();
14448 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14449 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14450 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14451 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14452 +#endif
14453 +#ifdef CONFIG_PAX_KERNEXEC
14454 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14455 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14456 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14457 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14458 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14459 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14460 +#endif
14461 + pax_close_kernel();
14462 +#endif
14463 +
14464 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14465 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14466 + unreachable();
14467 +#else
14468 restart_lowmem(type);
14469 +#endif
14470 +
14471 }
14472 #ifdef CONFIG_APM_MODULE
14473 EXPORT_SYMBOL(machine_real_restart);
14474 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14475 * try to force a triple fault and then cycle between hitting the keyboard
14476 * controller and doing that
14477 */
14478 -static void native_machine_emergency_restart(void)
14479 +__noreturn static void native_machine_emergency_restart(void)
14480 {
14481 int i;
14482 int attempt = 0;
14483 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14484 #endif
14485 }
14486
14487 -static void __machine_emergency_restart(int emergency)
14488 +static __noreturn void __machine_emergency_restart(int emergency)
14489 {
14490 reboot_emergency = emergency;
14491 machine_ops.emergency_restart();
14492 }
14493
14494 -static void native_machine_restart(char *__unused)
14495 +static __noreturn void native_machine_restart(char *__unused)
14496 {
14497 printk("machine restart\n");
14498
14499 @@ -662,7 +692,7 @@ static void native_machine_restart(char
14500 __machine_emergency_restart(0);
14501 }
14502
14503 -static void native_machine_halt(void)
14504 +static __noreturn void native_machine_halt(void)
14505 {
14506 /* stop other cpus and apics */
14507 machine_shutdown();
14508 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14509 stop_this_cpu(NULL);
14510 }
14511
14512 -static void native_machine_power_off(void)
14513 +__noreturn static void native_machine_power_off(void)
14514 {
14515 if (pm_power_off) {
14516 if (!reboot_force)
14517 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14518 }
14519 /* a fallback in case there is no PM info available */
14520 tboot_shutdown(TB_SHUTDOWN_HALT);
14521 + unreachable();
14522 }
14523
14524 struct machine_ops machine_ops = {
14525 diff -urNp linux-3.0.3/arch/x86/kernel/setup.c linux-3.0.3/arch/x86/kernel/setup.c
14526 --- linux-3.0.3/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14527 +++ linux-3.0.3/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14528 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14529 * area (640->1Mb) as ram even though it is not.
14530 * take them out.
14531 */
14532 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14533 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14534 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14535 }
14536
14537 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14538
14539 if (!boot_params.hdr.root_flags)
14540 root_mountflags &= ~MS_RDONLY;
14541 - init_mm.start_code = (unsigned long) _text;
14542 - init_mm.end_code = (unsigned long) _etext;
14543 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14544 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14545 init_mm.end_data = (unsigned long) _edata;
14546 init_mm.brk = _brk_end;
14547
14548 - code_resource.start = virt_to_phys(_text);
14549 - code_resource.end = virt_to_phys(_etext)-1;
14550 - data_resource.start = virt_to_phys(_etext);
14551 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14552 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14553 + data_resource.start = virt_to_phys(_sdata);
14554 data_resource.end = virt_to_phys(_edata)-1;
14555 bss_resource.start = virt_to_phys(&__bss_start);
14556 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14557 diff -urNp linux-3.0.3/arch/x86/kernel/setup_percpu.c linux-3.0.3/arch/x86/kernel/setup_percpu.c
14558 --- linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14559 +++ linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14560 @@ -21,19 +21,17 @@
14561 #include <asm/cpu.h>
14562 #include <asm/stackprotector.h>
14563
14564 -DEFINE_PER_CPU(int, cpu_number);
14565 +#ifdef CONFIG_SMP
14566 +DEFINE_PER_CPU(unsigned int, cpu_number);
14567 EXPORT_PER_CPU_SYMBOL(cpu_number);
14568 +#endif
14569
14570 -#ifdef CONFIG_X86_64
14571 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14572 -#else
14573 -#define BOOT_PERCPU_OFFSET 0
14574 -#endif
14575
14576 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14577 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14578
14579 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14580 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14581 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14582 };
14583 EXPORT_SYMBOL(__per_cpu_offset);
14584 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14585 {
14586 #ifdef CONFIG_X86_32
14587 struct desc_struct gdt;
14588 + unsigned long base = per_cpu_offset(cpu);
14589
14590 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14591 - 0x2 | DESCTYPE_S, 0x8);
14592 - gdt.s = 1;
14593 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14594 + 0x83 | DESCTYPE_S, 0xC);
14595 write_gdt_entry(get_cpu_gdt_table(cpu),
14596 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14597 #endif
14598 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14599 /* alrighty, percpu areas up and running */
14600 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14601 for_each_possible_cpu(cpu) {
14602 +#ifdef CONFIG_CC_STACKPROTECTOR
14603 +#ifdef CONFIG_X86_32
14604 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14605 +#endif
14606 +#endif
14607 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14608 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14609 per_cpu(cpu_number, cpu) = cpu;
14610 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14611 */
14612 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14613 #endif
14614 +#ifdef CONFIG_CC_STACKPROTECTOR
14615 +#ifdef CONFIG_X86_32
14616 + if (!cpu)
14617 + per_cpu(stack_canary.canary, cpu) = canary;
14618 +#endif
14619 +#endif
14620 /*
14621 * Up to this point, the boot CPU has been using .init.data
14622 * area. Reload any changed state for the boot CPU.
14623 diff -urNp linux-3.0.3/arch/x86/kernel/signal.c linux-3.0.3/arch/x86/kernel/signal.c
14624 --- linux-3.0.3/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14625 +++ linux-3.0.3/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14626 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14627 * Align the stack pointer according to the i386 ABI,
14628 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14629 */
14630 - sp = ((sp + 4) & -16ul) - 4;
14631 + sp = ((sp - 12) & -16ul) - 4;
14632 #else /* !CONFIG_X86_32 */
14633 sp = round_down(sp, 16) - 8;
14634 #endif
14635 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14636 * Return an always-bogus address instead so we will die with SIGSEGV.
14637 */
14638 if (onsigstack && !likely(on_sig_stack(sp)))
14639 - return (void __user *)-1L;
14640 + return (__force void __user *)-1L;
14641
14642 /* save i387 state */
14643 if (used_math() && save_i387_xstate(*fpstate) < 0)
14644 - return (void __user *)-1L;
14645 + return (__force void __user *)-1L;
14646
14647 return (void __user *)sp;
14648 }
14649 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14650 }
14651
14652 if (current->mm->context.vdso)
14653 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14654 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14655 else
14656 - restorer = &frame->retcode;
14657 + restorer = (void __user *)&frame->retcode;
14658 if (ka->sa.sa_flags & SA_RESTORER)
14659 restorer = ka->sa.sa_restorer;
14660
14661 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14662 * reasons and because gdb uses it as a signature to notice
14663 * signal handler stack frames.
14664 */
14665 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14666 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14667
14668 if (err)
14669 return -EFAULT;
14670 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14671 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14672
14673 /* Set up to return from userspace. */
14674 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14675 + if (current->mm->context.vdso)
14676 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14677 + else
14678 + restorer = (void __user *)&frame->retcode;
14679 if (ka->sa.sa_flags & SA_RESTORER)
14680 restorer = ka->sa.sa_restorer;
14681 put_user_ex(restorer, &frame->pretcode);
14682 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14683 * reasons and because gdb uses it as a signature to notice
14684 * signal handler stack frames.
14685 */
14686 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14687 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14688 } put_user_catch(err);
14689
14690 if (err)
14691 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14692 int signr;
14693 sigset_t *oldset;
14694
14695 + pax_track_stack();
14696 +
14697 /*
14698 * We want the common case to go fast, which is why we may in certain
14699 * cases get here from kernel mode. Just return without doing anything
14700 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14701 * X86_32: vm86 regs switched out by assembly code before reaching
14702 * here, so testing against kernel CS suffices.
14703 */
14704 - if (!user_mode(regs))
14705 + if (!user_mode_novm(regs))
14706 return;
14707
14708 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14709 diff -urNp linux-3.0.3/arch/x86/kernel/smpboot.c linux-3.0.3/arch/x86/kernel/smpboot.c
14710 --- linux-3.0.3/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14711 +++ linux-3.0.3/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14712 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14713 set_idle_for_cpu(cpu, c_idle.idle);
14714 do_rest:
14715 per_cpu(current_task, cpu) = c_idle.idle;
14716 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14717 #ifdef CONFIG_X86_32
14718 /* Stack for startup_32 can be just as for start_secondary onwards */
14719 irq_ctx_init(cpu);
14720 #else
14721 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14722 initial_gs = per_cpu_offset(cpu);
14723 - per_cpu(kernel_stack, cpu) =
14724 - (unsigned long)task_stack_page(c_idle.idle) -
14725 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14726 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14727 #endif
14728 +
14729 + pax_open_kernel();
14730 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14731 + pax_close_kernel();
14732 +
14733 initial_code = (unsigned long)start_secondary;
14734 stack_start = c_idle.idle->thread.sp;
14735
14736 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14737
14738 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14739
14740 +#ifdef CONFIG_PAX_PER_CPU_PGD
14741 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14742 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14743 + KERNEL_PGD_PTRS);
14744 +#endif
14745 +
14746 err = do_boot_cpu(apicid, cpu);
14747 if (err) {
14748 pr_debug("do_boot_cpu failed %d\n", err);
14749 diff -urNp linux-3.0.3/arch/x86/kernel/step.c linux-3.0.3/arch/x86/kernel/step.c
14750 --- linux-3.0.3/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14751 +++ linux-3.0.3/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14752 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14753 struct desc_struct *desc;
14754 unsigned long base;
14755
14756 - seg &= ~7UL;
14757 + seg >>= 3;
14758
14759 mutex_lock(&child->mm->context.lock);
14760 - if (unlikely((seg >> 3) >= child->mm->context.size))
14761 + if (unlikely(seg >= child->mm->context.size))
14762 addr = -1L; /* bogus selector, access would fault */
14763 else {
14764 desc = child->mm->context.ldt + seg;
14765 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14766 addr += base;
14767 }
14768 mutex_unlock(&child->mm->context.lock);
14769 - }
14770 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14771 + addr = ktla_ktva(addr);
14772
14773 return addr;
14774 }
14775 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14776 unsigned char opcode[15];
14777 unsigned long addr = convert_ip_to_linear(child, regs);
14778
14779 + if (addr == -EINVAL)
14780 + return 0;
14781 +
14782 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14783 for (i = 0; i < copied; i++) {
14784 switch (opcode[i]) {
14785 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14786
14787 #ifdef CONFIG_X86_64
14788 case 0x40 ... 0x4f:
14789 - if (regs->cs != __USER_CS)
14790 + if ((regs->cs & 0xffff) != __USER_CS)
14791 /* 32-bit mode: register increment */
14792 return 0;
14793 /* 64-bit mode: REX prefix */
14794 diff -urNp linux-3.0.3/arch/x86/kernel/syscall_table_32.S linux-3.0.3/arch/x86/kernel/syscall_table_32.S
14795 --- linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14796 +++ linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14797 @@ -1,3 +1,4 @@
14798 +.section .rodata,"a",@progbits
14799 ENTRY(sys_call_table)
14800 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14801 .long sys_exit
14802 diff -urNp linux-3.0.3/arch/x86/kernel/sys_i386_32.c linux-3.0.3/arch/x86/kernel/sys_i386_32.c
14803 --- linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14804 +++ linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14805 @@ -24,17 +24,224 @@
14806
14807 #include <asm/syscalls.h>
14808
14809 -/*
14810 - * Do a system call from kernel instead of calling sys_execve so we
14811 - * end up with proper pt_regs.
14812 - */
14813 -int kernel_execve(const char *filename,
14814 - const char *const argv[],
14815 - const char *const envp[])
14816 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14817 {
14818 - long __res;
14819 - asm volatile ("int $0x80"
14820 - : "=a" (__res)
14821 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14822 - return __res;
14823 + unsigned long pax_task_size = TASK_SIZE;
14824 +
14825 +#ifdef CONFIG_PAX_SEGMEXEC
14826 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14827 + pax_task_size = SEGMEXEC_TASK_SIZE;
14828 +#endif
14829 +
14830 + if (len > pax_task_size || addr > pax_task_size - len)
14831 + return -EINVAL;
14832 +
14833 + return 0;
14834 +}
14835 +
14836 +unsigned long
14837 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14838 + unsigned long len, unsigned long pgoff, unsigned long flags)
14839 +{
14840 + struct mm_struct *mm = current->mm;
14841 + struct vm_area_struct *vma;
14842 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14843 +
14844 +#ifdef CONFIG_PAX_SEGMEXEC
14845 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14846 + pax_task_size = SEGMEXEC_TASK_SIZE;
14847 +#endif
14848 +
14849 + pax_task_size -= PAGE_SIZE;
14850 +
14851 + if (len > pax_task_size)
14852 + return -ENOMEM;
14853 +
14854 + if (flags & MAP_FIXED)
14855 + return addr;
14856 +
14857 +#ifdef CONFIG_PAX_RANDMMAP
14858 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14859 +#endif
14860 +
14861 + if (addr) {
14862 + addr = PAGE_ALIGN(addr);
14863 + if (pax_task_size - len >= addr) {
14864 + vma = find_vma(mm, addr);
14865 + if (check_heap_stack_gap(vma, addr, len))
14866 + return addr;
14867 + }
14868 + }
14869 + if (len > mm->cached_hole_size) {
14870 + start_addr = addr = mm->free_area_cache;
14871 + } else {
14872 + start_addr = addr = mm->mmap_base;
14873 + mm->cached_hole_size = 0;
14874 + }
14875 +
14876 +#ifdef CONFIG_PAX_PAGEEXEC
14877 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14878 + start_addr = 0x00110000UL;
14879 +
14880 +#ifdef CONFIG_PAX_RANDMMAP
14881 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14882 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14883 +#endif
14884 +
14885 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14886 + start_addr = addr = mm->mmap_base;
14887 + else
14888 + addr = start_addr;
14889 + }
14890 +#endif
14891 +
14892 +full_search:
14893 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14894 + /* At this point: (!vma || addr < vma->vm_end). */
14895 + if (pax_task_size - len < addr) {
14896 + /*
14897 + * Start a new search - just in case we missed
14898 + * some holes.
14899 + */
14900 + if (start_addr != mm->mmap_base) {
14901 + start_addr = addr = mm->mmap_base;
14902 + mm->cached_hole_size = 0;
14903 + goto full_search;
14904 + }
14905 + return -ENOMEM;
14906 + }
14907 + if (check_heap_stack_gap(vma, addr, len))
14908 + break;
14909 + if (addr + mm->cached_hole_size < vma->vm_start)
14910 + mm->cached_hole_size = vma->vm_start - addr;
14911 + addr = vma->vm_end;
14912 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14913 + start_addr = addr = mm->mmap_base;
14914 + mm->cached_hole_size = 0;
14915 + goto full_search;
14916 + }
14917 + }
14918 +
14919 + /*
14920 + * Remember the place where we stopped the search:
14921 + */
14922 + mm->free_area_cache = addr + len;
14923 + return addr;
14924 +}
14925 +
14926 +unsigned long
14927 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14928 + const unsigned long len, const unsigned long pgoff,
14929 + const unsigned long flags)
14930 +{
14931 + struct vm_area_struct *vma;
14932 + struct mm_struct *mm = current->mm;
14933 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14934 +
14935 +#ifdef CONFIG_PAX_SEGMEXEC
14936 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14937 + pax_task_size = SEGMEXEC_TASK_SIZE;
14938 +#endif
14939 +
14940 + pax_task_size -= PAGE_SIZE;
14941 +
14942 + /* requested length too big for entire address space */
14943 + if (len > pax_task_size)
14944 + return -ENOMEM;
14945 +
14946 + if (flags & MAP_FIXED)
14947 + return addr;
14948 +
14949 +#ifdef CONFIG_PAX_PAGEEXEC
14950 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14951 + goto bottomup;
14952 +#endif
14953 +
14954 +#ifdef CONFIG_PAX_RANDMMAP
14955 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14956 +#endif
14957 +
14958 + /* requesting a specific address */
14959 + if (addr) {
14960 + addr = PAGE_ALIGN(addr);
14961 + if (pax_task_size - len >= addr) {
14962 + vma = find_vma(mm, addr);
14963 + if (check_heap_stack_gap(vma, addr, len))
14964 + return addr;
14965 + }
14966 + }
14967 +
14968 + /* check if free_area_cache is useful for us */
14969 + if (len <= mm->cached_hole_size) {
14970 + mm->cached_hole_size = 0;
14971 + mm->free_area_cache = mm->mmap_base;
14972 + }
14973 +
14974 + /* either no address requested or can't fit in requested address hole */
14975 + addr = mm->free_area_cache;
14976 +
14977 + /* make sure it can fit in the remaining address space */
14978 + if (addr > len) {
14979 + vma = find_vma(mm, addr-len);
14980 + if (check_heap_stack_gap(vma, addr - len, len))
14981 + /* remember the address as a hint for next time */
14982 + return (mm->free_area_cache = addr-len);
14983 + }
14984 +
14985 + if (mm->mmap_base < len)
14986 + goto bottomup;
14987 +
14988 + addr = mm->mmap_base-len;
14989 +
14990 + do {
14991 + /*
14992 + * Lookup failure means no vma is above this address,
14993 + * else if new region fits below vma->vm_start,
14994 + * return with success:
14995 + */
14996 + vma = find_vma(mm, addr);
14997 + if (check_heap_stack_gap(vma, addr, len))
14998 + /* remember the address as a hint for next time */
14999 + return (mm->free_area_cache = addr);
15000 +
15001 + /* remember the largest hole we saw so far */
15002 + if (addr + mm->cached_hole_size < vma->vm_start)
15003 + mm->cached_hole_size = vma->vm_start - addr;
15004 +
15005 + /* try just below the current vma->vm_start */
15006 + addr = skip_heap_stack_gap(vma, len);
15007 + } while (!IS_ERR_VALUE(addr));
15008 +
15009 +bottomup:
15010 + /*
15011 + * A failed mmap() very likely causes application failure,
15012 + * so fall back to the bottom-up function here. This scenario
15013 + * can happen with large stack limits and large mmap()
15014 + * allocations.
15015 + */
15016 +
15017 +#ifdef CONFIG_PAX_SEGMEXEC
15018 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15019 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15020 + else
15021 +#endif
15022 +
15023 + mm->mmap_base = TASK_UNMAPPED_BASE;
15024 +
15025 +#ifdef CONFIG_PAX_RANDMMAP
15026 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15027 + mm->mmap_base += mm->delta_mmap;
15028 +#endif
15029 +
15030 + mm->free_area_cache = mm->mmap_base;
15031 + mm->cached_hole_size = ~0UL;
15032 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15033 + /*
15034 + * Restore the topdown base:
15035 + */
15036 + mm->mmap_base = base;
15037 + mm->free_area_cache = base;
15038 + mm->cached_hole_size = ~0UL;
15039 +
15040 + return addr;
15041 }
15042 diff -urNp linux-3.0.3/arch/x86/kernel/sys_x86_64.c linux-3.0.3/arch/x86/kernel/sys_x86_64.c
15043 --- linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15044 +++ linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15045 @@ -32,8 +32,8 @@ out:
15046 return error;
15047 }
15048
15049 -static void find_start_end(unsigned long flags, unsigned long *begin,
15050 - unsigned long *end)
15051 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15052 + unsigned long *begin, unsigned long *end)
15053 {
15054 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15055 unsigned long new_begin;
15056 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15057 *begin = new_begin;
15058 }
15059 } else {
15060 - *begin = TASK_UNMAPPED_BASE;
15061 + *begin = mm->mmap_base;
15062 *end = TASK_SIZE;
15063 }
15064 }
15065 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15066 if (flags & MAP_FIXED)
15067 return addr;
15068
15069 - find_start_end(flags, &begin, &end);
15070 + find_start_end(mm, flags, &begin, &end);
15071
15072 if (len > end)
15073 return -ENOMEM;
15074
15075 +#ifdef CONFIG_PAX_RANDMMAP
15076 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15077 +#endif
15078 +
15079 if (addr) {
15080 addr = PAGE_ALIGN(addr);
15081 vma = find_vma(mm, addr);
15082 - if (end - len >= addr &&
15083 - (!vma || addr + len <= vma->vm_start))
15084 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15085 return addr;
15086 }
15087 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15088 @@ -106,7 +109,7 @@ full_search:
15089 }
15090 return -ENOMEM;
15091 }
15092 - if (!vma || addr + len <= vma->vm_start) {
15093 + if (check_heap_stack_gap(vma, addr, len)) {
15094 /*
15095 * Remember the place where we stopped the search:
15096 */
15097 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15098 {
15099 struct vm_area_struct *vma;
15100 struct mm_struct *mm = current->mm;
15101 - unsigned long addr = addr0;
15102 + unsigned long base = mm->mmap_base, addr = addr0;
15103
15104 /* requested length too big for entire address space */
15105 if (len > TASK_SIZE)
15106 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15107 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15108 goto bottomup;
15109
15110 +#ifdef CONFIG_PAX_RANDMMAP
15111 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15112 +#endif
15113 +
15114 /* requesting a specific address */
15115 if (addr) {
15116 addr = PAGE_ALIGN(addr);
15117 - vma = find_vma(mm, addr);
15118 - if (TASK_SIZE - len >= addr &&
15119 - (!vma || addr + len <= vma->vm_start))
15120 - return addr;
15121 + if (TASK_SIZE - len >= addr) {
15122 + vma = find_vma(mm, addr);
15123 + if (check_heap_stack_gap(vma, addr, len))
15124 + return addr;
15125 + }
15126 }
15127
15128 /* check if free_area_cache is useful for us */
15129 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15130 /* make sure it can fit in the remaining address space */
15131 if (addr > len) {
15132 vma = find_vma(mm, addr-len);
15133 - if (!vma || addr <= vma->vm_start)
15134 + if (check_heap_stack_gap(vma, addr - len, len))
15135 /* remember the address as a hint for next time */
15136 return mm->free_area_cache = addr-len;
15137 }
15138 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15139 * return with success:
15140 */
15141 vma = find_vma(mm, addr);
15142 - if (!vma || addr+len <= vma->vm_start)
15143 + if (check_heap_stack_gap(vma, addr, len))
15144 /* remember the address as a hint for next time */
15145 return mm->free_area_cache = addr;
15146
15147 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15148 mm->cached_hole_size = vma->vm_start - addr;
15149
15150 /* try just below the current vma->vm_start */
15151 - addr = vma->vm_start-len;
15152 - } while (len < vma->vm_start);
15153 + addr = skip_heap_stack_gap(vma, len);
15154 + } while (!IS_ERR_VALUE(addr));
15155
15156 bottomup:
15157 /*
15158 @@ -198,13 +206,21 @@ bottomup:
15159 * can happen with large stack limits and large mmap()
15160 * allocations.
15161 */
15162 + mm->mmap_base = TASK_UNMAPPED_BASE;
15163 +
15164 +#ifdef CONFIG_PAX_RANDMMAP
15165 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15166 + mm->mmap_base += mm->delta_mmap;
15167 +#endif
15168 +
15169 + mm->free_area_cache = mm->mmap_base;
15170 mm->cached_hole_size = ~0UL;
15171 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15172 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15173 /*
15174 * Restore the topdown base:
15175 */
15176 - mm->free_area_cache = mm->mmap_base;
15177 + mm->mmap_base = base;
15178 + mm->free_area_cache = base;
15179 mm->cached_hole_size = ~0UL;
15180
15181 return addr;
15182 diff -urNp linux-3.0.3/arch/x86/kernel/tboot.c linux-3.0.3/arch/x86/kernel/tboot.c
15183 --- linux-3.0.3/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15184 +++ linux-3.0.3/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15185 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15186
15187 void tboot_shutdown(u32 shutdown_type)
15188 {
15189 - void (*shutdown)(void);
15190 + void (* __noreturn shutdown)(void);
15191
15192 if (!tboot_enabled())
15193 return;
15194 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15195
15196 switch_to_tboot_pt();
15197
15198 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15199 + shutdown = (void *)tboot->shutdown_entry;
15200 shutdown();
15201
15202 /* should not reach here */
15203 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15204 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15205 }
15206
15207 -static atomic_t ap_wfs_count;
15208 +static atomic_unchecked_t ap_wfs_count;
15209
15210 static int tboot_wait_for_aps(int num_aps)
15211 {
15212 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15213 {
15214 switch (action) {
15215 case CPU_DYING:
15216 - atomic_inc(&ap_wfs_count);
15217 + atomic_inc_unchecked(&ap_wfs_count);
15218 if (num_online_cpus() == 1)
15219 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15220 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15221 return NOTIFY_BAD;
15222 break;
15223 }
15224 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15225
15226 tboot_create_trampoline();
15227
15228 - atomic_set(&ap_wfs_count, 0);
15229 + atomic_set_unchecked(&ap_wfs_count, 0);
15230 register_hotcpu_notifier(&tboot_cpu_notifier);
15231 return 0;
15232 }
15233 diff -urNp linux-3.0.3/arch/x86/kernel/time.c linux-3.0.3/arch/x86/kernel/time.c
15234 --- linux-3.0.3/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15235 +++ linux-3.0.3/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15236 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15237 {
15238 unsigned long pc = instruction_pointer(regs);
15239
15240 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15241 + if (!user_mode(regs) && in_lock_functions(pc)) {
15242 #ifdef CONFIG_FRAME_POINTER
15243 - return *(unsigned long *)(regs->bp + sizeof(long));
15244 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15245 #else
15246 unsigned long *sp =
15247 (unsigned long *)kernel_stack_pointer(regs);
15248 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15249 * or above a saved flags. Eflags has bits 22-31 zero,
15250 * kernel addresses don't.
15251 */
15252 +
15253 +#ifdef CONFIG_PAX_KERNEXEC
15254 + return ktla_ktva(sp[0]);
15255 +#else
15256 if (sp[0] >> 22)
15257 return sp[0];
15258 if (sp[1] >> 22)
15259 return sp[1];
15260 #endif
15261 +
15262 +#endif
15263 }
15264 return pc;
15265 }
15266 diff -urNp linux-3.0.3/arch/x86/kernel/tls.c linux-3.0.3/arch/x86/kernel/tls.c
15267 --- linux-3.0.3/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15268 +++ linux-3.0.3/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15269 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15270 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15271 return -EINVAL;
15272
15273 +#ifdef CONFIG_PAX_SEGMEXEC
15274 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15275 + return -EINVAL;
15276 +#endif
15277 +
15278 set_tls_desc(p, idx, &info, 1);
15279
15280 return 0;
15281 diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_32.S linux-3.0.3/arch/x86/kernel/trampoline_32.S
15282 --- linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15283 +++ linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15284 @@ -32,6 +32,12 @@
15285 #include <asm/segment.h>
15286 #include <asm/page_types.h>
15287
15288 +#ifdef CONFIG_PAX_KERNEXEC
15289 +#define ta(X) (X)
15290 +#else
15291 +#define ta(X) ((X) - __PAGE_OFFSET)
15292 +#endif
15293 +
15294 #ifdef CONFIG_SMP
15295
15296 .section ".x86_trampoline","a"
15297 @@ -62,7 +68,7 @@ r_base = .
15298 inc %ax # protected mode (PE) bit
15299 lmsw %ax # into protected mode
15300 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15301 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15302 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15303
15304 # These need to be in the same 64K segment as the above;
15305 # hence we don't use the boot_gdt_descr defined in head.S
15306 diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_64.S linux-3.0.3/arch/x86/kernel/trampoline_64.S
15307 --- linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15308 +++ linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15309 @@ -90,7 +90,7 @@ startup_32:
15310 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15311 movl %eax, %ds
15312
15313 - movl $X86_CR4_PAE, %eax
15314 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15315 movl %eax, %cr4 # Enable PAE mode
15316
15317 # Setup trampoline 4 level pagetables
15318 @@ -138,7 +138,7 @@ tidt:
15319 # so the kernel can live anywhere
15320 .balign 4
15321 tgdt:
15322 - .short tgdt_end - tgdt # gdt limit
15323 + .short tgdt_end - tgdt - 1 # gdt limit
15324 .long tgdt - r_base
15325 .short 0
15326 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15327 diff -urNp linux-3.0.3/arch/x86/kernel/traps.c linux-3.0.3/arch/x86/kernel/traps.c
15328 --- linux-3.0.3/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15329 +++ linux-3.0.3/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15330 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15331
15332 /* Do we ignore FPU interrupts ? */
15333 char ignore_fpu_irq;
15334 -
15335 -/*
15336 - * The IDT has to be page-aligned to simplify the Pentium
15337 - * F0 0F bug workaround.
15338 - */
15339 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15340 #endif
15341
15342 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15343 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15344 }
15345
15346 static void __kprobes
15347 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15348 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15349 long error_code, siginfo_t *info)
15350 {
15351 struct task_struct *tsk = current;
15352
15353 #ifdef CONFIG_X86_32
15354 - if (regs->flags & X86_VM_MASK) {
15355 + if (v8086_mode(regs)) {
15356 /*
15357 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15358 * On nmi (interrupt 2), do_trap should not be called.
15359 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15360 }
15361 #endif
15362
15363 - if (!user_mode(regs))
15364 + if (!user_mode_novm(regs))
15365 goto kernel_trap;
15366
15367 #ifdef CONFIG_X86_32
15368 @@ -157,7 +151,7 @@ trap_signal:
15369 printk_ratelimit()) {
15370 printk(KERN_INFO
15371 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15372 - tsk->comm, tsk->pid, str,
15373 + tsk->comm, task_pid_nr(tsk), str,
15374 regs->ip, regs->sp, error_code);
15375 print_vma_addr(" in ", regs->ip);
15376 printk("\n");
15377 @@ -174,8 +168,20 @@ kernel_trap:
15378 if (!fixup_exception(regs)) {
15379 tsk->thread.error_code = error_code;
15380 tsk->thread.trap_no = trapnr;
15381 +
15382 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15383 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15384 + str = "PAX: suspicious stack segment fault";
15385 +#endif
15386 +
15387 die(str, regs, error_code);
15388 }
15389 +
15390 +#ifdef CONFIG_PAX_REFCOUNT
15391 + if (trapnr == 4)
15392 + pax_report_refcount_overflow(regs);
15393 +#endif
15394 +
15395 return;
15396
15397 #ifdef CONFIG_X86_32
15398 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15399 conditional_sti(regs);
15400
15401 #ifdef CONFIG_X86_32
15402 - if (regs->flags & X86_VM_MASK)
15403 + if (v8086_mode(regs))
15404 goto gp_in_vm86;
15405 #endif
15406
15407 tsk = current;
15408 - if (!user_mode(regs))
15409 + if (!user_mode_novm(regs))
15410 goto gp_in_kernel;
15411
15412 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15413 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15414 + struct mm_struct *mm = tsk->mm;
15415 + unsigned long limit;
15416 +
15417 + down_write(&mm->mmap_sem);
15418 + limit = mm->context.user_cs_limit;
15419 + if (limit < TASK_SIZE) {
15420 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15421 + up_write(&mm->mmap_sem);
15422 + return;
15423 + }
15424 + up_write(&mm->mmap_sem);
15425 + }
15426 +#endif
15427 +
15428 tsk->thread.error_code = error_code;
15429 tsk->thread.trap_no = 13;
15430
15431 @@ -304,6 +326,13 @@ gp_in_kernel:
15432 if (notify_die(DIE_GPF, "general protection fault", regs,
15433 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15434 return;
15435 +
15436 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15437 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15438 + die("PAX: suspicious general protection fault", regs, error_code);
15439 + else
15440 +#endif
15441 +
15442 die("general protection fault", regs, error_code);
15443 }
15444
15445 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15446 dotraplinkage notrace __kprobes void
15447 do_nmi(struct pt_regs *regs, long error_code)
15448 {
15449 +
15450 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15451 + if (!user_mode(regs)) {
15452 + unsigned long cs = regs->cs & 0xFFFF;
15453 + unsigned long ip = ktva_ktla(regs->ip);
15454 +
15455 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15456 + regs->ip = ip;
15457 + }
15458 +#endif
15459 +
15460 nmi_enter();
15461
15462 inc_irq_stat(__nmi_count);
15463 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15464 /* It's safe to allow irq's after DR6 has been saved */
15465 preempt_conditional_sti(regs);
15466
15467 - if (regs->flags & X86_VM_MASK) {
15468 + if (v8086_mode(regs)) {
15469 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15470 error_code, 1);
15471 preempt_conditional_cli(regs);
15472 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15473 * We already checked v86 mode above, so we can check for kernel mode
15474 * by just checking the CPL of CS.
15475 */
15476 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15477 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15478 tsk->thread.debugreg6 &= ~DR_STEP;
15479 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15480 regs->flags &= ~X86_EFLAGS_TF;
15481 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15482 return;
15483 conditional_sti(regs);
15484
15485 - if (!user_mode_vm(regs))
15486 + if (!user_mode(regs))
15487 {
15488 if (!fixup_exception(regs)) {
15489 task->thread.error_code = error_code;
15490 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15491 void __math_state_restore(void)
15492 {
15493 struct thread_info *thread = current_thread_info();
15494 - struct task_struct *tsk = thread->task;
15495 + struct task_struct *tsk = current;
15496
15497 /*
15498 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15499 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15500 */
15501 asmlinkage void math_state_restore(void)
15502 {
15503 - struct thread_info *thread = current_thread_info();
15504 - struct task_struct *tsk = thread->task;
15505 + struct task_struct *tsk = current;
15506
15507 if (!tsk_used_math(tsk)) {
15508 local_irq_enable();
15509 diff -urNp linux-3.0.3/arch/x86/kernel/verify_cpu.S linux-3.0.3/arch/x86/kernel/verify_cpu.S
15510 --- linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15511 +++ linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15512 @@ -20,6 +20,7 @@
15513 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15514 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15515 * arch/x86/kernel/head_32.S: processor startup
15516 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15517 *
15518 * verify_cpu, returns the status of longmode and SSE in register %eax.
15519 * 0: Success 1: Failure
15520 diff -urNp linux-3.0.3/arch/x86/kernel/vm86_32.c linux-3.0.3/arch/x86/kernel/vm86_32.c
15521 --- linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15522 +++ linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15523 @@ -41,6 +41,7 @@
15524 #include <linux/ptrace.h>
15525 #include <linux/audit.h>
15526 #include <linux/stddef.h>
15527 +#include <linux/grsecurity.h>
15528
15529 #include <asm/uaccess.h>
15530 #include <asm/io.h>
15531 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15532 do_exit(SIGSEGV);
15533 }
15534
15535 - tss = &per_cpu(init_tss, get_cpu());
15536 + tss = init_tss + get_cpu();
15537 current->thread.sp0 = current->thread.saved_sp0;
15538 current->thread.sysenter_cs = __KERNEL_CS;
15539 load_sp0(tss, &current->thread);
15540 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15541 struct task_struct *tsk;
15542 int tmp, ret = -EPERM;
15543
15544 +#ifdef CONFIG_GRKERNSEC_VM86
15545 + if (!capable(CAP_SYS_RAWIO)) {
15546 + gr_handle_vm86();
15547 + goto out;
15548 + }
15549 +#endif
15550 +
15551 tsk = current;
15552 if (tsk->thread.saved_sp0)
15553 goto out;
15554 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15555 int tmp, ret;
15556 struct vm86plus_struct __user *v86;
15557
15558 +#ifdef CONFIG_GRKERNSEC_VM86
15559 + if (!capable(CAP_SYS_RAWIO)) {
15560 + gr_handle_vm86();
15561 + ret = -EPERM;
15562 + goto out;
15563 + }
15564 +#endif
15565 +
15566 tsk = current;
15567 switch (cmd) {
15568 case VM86_REQUEST_IRQ:
15569 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15570 tsk->thread.saved_fs = info->regs32->fs;
15571 tsk->thread.saved_gs = get_user_gs(info->regs32);
15572
15573 - tss = &per_cpu(init_tss, get_cpu());
15574 + tss = init_tss + get_cpu();
15575 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15576 if (cpu_has_sep)
15577 tsk->thread.sysenter_cs = 0;
15578 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15579 goto cannot_handle;
15580 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15581 goto cannot_handle;
15582 - intr_ptr = (unsigned long __user *) (i << 2);
15583 + intr_ptr = (__force unsigned long __user *) (i << 2);
15584 if (get_user(segoffs, intr_ptr))
15585 goto cannot_handle;
15586 if ((segoffs >> 16) == BIOSSEG)
15587 diff -urNp linux-3.0.3/arch/x86/kernel/vmlinux.lds.S linux-3.0.3/arch/x86/kernel/vmlinux.lds.S
15588 --- linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15589 +++ linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15590 @@ -26,6 +26,13 @@
15591 #include <asm/page_types.h>
15592 #include <asm/cache.h>
15593 #include <asm/boot.h>
15594 +#include <asm/segment.h>
15595 +
15596 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15597 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15598 +#else
15599 +#define __KERNEL_TEXT_OFFSET 0
15600 +#endif
15601
15602 #undef i386 /* in case the preprocessor is a 32bit one */
15603
15604 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15605
15606 PHDRS {
15607 text PT_LOAD FLAGS(5); /* R_E */
15608 +#ifdef CONFIG_X86_32
15609 + module PT_LOAD FLAGS(5); /* R_E */
15610 +#endif
15611 +#ifdef CONFIG_XEN
15612 + rodata PT_LOAD FLAGS(5); /* R_E */
15613 +#else
15614 + rodata PT_LOAD FLAGS(4); /* R__ */
15615 +#endif
15616 data PT_LOAD FLAGS(6); /* RW_ */
15617 #ifdef CONFIG_X86_64
15618 user PT_LOAD FLAGS(5); /* R_E */
15619 +#endif
15620 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15621 #ifdef CONFIG_SMP
15622 percpu PT_LOAD FLAGS(6); /* RW_ */
15623 #endif
15624 + text.init PT_LOAD FLAGS(5); /* R_E */
15625 + text.exit PT_LOAD FLAGS(5); /* R_E */
15626 init PT_LOAD FLAGS(7); /* RWE */
15627 -#endif
15628 note PT_NOTE FLAGS(0); /* ___ */
15629 }
15630
15631 SECTIONS
15632 {
15633 #ifdef CONFIG_X86_32
15634 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15635 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15636 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15637 #else
15638 - . = __START_KERNEL;
15639 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15640 + . = __START_KERNEL;
15641 #endif
15642
15643 /* Text and read-only data */
15644 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15645 - _text = .;
15646 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15647 /* bootstrapping code */
15648 +#ifdef CONFIG_X86_32
15649 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15650 +#else
15651 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15652 +#endif
15653 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15654 + _text = .;
15655 HEAD_TEXT
15656 #ifdef CONFIG_X86_32
15657 . = ALIGN(PAGE_SIZE);
15658 @@ -109,13 +131,47 @@ SECTIONS
15659 IRQENTRY_TEXT
15660 *(.fixup)
15661 *(.gnu.warning)
15662 - /* End of text section */
15663 - _etext = .;
15664 } :text = 0x9090
15665
15666 - NOTES :text :note
15667 + . += __KERNEL_TEXT_OFFSET;
15668 +
15669 +#ifdef CONFIG_X86_32
15670 + . = ALIGN(PAGE_SIZE);
15671 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15672 +
15673 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15674 + MODULES_EXEC_VADDR = .;
15675 + BYTE(0)
15676 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15677 + . = ALIGN(HPAGE_SIZE);
15678 + MODULES_EXEC_END = . - 1;
15679 +#endif
15680 +
15681 + } :module
15682 +#endif
15683 +
15684 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15685 + /* End of text section */
15686 + _etext = . - __KERNEL_TEXT_OFFSET;
15687 + }
15688 +
15689 +#ifdef CONFIG_X86_32
15690 + . = ALIGN(PAGE_SIZE);
15691 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15692 + *(.idt)
15693 + . = ALIGN(PAGE_SIZE);
15694 + *(.empty_zero_page)
15695 + *(.initial_pg_fixmap)
15696 + *(.initial_pg_pmd)
15697 + *(.initial_page_table)
15698 + *(.swapper_pg_dir)
15699 + } :rodata
15700 +#endif
15701 +
15702 + . = ALIGN(PAGE_SIZE);
15703 + NOTES :rodata :note
15704
15705 - EXCEPTION_TABLE(16) :text = 0x9090
15706 + EXCEPTION_TABLE(16) :rodata
15707
15708 #if defined(CONFIG_DEBUG_RODATA)
15709 /* .text should occupy whole number of pages */
15710 @@ -127,16 +183,20 @@ SECTIONS
15711
15712 /* Data */
15713 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15714 +
15715 +#ifdef CONFIG_PAX_KERNEXEC
15716 + . = ALIGN(HPAGE_SIZE);
15717 +#else
15718 + . = ALIGN(PAGE_SIZE);
15719 +#endif
15720 +
15721 /* Start of data section */
15722 _sdata = .;
15723
15724 /* init_task */
15725 INIT_TASK_DATA(THREAD_SIZE)
15726
15727 -#ifdef CONFIG_X86_32
15728 - /* 32 bit has nosave before _edata */
15729 NOSAVE_DATA
15730 -#endif
15731
15732 PAGE_ALIGNED_DATA(PAGE_SIZE)
15733
15734 @@ -208,12 +268,19 @@ SECTIONS
15735 #endif /* CONFIG_X86_64 */
15736
15737 /* Init code and data - will be freed after init */
15738 - . = ALIGN(PAGE_SIZE);
15739 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15740 + BYTE(0)
15741 +
15742 +#ifdef CONFIG_PAX_KERNEXEC
15743 + . = ALIGN(HPAGE_SIZE);
15744 +#else
15745 + . = ALIGN(PAGE_SIZE);
15746 +#endif
15747 +
15748 __init_begin = .; /* paired with __init_end */
15749 - }
15750 + } :init.begin
15751
15752 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15753 +#ifdef CONFIG_SMP
15754 /*
15755 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15756 * output PHDR, so the next output section - .init.text - should
15757 @@ -222,12 +289,27 @@ SECTIONS
15758 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15759 #endif
15760
15761 - INIT_TEXT_SECTION(PAGE_SIZE)
15762 -#ifdef CONFIG_X86_64
15763 - :init
15764 -#endif
15765 + . = ALIGN(PAGE_SIZE);
15766 + init_begin = .;
15767 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15768 + VMLINUX_SYMBOL(_sinittext) = .;
15769 + INIT_TEXT
15770 + VMLINUX_SYMBOL(_einittext) = .;
15771 + . = ALIGN(PAGE_SIZE);
15772 + } :text.init
15773
15774 - INIT_DATA_SECTION(16)
15775 + /*
15776 + * .exit.text is discard at runtime, not link time, to deal with
15777 + * references from .altinstructions and .eh_frame
15778 + */
15779 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15780 + EXIT_TEXT
15781 + . = ALIGN(16);
15782 + } :text.exit
15783 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15784 +
15785 + . = ALIGN(PAGE_SIZE);
15786 + INIT_DATA_SECTION(16) :init
15787
15788 /*
15789 * Code and data for a variety of lowlevel trampolines, to be
15790 @@ -301,19 +383,12 @@ SECTIONS
15791 }
15792
15793 . = ALIGN(8);
15794 - /*
15795 - * .exit.text is discard at runtime, not link time, to deal with
15796 - * references from .altinstructions and .eh_frame
15797 - */
15798 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15799 - EXIT_TEXT
15800 - }
15801
15802 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15803 EXIT_DATA
15804 }
15805
15806 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15807 +#ifndef CONFIG_SMP
15808 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15809 #endif
15810
15811 @@ -332,16 +407,10 @@ SECTIONS
15812 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15813 __smp_locks = .;
15814 *(.smp_locks)
15815 - . = ALIGN(PAGE_SIZE);
15816 __smp_locks_end = .;
15817 + . = ALIGN(PAGE_SIZE);
15818 }
15819
15820 -#ifdef CONFIG_X86_64
15821 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15822 - NOSAVE_DATA
15823 - }
15824 -#endif
15825 -
15826 /* BSS */
15827 . = ALIGN(PAGE_SIZE);
15828 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15829 @@ -357,6 +426,7 @@ SECTIONS
15830 __brk_base = .;
15831 . += 64 * 1024; /* 64k alignment slop space */
15832 *(.brk_reservation) /* areas brk users have reserved */
15833 + . = ALIGN(HPAGE_SIZE);
15834 __brk_limit = .;
15835 }
15836
15837 @@ -383,13 +453,12 @@ SECTIONS
15838 * for the boot processor.
15839 */
15840 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15841 -INIT_PER_CPU(gdt_page);
15842 INIT_PER_CPU(irq_stack_union);
15843
15844 /*
15845 * Build-time check on the image size:
15846 */
15847 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15848 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15849 "kernel image bigger than KERNEL_IMAGE_SIZE");
15850
15851 #ifdef CONFIG_SMP
15852 diff -urNp linux-3.0.3/arch/x86/kernel/vsyscall_64.c linux-3.0.3/arch/x86/kernel/vsyscall_64.c
15853 --- linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15854 +++ linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15855 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15856 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15857 {
15858 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15859 - .sysctl_enabled = 1,
15860 + .sysctl_enabled = 0,
15861 };
15862
15863 void update_vsyscall_tz(void)
15864 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15865 static ctl_table kernel_table2[] = {
15866 { .procname = "vsyscall64",
15867 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15868 - .mode = 0644,
15869 + .mode = 0444,
15870 .proc_handler = proc_dointvec },
15871 {}
15872 };
15873 diff -urNp linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c
15874 --- linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15875 +++ linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15876 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15877 EXPORT_SYMBOL(copy_user_generic_string);
15878 EXPORT_SYMBOL(copy_user_generic_unrolled);
15879 EXPORT_SYMBOL(__copy_user_nocache);
15880 -EXPORT_SYMBOL(_copy_from_user);
15881 -EXPORT_SYMBOL(_copy_to_user);
15882
15883 EXPORT_SYMBOL(copy_page);
15884 EXPORT_SYMBOL(clear_page);
15885 diff -urNp linux-3.0.3/arch/x86/kernel/xsave.c linux-3.0.3/arch/x86/kernel/xsave.c
15886 --- linux-3.0.3/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15887 +++ linux-3.0.3/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15888 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15889 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15890 return -EINVAL;
15891
15892 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15893 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15894 fx_sw_user->extended_size -
15895 FP_XSTATE_MAGIC2_SIZE));
15896 if (err)
15897 @@ -267,7 +267,7 @@ fx_only:
15898 * the other extended state.
15899 */
15900 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15901 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15902 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15903 }
15904
15905 /*
15906 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15907 if (use_xsave())
15908 err = restore_user_xstate(buf);
15909 else
15910 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15911 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15912 buf);
15913 if (unlikely(err)) {
15914 /*
15915 diff -urNp linux-3.0.3/arch/x86/kvm/emulate.c linux-3.0.3/arch/x86/kvm/emulate.c
15916 --- linux-3.0.3/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15917 +++ linux-3.0.3/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15918 @@ -96,7 +96,7 @@
15919 #define Src2ImmByte (2<<29)
15920 #define Src2One (3<<29)
15921 #define Src2Imm (4<<29)
15922 -#define Src2Mask (7<<29)
15923 +#define Src2Mask (7U<<29)
15924
15925 #define X2(x...) x, x
15926 #define X3(x...) X2(x), x
15927 @@ -207,6 +207,7 @@ struct gprefix {
15928
15929 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15930 do { \
15931 + unsigned long _tmp; \
15932 __asm__ __volatile__ ( \
15933 _PRE_EFLAGS("0", "4", "2") \
15934 _op _suffix " %"_x"3,%1; " \
15935 @@ -220,8 +221,6 @@ struct gprefix {
15936 /* Raw emulation: instruction has two explicit operands. */
15937 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15938 do { \
15939 - unsigned long _tmp; \
15940 - \
15941 switch ((_dst).bytes) { \
15942 case 2: \
15943 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15944 @@ -237,7 +236,6 @@ struct gprefix {
15945
15946 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15947 do { \
15948 - unsigned long _tmp; \
15949 switch ((_dst).bytes) { \
15950 case 1: \
15951 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15952 diff -urNp linux-3.0.3/arch/x86/kvm/lapic.c linux-3.0.3/arch/x86/kvm/lapic.c
15953 --- linux-3.0.3/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15954 +++ linux-3.0.3/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15955 @@ -53,7 +53,7 @@
15956 #define APIC_BUS_CYCLE_NS 1
15957
15958 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15959 -#define apic_debug(fmt, arg...)
15960 +#define apic_debug(fmt, arg...) do {} while (0)
15961
15962 #define APIC_LVT_NUM 6
15963 /* 14 is the version for Xeon and Pentium 8.4.8*/
15964 diff -urNp linux-3.0.3/arch/x86/kvm/mmu.c linux-3.0.3/arch/x86/kvm/mmu.c
15965 --- linux-3.0.3/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15966 +++ linux-3.0.3/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15967 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15968
15969 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15970
15971 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15972 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15973
15974 /*
15975 * Assume that the pte write on a page table of the same type
15976 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15977 }
15978
15979 spin_lock(&vcpu->kvm->mmu_lock);
15980 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15981 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15982 gentry = 0;
15983 kvm_mmu_free_some_pages(vcpu);
15984 ++vcpu->kvm->stat.mmu_pte_write;
15985 diff -urNp linux-3.0.3/arch/x86/kvm/paging_tmpl.h linux-3.0.3/arch/x86/kvm/paging_tmpl.h
15986 --- linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15987 +++ linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15988 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15989 unsigned long mmu_seq;
15990 bool map_writable;
15991
15992 + pax_track_stack();
15993 +
15994 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15995
15996 r = mmu_topup_memory_caches(vcpu);
15997 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15998 if (need_flush)
15999 kvm_flush_remote_tlbs(vcpu->kvm);
16000
16001 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16002 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16003
16004 spin_unlock(&vcpu->kvm->mmu_lock);
16005
16006 diff -urNp linux-3.0.3/arch/x86/kvm/svm.c linux-3.0.3/arch/x86/kvm/svm.c
16007 --- linux-3.0.3/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16008 +++ linux-3.0.3/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16009 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16010 int cpu = raw_smp_processor_id();
16011
16012 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16013 +
16014 + pax_open_kernel();
16015 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16016 + pax_close_kernel();
16017 +
16018 load_TR_desc();
16019 }
16020
16021 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16022 #endif
16023 #endif
16024
16025 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16026 + __set_fs(current_thread_info()->addr_limit);
16027 +#endif
16028 +
16029 reload_tss(vcpu);
16030
16031 local_irq_disable();
16032 diff -urNp linux-3.0.3/arch/x86/kvm/vmx.c linux-3.0.3/arch/x86/kvm/vmx.c
16033 --- linux-3.0.3/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16034 +++ linux-3.0.3/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16035 @@ -797,7 +797,11 @@ static void reload_tss(void)
16036 struct desc_struct *descs;
16037
16038 descs = (void *)gdt->address;
16039 +
16040 + pax_open_kernel();
16041 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16042 + pax_close_kernel();
16043 +
16044 load_TR_desc();
16045 }
16046
16047 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16048 if (!cpu_has_vmx_flexpriority())
16049 flexpriority_enabled = 0;
16050
16051 - if (!cpu_has_vmx_tpr_shadow())
16052 - kvm_x86_ops->update_cr8_intercept = NULL;
16053 + if (!cpu_has_vmx_tpr_shadow()) {
16054 + pax_open_kernel();
16055 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16056 + pax_close_kernel();
16057 + }
16058
16059 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16060 kvm_disable_largepages();
16061 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16062 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16063
16064 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16065 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16066 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16067 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16068 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16069 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16070 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16071 "jmp .Lkvm_vmx_return \n\t"
16072 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16073 ".Lkvm_vmx_return: "
16074 +
16075 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16076 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16077 + ".Lkvm_vmx_return2: "
16078 +#endif
16079 +
16080 /* Save guest registers, load host registers, keep flags */
16081 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16082 "pop %0 \n\t"
16083 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16084 #endif
16085 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16086 [wordsize]"i"(sizeof(ulong))
16087 +
16088 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16089 + ,[cs]"i"(__KERNEL_CS)
16090 +#endif
16091 +
16092 : "cc", "memory"
16093 , R"ax", R"bx", R"di", R"si"
16094 #ifdef CONFIG_X86_64
16095 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16096
16097 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16098
16099 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16100 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16101 +
16102 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16103 + loadsegment(fs, __KERNEL_PERCPU);
16104 +#endif
16105 +
16106 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16107 + __set_fs(current_thread_info()->addr_limit);
16108 +#endif
16109 +
16110 vmx->launched = 1;
16111
16112 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16113 diff -urNp linux-3.0.3/arch/x86/kvm/x86.c linux-3.0.3/arch/x86/kvm/x86.c
16114 --- linux-3.0.3/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16115 +++ linux-3.0.3/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16116 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16117 if (n < msr_list.nmsrs)
16118 goto out;
16119 r = -EFAULT;
16120 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16121 + goto out;
16122 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16123 num_msrs_to_save * sizeof(u32)))
16124 goto out;
16125 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16126 struct kvm_cpuid2 *cpuid,
16127 struct kvm_cpuid_entry2 __user *entries)
16128 {
16129 - int r;
16130 + int r, i;
16131
16132 r = -E2BIG;
16133 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16134 goto out;
16135 r = -EFAULT;
16136 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16137 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16138 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16139 goto out;
16140 + for (i = 0; i < cpuid->nent; ++i) {
16141 + struct kvm_cpuid_entry2 cpuid_entry;
16142 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16143 + goto out;
16144 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16145 + }
16146 vcpu->arch.cpuid_nent = cpuid->nent;
16147 kvm_apic_set_version(vcpu);
16148 kvm_x86_ops->cpuid_update(vcpu);
16149 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16150 struct kvm_cpuid2 *cpuid,
16151 struct kvm_cpuid_entry2 __user *entries)
16152 {
16153 - int r;
16154 + int r, i;
16155
16156 r = -E2BIG;
16157 if (cpuid->nent < vcpu->arch.cpuid_nent)
16158 goto out;
16159 r = -EFAULT;
16160 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16161 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16162 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16163 goto out;
16164 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16165 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16166 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16167 + goto out;
16168 + }
16169 return 0;
16170
16171 out:
16172 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16173 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16174 struct kvm_interrupt *irq)
16175 {
16176 - if (irq->irq < 0 || irq->irq >= 256)
16177 + if (irq->irq >= 256)
16178 return -EINVAL;
16179 if (irqchip_in_kernel(vcpu->kvm))
16180 return -ENXIO;
16181 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16182 }
16183 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16184
16185 -int kvm_arch_init(void *opaque)
16186 +int kvm_arch_init(const void *opaque)
16187 {
16188 int r;
16189 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16190 diff -urNp linux-3.0.3/arch/x86/lguest/boot.c linux-3.0.3/arch/x86/lguest/boot.c
16191 --- linux-3.0.3/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16192 +++ linux-3.0.3/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16193 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16194 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16195 * Launcher to reboot us.
16196 */
16197 -static void lguest_restart(char *reason)
16198 +static __noreturn void lguest_restart(char *reason)
16199 {
16200 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16201 + BUG();
16202 }
16203
16204 /*G:050
16205 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_32.c linux-3.0.3/arch/x86/lib/atomic64_32.c
16206 --- linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16207 +++ linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16208 @@ -8,18 +8,30 @@
16209
16210 long long atomic64_read_cx8(long long, const atomic64_t *v);
16211 EXPORT_SYMBOL(atomic64_read_cx8);
16212 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16213 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16214 long long atomic64_set_cx8(long long, const atomic64_t *v);
16215 EXPORT_SYMBOL(atomic64_set_cx8);
16216 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16217 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16218 long long atomic64_xchg_cx8(long long, unsigned high);
16219 EXPORT_SYMBOL(atomic64_xchg_cx8);
16220 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16221 EXPORT_SYMBOL(atomic64_add_return_cx8);
16222 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16223 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16224 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16225 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16226 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16227 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16228 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16229 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16230 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16231 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16232 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16233 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16234 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16235 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16236 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16237 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16238 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16239 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16240 #ifndef CONFIG_X86_CMPXCHG64
16241 long long atomic64_read_386(long long, const atomic64_t *v);
16242 EXPORT_SYMBOL(atomic64_read_386);
16243 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16244 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16245 long long atomic64_set_386(long long, const atomic64_t *v);
16246 EXPORT_SYMBOL(atomic64_set_386);
16247 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16248 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16249 long long atomic64_xchg_386(long long, unsigned high);
16250 EXPORT_SYMBOL(atomic64_xchg_386);
16251 long long atomic64_add_return_386(long long a, atomic64_t *v);
16252 EXPORT_SYMBOL(atomic64_add_return_386);
16253 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16254 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16255 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16256 EXPORT_SYMBOL(atomic64_sub_return_386);
16257 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16258 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16259 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16260 EXPORT_SYMBOL(atomic64_inc_return_386);
16261 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16262 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16263 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16264 EXPORT_SYMBOL(atomic64_dec_return_386);
16265 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16266 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16267 long long atomic64_add_386(long long a, atomic64_t *v);
16268 EXPORT_SYMBOL(atomic64_add_386);
16269 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16270 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16271 long long atomic64_sub_386(long long a, atomic64_t *v);
16272 EXPORT_SYMBOL(atomic64_sub_386);
16273 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16274 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16275 long long atomic64_inc_386(long long a, atomic64_t *v);
16276 EXPORT_SYMBOL(atomic64_inc_386);
16277 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16278 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16279 long long atomic64_dec_386(long long a, atomic64_t *v);
16280 EXPORT_SYMBOL(atomic64_dec_386);
16281 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16282 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16283 long long atomic64_dec_if_positive_386(atomic64_t *v);
16284 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16285 int atomic64_inc_not_zero_386(atomic64_t *v);
16286 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_386_32.S linux-3.0.3/arch/x86/lib/atomic64_386_32.S
16287 --- linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16288 +++ linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16289 @@ -48,6 +48,10 @@ BEGIN(read)
16290 movl (v), %eax
16291 movl 4(v), %edx
16292 RET_ENDP
16293 +BEGIN(read_unchecked)
16294 + movl (v), %eax
16295 + movl 4(v), %edx
16296 +RET_ENDP
16297 #undef v
16298
16299 #define v %esi
16300 @@ -55,6 +59,10 @@ BEGIN(set)
16301 movl %ebx, (v)
16302 movl %ecx, 4(v)
16303 RET_ENDP
16304 +BEGIN(set_unchecked)
16305 + movl %ebx, (v)
16306 + movl %ecx, 4(v)
16307 +RET_ENDP
16308 #undef v
16309
16310 #define v %esi
16311 @@ -70,6 +78,20 @@ RET_ENDP
16312 BEGIN(add)
16313 addl %eax, (v)
16314 adcl %edx, 4(v)
16315 +
16316 +#ifdef CONFIG_PAX_REFCOUNT
16317 + jno 0f
16318 + subl %eax, (v)
16319 + sbbl %edx, 4(v)
16320 + int $4
16321 +0:
16322 + _ASM_EXTABLE(0b, 0b)
16323 +#endif
16324 +
16325 +RET_ENDP
16326 +BEGIN(add_unchecked)
16327 + addl %eax, (v)
16328 + adcl %edx, 4(v)
16329 RET_ENDP
16330 #undef v
16331
16332 @@ -77,6 +99,24 @@ RET_ENDP
16333 BEGIN(add_return)
16334 addl (v), %eax
16335 adcl 4(v), %edx
16336 +
16337 +#ifdef CONFIG_PAX_REFCOUNT
16338 + into
16339 +1234:
16340 + _ASM_EXTABLE(1234b, 2f)
16341 +#endif
16342 +
16343 + movl %eax, (v)
16344 + movl %edx, 4(v)
16345 +
16346 +#ifdef CONFIG_PAX_REFCOUNT
16347 +2:
16348 +#endif
16349 +
16350 +RET_ENDP
16351 +BEGIN(add_return_unchecked)
16352 + addl (v), %eax
16353 + adcl 4(v), %edx
16354 movl %eax, (v)
16355 movl %edx, 4(v)
16356 RET_ENDP
16357 @@ -86,6 +126,20 @@ RET_ENDP
16358 BEGIN(sub)
16359 subl %eax, (v)
16360 sbbl %edx, 4(v)
16361 +
16362 +#ifdef CONFIG_PAX_REFCOUNT
16363 + jno 0f
16364 + addl %eax, (v)
16365 + adcl %edx, 4(v)
16366 + int $4
16367 +0:
16368 + _ASM_EXTABLE(0b, 0b)
16369 +#endif
16370 +
16371 +RET_ENDP
16372 +BEGIN(sub_unchecked)
16373 + subl %eax, (v)
16374 + sbbl %edx, 4(v)
16375 RET_ENDP
16376 #undef v
16377
16378 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16379 sbbl $0, %edx
16380 addl (v), %eax
16381 adcl 4(v), %edx
16382 +
16383 +#ifdef CONFIG_PAX_REFCOUNT
16384 + into
16385 +1234:
16386 + _ASM_EXTABLE(1234b, 2f)
16387 +#endif
16388 +
16389 + movl %eax, (v)
16390 + movl %edx, 4(v)
16391 +
16392 +#ifdef CONFIG_PAX_REFCOUNT
16393 +2:
16394 +#endif
16395 +
16396 +RET_ENDP
16397 +BEGIN(sub_return_unchecked)
16398 + negl %edx
16399 + negl %eax
16400 + sbbl $0, %edx
16401 + addl (v), %eax
16402 + adcl 4(v), %edx
16403 movl %eax, (v)
16404 movl %edx, 4(v)
16405 RET_ENDP
16406 @@ -105,6 +180,20 @@ RET_ENDP
16407 BEGIN(inc)
16408 addl $1, (v)
16409 adcl $0, 4(v)
16410 +
16411 +#ifdef CONFIG_PAX_REFCOUNT
16412 + jno 0f
16413 + subl $1, (v)
16414 + sbbl $0, 4(v)
16415 + int $4
16416 +0:
16417 + _ASM_EXTABLE(0b, 0b)
16418 +#endif
16419 +
16420 +RET_ENDP
16421 +BEGIN(inc_unchecked)
16422 + addl $1, (v)
16423 + adcl $0, 4(v)
16424 RET_ENDP
16425 #undef v
16426
16427 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16428 movl 4(v), %edx
16429 addl $1, %eax
16430 adcl $0, %edx
16431 +
16432 +#ifdef CONFIG_PAX_REFCOUNT
16433 + into
16434 +1234:
16435 + _ASM_EXTABLE(1234b, 2f)
16436 +#endif
16437 +
16438 + movl %eax, (v)
16439 + movl %edx, 4(v)
16440 +
16441 +#ifdef CONFIG_PAX_REFCOUNT
16442 +2:
16443 +#endif
16444 +
16445 +RET_ENDP
16446 +BEGIN(inc_return_unchecked)
16447 + movl (v), %eax
16448 + movl 4(v), %edx
16449 + addl $1, %eax
16450 + adcl $0, %edx
16451 movl %eax, (v)
16452 movl %edx, 4(v)
16453 RET_ENDP
16454 @@ -123,6 +232,20 @@ RET_ENDP
16455 BEGIN(dec)
16456 subl $1, (v)
16457 sbbl $0, 4(v)
16458 +
16459 +#ifdef CONFIG_PAX_REFCOUNT
16460 + jno 0f
16461 + addl $1, (v)
16462 + adcl $0, 4(v)
16463 + int $4
16464 +0:
16465 + _ASM_EXTABLE(0b, 0b)
16466 +#endif
16467 +
16468 +RET_ENDP
16469 +BEGIN(dec_unchecked)
16470 + subl $1, (v)
16471 + sbbl $0, 4(v)
16472 RET_ENDP
16473 #undef v
16474
16475 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16476 movl 4(v), %edx
16477 subl $1, %eax
16478 sbbl $0, %edx
16479 +
16480 +#ifdef CONFIG_PAX_REFCOUNT
16481 + into
16482 +1234:
16483 + _ASM_EXTABLE(1234b, 2f)
16484 +#endif
16485 +
16486 + movl %eax, (v)
16487 + movl %edx, 4(v)
16488 +
16489 +#ifdef CONFIG_PAX_REFCOUNT
16490 +2:
16491 +#endif
16492 +
16493 +RET_ENDP
16494 +BEGIN(dec_return_unchecked)
16495 + movl (v), %eax
16496 + movl 4(v), %edx
16497 + subl $1, %eax
16498 + sbbl $0, %edx
16499 movl %eax, (v)
16500 movl %edx, 4(v)
16501 RET_ENDP
16502 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16503 adcl %edx, %edi
16504 addl (v), %eax
16505 adcl 4(v), %edx
16506 +
16507 +#ifdef CONFIG_PAX_REFCOUNT
16508 + into
16509 +1234:
16510 + _ASM_EXTABLE(1234b, 2f)
16511 +#endif
16512 +
16513 cmpl %eax, %esi
16514 je 3f
16515 1:
16516 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16517 1:
16518 addl $1, %eax
16519 adcl $0, %edx
16520 +
16521 +#ifdef CONFIG_PAX_REFCOUNT
16522 + into
16523 +1234:
16524 + _ASM_EXTABLE(1234b, 2f)
16525 +#endif
16526 +
16527 movl %eax, (v)
16528 movl %edx, 4(v)
16529 movl $1, %eax
16530 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16531 movl 4(v), %edx
16532 subl $1, %eax
16533 sbbl $0, %edx
16534 +
16535 +#ifdef CONFIG_PAX_REFCOUNT
16536 + into
16537 +1234:
16538 + _ASM_EXTABLE(1234b, 1f)
16539 +#endif
16540 +
16541 js 1f
16542 movl %eax, (v)
16543 movl %edx, 4(v)
16544 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S
16545 --- linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16546 +++ linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16547 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16548 CFI_ENDPROC
16549 ENDPROC(atomic64_read_cx8)
16550
16551 +ENTRY(atomic64_read_unchecked_cx8)
16552 + CFI_STARTPROC
16553 +
16554 + read64 %ecx
16555 + ret
16556 + CFI_ENDPROC
16557 +ENDPROC(atomic64_read_unchecked_cx8)
16558 +
16559 ENTRY(atomic64_set_cx8)
16560 CFI_STARTPROC
16561
16562 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16563 CFI_ENDPROC
16564 ENDPROC(atomic64_set_cx8)
16565
16566 +ENTRY(atomic64_set_unchecked_cx8)
16567 + CFI_STARTPROC
16568 +
16569 +1:
16570 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16571 + * are atomic on 586 and newer */
16572 + cmpxchg8b (%esi)
16573 + jne 1b
16574 +
16575 + ret
16576 + CFI_ENDPROC
16577 +ENDPROC(atomic64_set_unchecked_cx8)
16578 +
16579 ENTRY(atomic64_xchg_cx8)
16580 CFI_STARTPROC
16581
16582 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16583 CFI_ENDPROC
16584 ENDPROC(atomic64_xchg_cx8)
16585
16586 -.macro addsub_return func ins insc
16587 -ENTRY(atomic64_\func\()_return_cx8)
16588 +.macro addsub_return func ins insc unchecked=""
16589 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16590 CFI_STARTPROC
16591 SAVE ebp
16592 SAVE ebx
16593 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16594 movl %edx, %ecx
16595 \ins\()l %esi, %ebx
16596 \insc\()l %edi, %ecx
16597 +
16598 +.ifb \unchecked
16599 +#ifdef CONFIG_PAX_REFCOUNT
16600 + into
16601 +2:
16602 + _ASM_EXTABLE(2b, 3f)
16603 +#endif
16604 +.endif
16605 +
16606 LOCK_PREFIX
16607 cmpxchg8b (%ebp)
16608 jne 1b
16609 -
16610 -10:
16611 movl %ebx, %eax
16612 movl %ecx, %edx
16613 +
16614 +.ifb \unchecked
16615 +#ifdef CONFIG_PAX_REFCOUNT
16616 +3:
16617 +#endif
16618 +.endif
16619 +
16620 RESTORE edi
16621 RESTORE esi
16622 RESTORE ebx
16623 RESTORE ebp
16624 ret
16625 CFI_ENDPROC
16626 -ENDPROC(atomic64_\func\()_return_cx8)
16627 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16628 .endm
16629
16630 addsub_return add add adc
16631 addsub_return sub sub sbb
16632 +addsub_return add add adc _unchecked
16633 +addsub_return sub sub sbb _unchecked
16634
16635 -.macro incdec_return func ins insc
16636 -ENTRY(atomic64_\func\()_return_cx8)
16637 +.macro incdec_return func ins insc unchecked
16638 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16639 CFI_STARTPROC
16640 SAVE ebx
16641
16642 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16643 movl %edx, %ecx
16644 \ins\()l $1, %ebx
16645 \insc\()l $0, %ecx
16646 +
16647 +.ifb \unchecked
16648 +#ifdef CONFIG_PAX_REFCOUNT
16649 + into
16650 +2:
16651 + _ASM_EXTABLE(2b, 3f)
16652 +#endif
16653 +.endif
16654 +
16655 LOCK_PREFIX
16656 cmpxchg8b (%esi)
16657 jne 1b
16658
16659 -10:
16660 movl %ebx, %eax
16661 movl %ecx, %edx
16662 +
16663 +.ifb \unchecked
16664 +#ifdef CONFIG_PAX_REFCOUNT
16665 +3:
16666 +#endif
16667 +.endif
16668 +
16669 RESTORE ebx
16670 ret
16671 CFI_ENDPROC
16672 -ENDPROC(atomic64_\func\()_return_cx8)
16673 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16674 .endm
16675
16676 incdec_return inc add adc
16677 incdec_return dec sub sbb
16678 +incdec_return inc add adc _unchecked
16679 +incdec_return dec sub sbb _unchecked
16680
16681 ENTRY(atomic64_dec_if_positive_cx8)
16682 CFI_STARTPROC
16683 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16684 movl %edx, %ecx
16685 subl $1, %ebx
16686 sbb $0, %ecx
16687 +
16688 +#ifdef CONFIG_PAX_REFCOUNT
16689 + into
16690 +1234:
16691 + _ASM_EXTABLE(1234b, 2f)
16692 +#endif
16693 +
16694 js 2f
16695 LOCK_PREFIX
16696 cmpxchg8b (%esi)
16697 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16698 movl %edx, %ecx
16699 addl %esi, %ebx
16700 adcl %edi, %ecx
16701 +
16702 +#ifdef CONFIG_PAX_REFCOUNT
16703 + into
16704 +1234:
16705 + _ASM_EXTABLE(1234b, 3f)
16706 +#endif
16707 +
16708 LOCK_PREFIX
16709 cmpxchg8b (%ebp)
16710 jne 1b
16711 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16712 movl %edx, %ecx
16713 addl $1, %ebx
16714 adcl $0, %ecx
16715 +
16716 +#ifdef CONFIG_PAX_REFCOUNT
16717 + into
16718 +1234:
16719 + _ASM_EXTABLE(1234b, 3f)
16720 +#endif
16721 +
16722 LOCK_PREFIX
16723 cmpxchg8b (%esi)
16724 jne 1b
16725 diff -urNp linux-3.0.3/arch/x86/lib/checksum_32.S linux-3.0.3/arch/x86/lib/checksum_32.S
16726 --- linux-3.0.3/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16727 +++ linux-3.0.3/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16728 @@ -28,7 +28,8 @@
16729 #include <linux/linkage.h>
16730 #include <asm/dwarf2.h>
16731 #include <asm/errno.h>
16732 -
16733 +#include <asm/segment.h>
16734 +
16735 /*
16736 * computes a partial checksum, e.g. for TCP/UDP fragments
16737 */
16738 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16739
16740 #define ARGBASE 16
16741 #define FP 12
16742 -
16743 -ENTRY(csum_partial_copy_generic)
16744 +
16745 +ENTRY(csum_partial_copy_generic_to_user)
16746 CFI_STARTPROC
16747 +
16748 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16749 + pushl_cfi %gs
16750 + popl_cfi %es
16751 + jmp csum_partial_copy_generic
16752 +#endif
16753 +
16754 +ENTRY(csum_partial_copy_generic_from_user)
16755 +
16756 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16757 + pushl_cfi %gs
16758 + popl_cfi %ds
16759 +#endif
16760 +
16761 +ENTRY(csum_partial_copy_generic)
16762 subl $4,%esp
16763 CFI_ADJUST_CFA_OFFSET 4
16764 pushl_cfi %edi
16765 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16766 jmp 4f
16767 SRC(1: movw (%esi), %bx )
16768 addl $2, %esi
16769 -DST( movw %bx, (%edi) )
16770 +DST( movw %bx, %es:(%edi) )
16771 addl $2, %edi
16772 addw %bx, %ax
16773 adcl $0, %eax
16774 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16775 SRC(1: movl (%esi), %ebx )
16776 SRC( movl 4(%esi), %edx )
16777 adcl %ebx, %eax
16778 -DST( movl %ebx, (%edi) )
16779 +DST( movl %ebx, %es:(%edi) )
16780 adcl %edx, %eax
16781 -DST( movl %edx, 4(%edi) )
16782 +DST( movl %edx, %es:4(%edi) )
16783
16784 SRC( movl 8(%esi), %ebx )
16785 SRC( movl 12(%esi), %edx )
16786 adcl %ebx, %eax
16787 -DST( movl %ebx, 8(%edi) )
16788 +DST( movl %ebx, %es:8(%edi) )
16789 adcl %edx, %eax
16790 -DST( movl %edx, 12(%edi) )
16791 +DST( movl %edx, %es:12(%edi) )
16792
16793 SRC( movl 16(%esi), %ebx )
16794 SRC( movl 20(%esi), %edx )
16795 adcl %ebx, %eax
16796 -DST( movl %ebx, 16(%edi) )
16797 +DST( movl %ebx, %es:16(%edi) )
16798 adcl %edx, %eax
16799 -DST( movl %edx, 20(%edi) )
16800 +DST( movl %edx, %es:20(%edi) )
16801
16802 SRC( movl 24(%esi), %ebx )
16803 SRC( movl 28(%esi), %edx )
16804 adcl %ebx, %eax
16805 -DST( movl %ebx, 24(%edi) )
16806 +DST( movl %ebx, %es:24(%edi) )
16807 adcl %edx, %eax
16808 -DST( movl %edx, 28(%edi) )
16809 +DST( movl %edx, %es:28(%edi) )
16810
16811 lea 32(%esi), %esi
16812 lea 32(%edi), %edi
16813 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16814 shrl $2, %edx # This clears CF
16815 SRC(3: movl (%esi), %ebx )
16816 adcl %ebx, %eax
16817 -DST( movl %ebx, (%edi) )
16818 +DST( movl %ebx, %es:(%edi) )
16819 lea 4(%esi), %esi
16820 lea 4(%edi), %edi
16821 dec %edx
16822 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16823 jb 5f
16824 SRC( movw (%esi), %cx )
16825 leal 2(%esi), %esi
16826 -DST( movw %cx, (%edi) )
16827 +DST( movw %cx, %es:(%edi) )
16828 leal 2(%edi), %edi
16829 je 6f
16830 shll $16,%ecx
16831 SRC(5: movb (%esi), %cl )
16832 -DST( movb %cl, (%edi) )
16833 +DST( movb %cl, %es:(%edi) )
16834 6: addl %ecx, %eax
16835 adcl $0, %eax
16836 7:
16837 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16838
16839 6001:
16840 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16841 - movl $-EFAULT, (%ebx)
16842 + movl $-EFAULT, %ss:(%ebx)
16843
16844 # zero the complete destination - computing the rest
16845 # is too much work
16846 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16847
16848 6002:
16849 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16850 - movl $-EFAULT,(%ebx)
16851 + movl $-EFAULT,%ss:(%ebx)
16852 jmp 5000b
16853
16854 .previous
16855
16856 + pushl_cfi %ss
16857 + popl_cfi %ds
16858 + pushl_cfi %ss
16859 + popl_cfi %es
16860 popl_cfi %ebx
16861 CFI_RESTORE ebx
16862 popl_cfi %esi
16863 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16864 popl_cfi %ecx # equivalent to addl $4,%esp
16865 ret
16866 CFI_ENDPROC
16867 -ENDPROC(csum_partial_copy_generic)
16868 +ENDPROC(csum_partial_copy_generic_to_user)
16869
16870 #else
16871
16872 /* Version for PentiumII/PPro */
16873
16874 #define ROUND1(x) \
16875 + nop; nop; nop; \
16876 SRC(movl x(%esi), %ebx ) ; \
16877 addl %ebx, %eax ; \
16878 - DST(movl %ebx, x(%edi) ) ;
16879 + DST(movl %ebx, %es:x(%edi)) ;
16880
16881 #define ROUND(x) \
16882 + nop; nop; nop; \
16883 SRC(movl x(%esi), %ebx ) ; \
16884 adcl %ebx, %eax ; \
16885 - DST(movl %ebx, x(%edi) ) ;
16886 + DST(movl %ebx, %es:x(%edi)) ;
16887
16888 #define ARGBASE 12
16889 -
16890 -ENTRY(csum_partial_copy_generic)
16891 +
16892 +ENTRY(csum_partial_copy_generic_to_user)
16893 CFI_STARTPROC
16894 +
16895 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16896 + pushl_cfi %gs
16897 + popl_cfi %es
16898 + jmp csum_partial_copy_generic
16899 +#endif
16900 +
16901 +ENTRY(csum_partial_copy_generic_from_user)
16902 +
16903 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16904 + pushl_cfi %gs
16905 + popl_cfi %ds
16906 +#endif
16907 +
16908 +ENTRY(csum_partial_copy_generic)
16909 pushl_cfi %ebx
16910 CFI_REL_OFFSET ebx, 0
16911 pushl_cfi %edi
16912 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16913 subl %ebx, %edi
16914 lea -1(%esi),%edx
16915 andl $-32,%edx
16916 - lea 3f(%ebx,%ebx), %ebx
16917 + lea 3f(%ebx,%ebx,2), %ebx
16918 testl %esi, %esi
16919 jmp *%ebx
16920 1: addl $64,%esi
16921 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16922 jb 5f
16923 SRC( movw (%esi), %dx )
16924 leal 2(%esi), %esi
16925 -DST( movw %dx, (%edi) )
16926 +DST( movw %dx, %es:(%edi) )
16927 leal 2(%edi), %edi
16928 je 6f
16929 shll $16,%edx
16930 5:
16931 SRC( movb (%esi), %dl )
16932 -DST( movb %dl, (%edi) )
16933 +DST( movb %dl, %es:(%edi) )
16934 6: addl %edx, %eax
16935 adcl $0, %eax
16936 7:
16937 .section .fixup, "ax"
16938 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16939 - movl $-EFAULT, (%ebx)
16940 + movl $-EFAULT, %ss:(%ebx)
16941 # zero the complete destination (computing the rest is too much work)
16942 movl ARGBASE+8(%esp),%edi # dst
16943 movl ARGBASE+12(%esp),%ecx # len
16944 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16945 rep; stosb
16946 jmp 7b
16947 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16948 - movl $-EFAULT, (%ebx)
16949 + movl $-EFAULT, %ss:(%ebx)
16950 jmp 7b
16951 .previous
16952
16953 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16954 + pushl_cfi %ss
16955 + popl_cfi %ds
16956 + pushl_cfi %ss
16957 + popl_cfi %es
16958 +#endif
16959 +
16960 popl_cfi %esi
16961 CFI_RESTORE esi
16962 popl_cfi %edi
16963 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16964 CFI_RESTORE ebx
16965 ret
16966 CFI_ENDPROC
16967 -ENDPROC(csum_partial_copy_generic)
16968 +ENDPROC(csum_partial_copy_generic_to_user)
16969
16970 #undef ROUND
16971 #undef ROUND1
16972 diff -urNp linux-3.0.3/arch/x86/lib/clear_page_64.S linux-3.0.3/arch/x86/lib/clear_page_64.S
16973 --- linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16974 +++ linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16975 @@ -58,7 +58,7 @@ ENDPROC(clear_page)
16976
16977 #include <asm/cpufeature.h>
16978
16979 - .section .altinstr_replacement,"ax"
16980 + .section .altinstr_replacement,"a"
16981 1: .byte 0xeb /* jmp <disp8> */
16982 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16983 2: .byte 0xeb /* jmp <disp8> */
16984 diff -urNp linux-3.0.3/arch/x86/lib/copy_page_64.S linux-3.0.3/arch/x86/lib/copy_page_64.S
16985 --- linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16986 +++ linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16987 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
16988
16989 #include <asm/cpufeature.h>
16990
16991 - .section .altinstr_replacement,"ax"
16992 + .section .altinstr_replacement,"a"
16993 1: .byte 0xeb /* jmp <disp8> */
16994 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16995 2:
16996 diff -urNp linux-3.0.3/arch/x86/lib/copy_user_64.S linux-3.0.3/arch/x86/lib/copy_user_64.S
16997 --- linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16998 +++ linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16999 @@ -16,6 +16,7 @@
17000 #include <asm/thread_info.h>
17001 #include <asm/cpufeature.h>
17002 #include <asm/alternative-asm.h>
17003 +#include <asm/pgtable.h>
17004
17005 /*
17006 * By placing feature2 after feature1 in altinstructions section, we logically
17007 @@ -29,7 +30,7 @@
17008 .byte 0xe9 /* 32bit jump */
17009 .long \orig-1f /* by default jump to orig */
17010 1:
17011 - .section .altinstr_replacement,"ax"
17012 + .section .altinstr_replacement,"a"
17013 2: .byte 0xe9 /* near jump with 32bit immediate */
17014 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17015 3: .byte 0xe9 /* near jump with 32bit immediate */
17016 @@ -71,41 +72,13 @@
17017 #endif
17018 .endm
17019
17020 -/* Standard copy_to_user with segment limit checking */
17021 -ENTRY(_copy_to_user)
17022 - CFI_STARTPROC
17023 - GET_THREAD_INFO(%rax)
17024 - movq %rdi,%rcx
17025 - addq %rdx,%rcx
17026 - jc bad_to_user
17027 - cmpq TI_addr_limit(%rax),%rcx
17028 - ja bad_to_user
17029 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17030 - copy_user_generic_unrolled,copy_user_generic_string, \
17031 - copy_user_enhanced_fast_string
17032 - CFI_ENDPROC
17033 -ENDPROC(_copy_to_user)
17034 -
17035 -/* Standard copy_from_user with segment limit checking */
17036 -ENTRY(_copy_from_user)
17037 - CFI_STARTPROC
17038 - GET_THREAD_INFO(%rax)
17039 - movq %rsi,%rcx
17040 - addq %rdx,%rcx
17041 - jc bad_from_user
17042 - cmpq TI_addr_limit(%rax),%rcx
17043 - ja bad_from_user
17044 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17045 - copy_user_generic_unrolled,copy_user_generic_string, \
17046 - copy_user_enhanced_fast_string
17047 - CFI_ENDPROC
17048 -ENDPROC(_copy_from_user)
17049 -
17050 .section .fixup,"ax"
17051 /* must zero dest */
17052 ENTRY(bad_from_user)
17053 bad_from_user:
17054 CFI_STARTPROC
17055 + testl %edx,%edx
17056 + js bad_to_user
17057 movl %edx,%ecx
17058 xorl %eax,%eax
17059 rep
17060 diff -urNp linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S
17061 --- linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17062 +++ linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17063 @@ -14,6 +14,7 @@
17064 #include <asm/current.h>
17065 #include <asm/asm-offsets.h>
17066 #include <asm/thread_info.h>
17067 +#include <asm/pgtable.h>
17068
17069 .macro ALIGN_DESTINATION
17070 #ifdef FIX_ALIGNMENT
17071 @@ -50,6 +51,15 @@
17072 */
17073 ENTRY(__copy_user_nocache)
17074 CFI_STARTPROC
17075 +
17076 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17077 + mov $PAX_USER_SHADOW_BASE,%rcx
17078 + cmp %rcx,%rsi
17079 + jae 1f
17080 + add %rcx,%rsi
17081 +1:
17082 +#endif
17083 +
17084 cmpl $8,%edx
17085 jb 20f /* less then 8 bytes, go to byte copy loop */
17086 ALIGN_DESTINATION
17087 diff -urNp linux-3.0.3/arch/x86/lib/csum-wrappers_64.c linux-3.0.3/arch/x86/lib/csum-wrappers_64.c
17088 --- linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17089 +++ linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17090 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17091 len -= 2;
17092 }
17093 }
17094 +
17095 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17096 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17097 + src += PAX_USER_SHADOW_BASE;
17098 +#endif
17099 +
17100 isum = csum_partial_copy_generic((__force const void *)src,
17101 dst, len, isum, errp, NULL);
17102 if (unlikely(*errp))
17103 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17104 }
17105
17106 *errp = 0;
17107 +
17108 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17109 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17110 + dst += PAX_USER_SHADOW_BASE;
17111 +#endif
17112 +
17113 return csum_partial_copy_generic(src, (void __force *)dst,
17114 len, isum, NULL, errp);
17115 }
17116 diff -urNp linux-3.0.3/arch/x86/lib/getuser.S linux-3.0.3/arch/x86/lib/getuser.S
17117 --- linux-3.0.3/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17118 +++ linux-3.0.3/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17119 @@ -33,14 +33,35 @@
17120 #include <asm/asm-offsets.h>
17121 #include <asm/thread_info.h>
17122 #include <asm/asm.h>
17123 +#include <asm/segment.h>
17124 +#include <asm/pgtable.h>
17125 +
17126 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17127 +#define __copyuser_seg gs;
17128 +#else
17129 +#define __copyuser_seg
17130 +#endif
17131
17132 .text
17133 ENTRY(__get_user_1)
17134 CFI_STARTPROC
17135 +
17136 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17137 GET_THREAD_INFO(%_ASM_DX)
17138 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17139 jae bad_get_user
17140 -1: movzb (%_ASM_AX),%edx
17141 +
17142 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17143 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17144 + cmp %_ASM_DX,%_ASM_AX
17145 + jae 1234f
17146 + add %_ASM_DX,%_ASM_AX
17147 +1234:
17148 +#endif
17149 +
17150 +#endif
17151 +
17152 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17153 xor %eax,%eax
17154 ret
17155 CFI_ENDPROC
17156 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17157 ENTRY(__get_user_2)
17158 CFI_STARTPROC
17159 add $1,%_ASM_AX
17160 +
17161 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17162 jc bad_get_user
17163 GET_THREAD_INFO(%_ASM_DX)
17164 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17165 jae bad_get_user
17166 -2: movzwl -1(%_ASM_AX),%edx
17167 +
17168 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17169 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17170 + cmp %_ASM_DX,%_ASM_AX
17171 + jae 1234f
17172 + add %_ASM_DX,%_ASM_AX
17173 +1234:
17174 +#endif
17175 +
17176 +#endif
17177 +
17178 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17179 xor %eax,%eax
17180 ret
17181 CFI_ENDPROC
17182 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17183 ENTRY(__get_user_4)
17184 CFI_STARTPROC
17185 add $3,%_ASM_AX
17186 +
17187 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17188 jc bad_get_user
17189 GET_THREAD_INFO(%_ASM_DX)
17190 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17191 jae bad_get_user
17192 -3: mov -3(%_ASM_AX),%edx
17193 +
17194 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17195 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17196 + cmp %_ASM_DX,%_ASM_AX
17197 + jae 1234f
17198 + add %_ASM_DX,%_ASM_AX
17199 +1234:
17200 +#endif
17201 +
17202 +#endif
17203 +
17204 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17205 xor %eax,%eax
17206 ret
17207 CFI_ENDPROC
17208 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17209 GET_THREAD_INFO(%_ASM_DX)
17210 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17211 jae bad_get_user
17212 +
17213 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17214 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17215 + cmp %_ASM_DX,%_ASM_AX
17216 + jae 1234f
17217 + add %_ASM_DX,%_ASM_AX
17218 +1234:
17219 +#endif
17220 +
17221 4: movq -7(%_ASM_AX),%_ASM_DX
17222 xor %eax,%eax
17223 ret
17224 diff -urNp linux-3.0.3/arch/x86/lib/insn.c linux-3.0.3/arch/x86/lib/insn.c
17225 --- linux-3.0.3/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17226 +++ linux-3.0.3/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17227 @@ -21,6 +21,11 @@
17228 #include <linux/string.h>
17229 #include <asm/inat.h>
17230 #include <asm/insn.h>
17231 +#ifdef __KERNEL__
17232 +#include <asm/pgtable_types.h>
17233 +#else
17234 +#define ktla_ktva(addr) addr
17235 +#endif
17236
17237 #define get_next(t, insn) \
17238 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17239 @@ -40,8 +45,8 @@
17240 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17241 {
17242 memset(insn, 0, sizeof(*insn));
17243 - insn->kaddr = kaddr;
17244 - insn->next_byte = kaddr;
17245 + insn->kaddr = ktla_ktva(kaddr);
17246 + insn->next_byte = ktla_ktva(kaddr);
17247 insn->x86_64 = x86_64 ? 1 : 0;
17248 insn->opnd_bytes = 4;
17249 if (x86_64)
17250 diff -urNp linux-3.0.3/arch/x86/lib/mmx_32.c linux-3.0.3/arch/x86/lib/mmx_32.c
17251 --- linux-3.0.3/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17252 +++ linux-3.0.3/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17253 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17254 {
17255 void *p;
17256 int i;
17257 + unsigned long cr0;
17258
17259 if (unlikely(in_interrupt()))
17260 return __memcpy(to, from, len);
17261 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17262 kernel_fpu_begin();
17263
17264 __asm__ __volatile__ (
17265 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17266 - " prefetch 64(%0)\n"
17267 - " prefetch 128(%0)\n"
17268 - " prefetch 192(%0)\n"
17269 - " prefetch 256(%0)\n"
17270 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17271 + " prefetch 64(%1)\n"
17272 + " prefetch 128(%1)\n"
17273 + " prefetch 192(%1)\n"
17274 + " prefetch 256(%1)\n"
17275 "2: \n"
17276 ".section .fixup, \"ax\"\n"
17277 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17278 + "3: \n"
17279 +
17280 +#ifdef CONFIG_PAX_KERNEXEC
17281 + " movl %%cr0, %0\n"
17282 + " movl %0, %%eax\n"
17283 + " andl $0xFFFEFFFF, %%eax\n"
17284 + " movl %%eax, %%cr0\n"
17285 +#endif
17286 +
17287 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17288 +
17289 +#ifdef CONFIG_PAX_KERNEXEC
17290 + " movl %0, %%cr0\n"
17291 +#endif
17292 +
17293 " jmp 2b\n"
17294 ".previous\n"
17295 _ASM_EXTABLE(1b, 3b)
17296 - : : "r" (from));
17297 + : "=&r" (cr0) : "r" (from) : "ax");
17298
17299 for ( ; i > 5; i--) {
17300 __asm__ __volatile__ (
17301 - "1: prefetch 320(%0)\n"
17302 - "2: movq (%0), %%mm0\n"
17303 - " movq 8(%0), %%mm1\n"
17304 - " movq 16(%0), %%mm2\n"
17305 - " movq 24(%0), %%mm3\n"
17306 - " movq %%mm0, (%1)\n"
17307 - " movq %%mm1, 8(%1)\n"
17308 - " movq %%mm2, 16(%1)\n"
17309 - " movq %%mm3, 24(%1)\n"
17310 - " movq 32(%0), %%mm0\n"
17311 - " movq 40(%0), %%mm1\n"
17312 - " movq 48(%0), %%mm2\n"
17313 - " movq 56(%0), %%mm3\n"
17314 - " movq %%mm0, 32(%1)\n"
17315 - " movq %%mm1, 40(%1)\n"
17316 - " movq %%mm2, 48(%1)\n"
17317 - " movq %%mm3, 56(%1)\n"
17318 + "1: prefetch 320(%1)\n"
17319 + "2: movq (%1), %%mm0\n"
17320 + " movq 8(%1), %%mm1\n"
17321 + " movq 16(%1), %%mm2\n"
17322 + " movq 24(%1), %%mm3\n"
17323 + " movq %%mm0, (%2)\n"
17324 + " movq %%mm1, 8(%2)\n"
17325 + " movq %%mm2, 16(%2)\n"
17326 + " movq %%mm3, 24(%2)\n"
17327 + " movq 32(%1), %%mm0\n"
17328 + " movq 40(%1), %%mm1\n"
17329 + " movq 48(%1), %%mm2\n"
17330 + " movq 56(%1), %%mm3\n"
17331 + " movq %%mm0, 32(%2)\n"
17332 + " movq %%mm1, 40(%2)\n"
17333 + " movq %%mm2, 48(%2)\n"
17334 + " movq %%mm3, 56(%2)\n"
17335 ".section .fixup, \"ax\"\n"
17336 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17337 + "3:\n"
17338 +
17339 +#ifdef CONFIG_PAX_KERNEXEC
17340 + " movl %%cr0, %0\n"
17341 + " movl %0, %%eax\n"
17342 + " andl $0xFFFEFFFF, %%eax\n"
17343 + " movl %%eax, %%cr0\n"
17344 +#endif
17345 +
17346 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17347 +
17348 +#ifdef CONFIG_PAX_KERNEXEC
17349 + " movl %0, %%cr0\n"
17350 +#endif
17351 +
17352 " jmp 2b\n"
17353 ".previous\n"
17354 _ASM_EXTABLE(1b, 3b)
17355 - : : "r" (from), "r" (to) : "memory");
17356 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17357
17358 from += 64;
17359 to += 64;
17360 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17361 static void fast_copy_page(void *to, void *from)
17362 {
17363 int i;
17364 + unsigned long cr0;
17365
17366 kernel_fpu_begin();
17367
17368 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17369 * but that is for later. -AV
17370 */
17371 __asm__ __volatile__(
17372 - "1: prefetch (%0)\n"
17373 - " prefetch 64(%0)\n"
17374 - " prefetch 128(%0)\n"
17375 - " prefetch 192(%0)\n"
17376 - " prefetch 256(%0)\n"
17377 + "1: prefetch (%1)\n"
17378 + " prefetch 64(%1)\n"
17379 + " prefetch 128(%1)\n"
17380 + " prefetch 192(%1)\n"
17381 + " prefetch 256(%1)\n"
17382 "2: \n"
17383 ".section .fixup, \"ax\"\n"
17384 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17385 + "3: \n"
17386 +
17387 +#ifdef CONFIG_PAX_KERNEXEC
17388 + " movl %%cr0, %0\n"
17389 + " movl %0, %%eax\n"
17390 + " andl $0xFFFEFFFF, %%eax\n"
17391 + " movl %%eax, %%cr0\n"
17392 +#endif
17393 +
17394 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17395 +
17396 +#ifdef CONFIG_PAX_KERNEXEC
17397 + " movl %0, %%cr0\n"
17398 +#endif
17399 +
17400 " jmp 2b\n"
17401 ".previous\n"
17402 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17403 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17404
17405 for (i = 0; i < (4096-320)/64; i++) {
17406 __asm__ __volatile__ (
17407 - "1: prefetch 320(%0)\n"
17408 - "2: movq (%0), %%mm0\n"
17409 - " movntq %%mm0, (%1)\n"
17410 - " movq 8(%0), %%mm1\n"
17411 - " movntq %%mm1, 8(%1)\n"
17412 - " movq 16(%0), %%mm2\n"
17413 - " movntq %%mm2, 16(%1)\n"
17414 - " movq 24(%0), %%mm3\n"
17415 - " movntq %%mm3, 24(%1)\n"
17416 - " movq 32(%0), %%mm4\n"
17417 - " movntq %%mm4, 32(%1)\n"
17418 - " movq 40(%0), %%mm5\n"
17419 - " movntq %%mm5, 40(%1)\n"
17420 - " movq 48(%0), %%mm6\n"
17421 - " movntq %%mm6, 48(%1)\n"
17422 - " movq 56(%0), %%mm7\n"
17423 - " movntq %%mm7, 56(%1)\n"
17424 + "1: prefetch 320(%1)\n"
17425 + "2: movq (%1), %%mm0\n"
17426 + " movntq %%mm0, (%2)\n"
17427 + " movq 8(%1), %%mm1\n"
17428 + " movntq %%mm1, 8(%2)\n"
17429 + " movq 16(%1), %%mm2\n"
17430 + " movntq %%mm2, 16(%2)\n"
17431 + " movq 24(%1), %%mm3\n"
17432 + " movntq %%mm3, 24(%2)\n"
17433 + " movq 32(%1), %%mm4\n"
17434 + " movntq %%mm4, 32(%2)\n"
17435 + " movq 40(%1), %%mm5\n"
17436 + " movntq %%mm5, 40(%2)\n"
17437 + " movq 48(%1), %%mm6\n"
17438 + " movntq %%mm6, 48(%2)\n"
17439 + " movq 56(%1), %%mm7\n"
17440 + " movntq %%mm7, 56(%2)\n"
17441 ".section .fixup, \"ax\"\n"
17442 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17443 + "3:\n"
17444 +
17445 +#ifdef CONFIG_PAX_KERNEXEC
17446 + " movl %%cr0, %0\n"
17447 + " movl %0, %%eax\n"
17448 + " andl $0xFFFEFFFF, %%eax\n"
17449 + " movl %%eax, %%cr0\n"
17450 +#endif
17451 +
17452 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17453 +
17454 +#ifdef CONFIG_PAX_KERNEXEC
17455 + " movl %0, %%cr0\n"
17456 +#endif
17457 +
17458 " jmp 2b\n"
17459 ".previous\n"
17460 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17461 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17462
17463 from += 64;
17464 to += 64;
17465 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17466 static void fast_copy_page(void *to, void *from)
17467 {
17468 int i;
17469 + unsigned long cr0;
17470
17471 kernel_fpu_begin();
17472
17473 __asm__ __volatile__ (
17474 - "1: prefetch (%0)\n"
17475 - " prefetch 64(%0)\n"
17476 - " prefetch 128(%0)\n"
17477 - " prefetch 192(%0)\n"
17478 - " prefetch 256(%0)\n"
17479 + "1: prefetch (%1)\n"
17480 + " prefetch 64(%1)\n"
17481 + " prefetch 128(%1)\n"
17482 + " prefetch 192(%1)\n"
17483 + " prefetch 256(%1)\n"
17484 "2: \n"
17485 ".section .fixup, \"ax\"\n"
17486 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17487 + "3: \n"
17488 +
17489 +#ifdef CONFIG_PAX_KERNEXEC
17490 + " movl %%cr0, %0\n"
17491 + " movl %0, %%eax\n"
17492 + " andl $0xFFFEFFFF, %%eax\n"
17493 + " movl %%eax, %%cr0\n"
17494 +#endif
17495 +
17496 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17497 +
17498 +#ifdef CONFIG_PAX_KERNEXEC
17499 + " movl %0, %%cr0\n"
17500 +#endif
17501 +
17502 " jmp 2b\n"
17503 ".previous\n"
17504 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17505 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17506
17507 for (i = 0; i < 4096/64; i++) {
17508 __asm__ __volatile__ (
17509 - "1: prefetch 320(%0)\n"
17510 - "2: movq (%0), %%mm0\n"
17511 - " movq 8(%0), %%mm1\n"
17512 - " movq 16(%0), %%mm2\n"
17513 - " movq 24(%0), %%mm3\n"
17514 - " movq %%mm0, (%1)\n"
17515 - " movq %%mm1, 8(%1)\n"
17516 - " movq %%mm2, 16(%1)\n"
17517 - " movq %%mm3, 24(%1)\n"
17518 - " movq 32(%0), %%mm0\n"
17519 - " movq 40(%0), %%mm1\n"
17520 - " movq 48(%0), %%mm2\n"
17521 - " movq 56(%0), %%mm3\n"
17522 - " movq %%mm0, 32(%1)\n"
17523 - " movq %%mm1, 40(%1)\n"
17524 - " movq %%mm2, 48(%1)\n"
17525 - " movq %%mm3, 56(%1)\n"
17526 + "1: prefetch 320(%1)\n"
17527 + "2: movq (%1), %%mm0\n"
17528 + " movq 8(%1), %%mm1\n"
17529 + " movq 16(%1), %%mm2\n"
17530 + " movq 24(%1), %%mm3\n"
17531 + " movq %%mm0, (%2)\n"
17532 + " movq %%mm1, 8(%2)\n"
17533 + " movq %%mm2, 16(%2)\n"
17534 + " movq %%mm3, 24(%2)\n"
17535 + " movq 32(%1), %%mm0\n"
17536 + " movq 40(%1), %%mm1\n"
17537 + " movq 48(%1), %%mm2\n"
17538 + " movq 56(%1), %%mm3\n"
17539 + " movq %%mm0, 32(%2)\n"
17540 + " movq %%mm1, 40(%2)\n"
17541 + " movq %%mm2, 48(%2)\n"
17542 + " movq %%mm3, 56(%2)\n"
17543 ".section .fixup, \"ax\"\n"
17544 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17545 + "3:\n"
17546 +
17547 +#ifdef CONFIG_PAX_KERNEXEC
17548 + " movl %%cr0, %0\n"
17549 + " movl %0, %%eax\n"
17550 + " andl $0xFFFEFFFF, %%eax\n"
17551 + " movl %%eax, %%cr0\n"
17552 +#endif
17553 +
17554 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17555 +
17556 +#ifdef CONFIG_PAX_KERNEXEC
17557 + " movl %0, %%cr0\n"
17558 +#endif
17559 +
17560 " jmp 2b\n"
17561 ".previous\n"
17562 _ASM_EXTABLE(1b, 3b)
17563 - : : "r" (from), "r" (to) : "memory");
17564 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17565
17566 from += 64;
17567 to += 64;
17568 diff -urNp linux-3.0.3/arch/x86/lib/putuser.S linux-3.0.3/arch/x86/lib/putuser.S
17569 --- linux-3.0.3/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17570 +++ linux-3.0.3/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17571 @@ -15,7 +15,8 @@
17572 #include <asm/thread_info.h>
17573 #include <asm/errno.h>
17574 #include <asm/asm.h>
17575 -
17576 +#include <asm/segment.h>
17577 +#include <asm/pgtable.h>
17578
17579 /*
17580 * __put_user_X
17581 @@ -29,52 +30,119 @@
17582 * as they get called from within inline assembly.
17583 */
17584
17585 -#define ENTER CFI_STARTPROC ; \
17586 - GET_THREAD_INFO(%_ASM_BX)
17587 +#define ENTER CFI_STARTPROC
17588 #define EXIT ret ; \
17589 CFI_ENDPROC
17590
17591 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17592 +#define _DEST %_ASM_CX,%_ASM_BX
17593 +#else
17594 +#define _DEST %_ASM_CX
17595 +#endif
17596 +
17597 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17598 +#define __copyuser_seg gs;
17599 +#else
17600 +#define __copyuser_seg
17601 +#endif
17602 +
17603 .text
17604 ENTRY(__put_user_1)
17605 ENTER
17606 +
17607 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17608 + GET_THREAD_INFO(%_ASM_BX)
17609 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17610 jae bad_put_user
17611 -1: movb %al,(%_ASM_CX)
17612 +
17613 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17614 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17615 + cmp %_ASM_BX,%_ASM_CX
17616 + jb 1234f
17617 + xor %ebx,%ebx
17618 +1234:
17619 +#endif
17620 +
17621 +#endif
17622 +
17623 +1: __copyuser_seg movb %al,(_DEST)
17624 xor %eax,%eax
17625 EXIT
17626 ENDPROC(__put_user_1)
17627
17628 ENTRY(__put_user_2)
17629 ENTER
17630 +
17631 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17632 + GET_THREAD_INFO(%_ASM_BX)
17633 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17634 sub $1,%_ASM_BX
17635 cmp %_ASM_BX,%_ASM_CX
17636 jae bad_put_user
17637 -2: movw %ax,(%_ASM_CX)
17638 +
17639 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17640 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17641 + cmp %_ASM_BX,%_ASM_CX
17642 + jb 1234f
17643 + xor %ebx,%ebx
17644 +1234:
17645 +#endif
17646 +
17647 +#endif
17648 +
17649 +2: __copyuser_seg movw %ax,(_DEST)
17650 xor %eax,%eax
17651 EXIT
17652 ENDPROC(__put_user_2)
17653
17654 ENTRY(__put_user_4)
17655 ENTER
17656 +
17657 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17658 + GET_THREAD_INFO(%_ASM_BX)
17659 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17660 sub $3,%_ASM_BX
17661 cmp %_ASM_BX,%_ASM_CX
17662 jae bad_put_user
17663 -3: movl %eax,(%_ASM_CX)
17664 +
17665 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17666 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17667 + cmp %_ASM_BX,%_ASM_CX
17668 + jb 1234f
17669 + xor %ebx,%ebx
17670 +1234:
17671 +#endif
17672 +
17673 +#endif
17674 +
17675 +3: __copyuser_seg movl %eax,(_DEST)
17676 xor %eax,%eax
17677 EXIT
17678 ENDPROC(__put_user_4)
17679
17680 ENTRY(__put_user_8)
17681 ENTER
17682 +
17683 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17684 + GET_THREAD_INFO(%_ASM_BX)
17685 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17686 sub $7,%_ASM_BX
17687 cmp %_ASM_BX,%_ASM_CX
17688 jae bad_put_user
17689 -4: mov %_ASM_AX,(%_ASM_CX)
17690 +
17691 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17692 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17693 + cmp %_ASM_BX,%_ASM_CX
17694 + jb 1234f
17695 + xor %ebx,%ebx
17696 +1234:
17697 +#endif
17698 +
17699 +#endif
17700 +
17701 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17702 #ifdef CONFIG_X86_32
17703 -5: movl %edx,4(%_ASM_CX)
17704 +5: __copyuser_seg movl %edx,4(_DEST)
17705 #endif
17706 xor %eax,%eax
17707 EXIT
17708 diff -urNp linux-3.0.3/arch/x86/lib/usercopy_32.c linux-3.0.3/arch/x86/lib/usercopy_32.c
17709 --- linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17710 +++ linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17711 @@ -43,7 +43,7 @@ do { \
17712 __asm__ __volatile__( \
17713 " testl %1,%1\n" \
17714 " jz 2f\n" \
17715 - "0: lodsb\n" \
17716 + "0: "__copyuser_seg"lodsb\n" \
17717 " stosb\n" \
17718 " testb %%al,%%al\n" \
17719 " jz 1f\n" \
17720 @@ -128,10 +128,12 @@ do { \
17721 int __d0; \
17722 might_fault(); \
17723 __asm__ __volatile__( \
17724 + __COPYUSER_SET_ES \
17725 "0: rep; stosl\n" \
17726 " movl %2,%0\n" \
17727 "1: rep; stosb\n" \
17728 "2:\n" \
17729 + __COPYUSER_RESTORE_ES \
17730 ".section .fixup,\"ax\"\n" \
17731 "3: lea 0(%2,%0,4),%0\n" \
17732 " jmp 2b\n" \
17733 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17734 might_fault();
17735
17736 __asm__ __volatile__(
17737 + __COPYUSER_SET_ES
17738 " testl %0, %0\n"
17739 " jz 3f\n"
17740 " andl %0,%%ecx\n"
17741 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17742 " subl %%ecx,%0\n"
17743 " addl %0,%%eax\n"
17744 "1:\n"
17745 + __COPYUSER_RESTORE_ES
17746 ".section .fixup,\"ax\"\n"
17747 "2: xorl %%eax,%%eax\n"
17748 " jmp 1b\n"
17749 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17750
17751 #ifdef CONFIG_X86_INTEL_USERCOPY
17752 static unsigned long
17753 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17754 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17755 {
17756 int d0, d1;
17757 __asm__ __volatile__(
17758 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17759 " .align 2,0x90\n"
17760 "3: movl 0(%4), %%eax\n"
17761 "4: movl 4(%4), %%edx\n"
17762 - "5: movl %%eax, 0(%3)\n"
17763 - "6: movl %%edx, 4(%3)\n"
17764 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17765 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17766 "7: movl 8(%4), %%eax\n"
17767 "8: movl 12(%4),%%edx\n"
17768 - "9: movl %%eax, 8(%3)\n"
17769 - "10: movl %%edx, 12(%3)\n"
17770 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17771 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17772 "11: movl 16(%4), %%eax\n"
17773 "12: movl 20(%4), %%edx\n"
17774 - "13: movl %%eax, 16(%3)\n"
17775 - "14: movl %%edx, 20(%3)\n"
17776 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17777 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17778 "15: movl 24(%4), %%eax\n"
17779 "16: movl 28(%4), %%edx\n"
17780 - "17: movl %%eax, 24(%3)\n"
17781 - "18: movl %%edx, 28(%3)\n"
17782 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17783 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17784 "19: movl 32(%4), %%eax\n"
17785 "20: movl 36(%4), %%edx\n"
17786 - "21: movl %%eax, 32(%3)\n"
17787 - "22: movl %%edx, 36(%3)\n"
17788 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17789 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17790 "23: movl 40(%4), %%eax\n"
17791 "24: movl 44(%4), %%edx\n"
17792 - "25: movl %%eax, 40(%3)\n"
17793 - "26: movl %%edx, 44(%3)\n"
17794 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17795 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17796 "27: movl 48(%4), %%eax\n"
17797 "28: movl 52(%4), %%edx\n"
17798 - "29: movl %%eax, 48(%3)\n"
17799 - "30: movl %%edx, 52(%3)\n"
17800 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17801 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17802 "31: movl 56(%4), %%eax\n"
17803 "32: movl 60(%4), %%edx\n"
17804 - "33: movl %%eax, 56(%3)\n"
17805 - "34: movl %%edx, 60(%3)\n"
17806 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17807 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17808 " addl $-64, %0\n"
17809 " addl $64, %4\n"
17810 " addl $64, %3\n"
17811 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17812 " shrl $2, %0\n"
17813 " andl $3, %%eax\n"
17814 " cld\n"
17815 + __COPYUSER_SET_ES
17816 "99: rep; movsl\n"
17817 "36: movl %%eax, %0\n"
17818 "37: rep; movsb\n"
17819 "100:\n"
17820 + __COPYUSER_RESTORE_ES
17821 + ".section .fixup,\"ax\"\n"
17822 + "101: lea 0(%%eax,%0,4),%0\n"
17823 + " jmp 100b\n"
17824 + ".previous\n"
17825 + ".section __ex_table,\"a\"\n"
17826 + " .align 4\n"
17827 + " .long 1b,100b\n"
17828 + " .long 2b,100b\n"
17829 + " .long 3b,100b\n"
17830 + " .long 4b,100b\n"
17831 + " .long 5b,100b\n"
17832 + " .long 6b,100b\n"
17833 + " .long 7b,100b\n"
17834 + " .long 8b,100b\n"
17835 + " .long 9b,100b\n"
17836 + " .long 10b,100b\n"
17837 + " .long 11b,100b\n"
17838 + " .long 12b,100b\n"
17839 + " .long 13b,100b\n"
17840 + " .long 14b,100b\n"
17841 + " .long 15b,100b\n"
17842 + " .long 16b,100b\n"
17843 + " .long 17b,100b\n"
17844 + " .long 18b,100b\n"
17845 + " .long 19b,100b\n"
17846 + " .long 20b,100b\n"
17847 + " .long 21b,100b\n"
17848 + " .long 22b,100b\n"
17849 + " .long 23b,100b\n"
17850 + " .long 24b,100b\n"
17851 + " .long 25b,100b\n"
17852 + " .long 26b,100b\n"
17853 + " .long 27b,100b\n"
17854 + " .long 28b,100b\n"
17855 + " .long 29b,100b\n"
17856 + " .long 30b,100b\n"
17857 + " .long 31b,100b\n"
17858 + " .long 32b,100b\n"
17859 + " .long 33b,100b\n"
17860 + " .long 34b,100b\n"
17861 + " .long 35b,100b\n"
17862 + " .long 36b,100b\n"
17863 + " .long 37b,100b\n"
17864 + " .long 99b,101b\n"
17865 + ".previous"
17866 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17867 + : "1"(to), "2"(from), "0"(size)
17868 + : "eax", "edx", "memory");
17869 + return size;
17870 +}
17871 +
17872 +static unsigned long
17873 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17874 +{
17875 + int d0, d1;
17876 + __asm__ __volatile__(
17877 + " .align 2,0x90\n"
17878 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17879 + " cmpl $67, %0\n"
17880 + " jbe 3f\n"
17881 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17882 + " .align 2,0x90\n"
17883 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17884 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17885 + "5: movl %%eax, 0(%3)\n"
17886 + "6: movl %%edx, 4(%3)\n"
17887 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17888 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17889 + "9: movl %%eax, 8(%3)\n"
17890 + "10: movl %%edx, 12(%3)\n"
17891 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17892 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17893 + "13: movl %%eax, 16(%3)\n"
17894 + "14: movl %%edx, 20(%3)\n"
17895 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17896 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17897 + "17: movl %%eax, 24(%3)\n"
17898 + "18: movl %%edx, 28(%3)\n"
17899 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17900 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17901 + "21: movl %%eax, 32(%3)\n"
17902 + "22: movl %%edx, 36(%3)\n"
17903 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17904 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17905 + "25: movl %%eax, 40(%3)\n"
17906 + "26: movl %%edx, 44(%3)\n"
17907 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17908 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17909 + "29: movl %%eax, 48(%3)\n"
17910 + "30: movl %%edx, 52(%3)\n"
17911 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17912 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17913 + "33: movl %%eax, 56(%3)\n"
17914 + "34: movl %%edx, 60(%3)\n"
17915 + " addl $-64, %0\n"
17916 + " addl $64, %4\n"
17917 + " addl $64, %3\n"
17918 + " cmpl $63, %0\n"
17919 + " ja 1b\n"
17920 + "35: movl %0, %%eax\n"
17921 + " shrl $2, %0\n"
17922 + " andl $3, %%eax\n"
17923 + " cld\n"
17924 + "99: rep; "__copyuser_seg" movsl\n"
17925 + "36: movl %%eax, %0\n"
17926 + "37: rep; "__copyuser_seg" movsb\n"
17927 + "100:\n"
17928 ".section .fixup,\"ax\"\n"
17929 "101: lea 0(%%eax,%0,4),%0\n"
17930 " jmp 100b\n"
17931 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17932 int d0, d1;
17933 __asm__ __volatile__(
17934 " .align 2,0x90\n"
17935 - "0: movl 32(%4), %%eax\n"
17936 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17937 " cmpl $67, %0\n"
17938 " jbe 2f\n"
17939 - "1: movl 64(%4), %%eax\n"
17940 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17941 " .align 2,0x90\n"
17942 - "2: movl 0(%4), %%eax\n"
17943 - "21: movl 4(%4), %%edx\n"
17944 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17945 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17946 " movl %%eax, 0(%3)\n"
17947 " movl %%edx, 4(%3)\n"
17948 - "3: movl 8(%4), %%eax\n"
17949 - "31: movl 12(%4),%%edx\n"
17950 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17951 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17952 " movl %%eax, 8(%3)\n"
17953 " movl %%edx, 12(%3)\n"
17954 - "4: movl 16(%4), %%eax\n"
17955 - "41: movl 20(%4), %%edx\n"
17956 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17957 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17958 " movl %%eax, 16(%3)\n"
17959 " movl %%edx, 20(%3)\n"
17960 - "10: movl 24(%4), %%eax\n"
17961 - "51: movl 28(%4), %%edx\n"
17962 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17963 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17964 " movl %%eax, 24(%3)\n"
17965 " movl %%edx, 28(%3)\n"
17966 - "11: movl 32(%4), %%eax\n"
17967 - "61: movl 36(%4), %%edx\n"
17968 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17969 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17970 " movl %%eax, 32(%3)\n"
17971 " movl %%edx, 36(%3)\n"
17972 - "12: movl 40(%4), %%eax\n"
17973 - "71: movl 44(%4), %%edx\n"
17974 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17975 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17976 " movl %%eax, 40(%3)\n"
17977 " movl %%edx, 44(%3)\n"
17978 - "13: movl 48(%4), %%eax\n"
17979 - "81: movl 52(%4), %%edx\n"
17980 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17981 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17982 " movl %%eax, 48(%3)\n"
17983 " movl %%edx, 52(%3)\n"
17984 - "14: movl 56(%4), %%eax\n"
17985 - "91: movl 60(%4), %%edx\n"
17986 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17987 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17988 " movl %%eax, 56(%3)\n"
17989 " movl %%edx, 60(%3)\n"
17990 " addl $-64, %0\n"
17991 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17992 " shrl $2, %0\n"
17993 " andl $3, %%eax\n"
17994 " cld\n"
17995 - "6: rep; movsl\n"
17996 + "6: rep; "__copyuser_seg" movsl\n"
17997 " movl %%eax,%0\n"
17998 - "7: rep; movsb\n"
17999 + "7: rep; "__copyuser_seg" movsb\n"
18000 "8:\n"
18001 ".section .fixup,\"ax\"\n"
18002 "9: lea 0(%%eax,%0,4),%0\n"
18003 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18004
18005 __asm__ __volatile__(
18006 " .align 2,0x90\n"
18007 - "0: movl 32(%4), %%eax\n"
18008 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18009 " cmpl $67, %0\n"
18010 " jbe 2f\n"
18011 - "1: movl 64(%4), %%eax\n"
18012 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18013 " .align 2,0x90\n"
18014 - "2: movl 0(%4), %%eax\n"
18015 - "21: movl 4(%4), %%edx\n"
18016 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18017 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18018 " movnti %%eax, 0(%3)\n"
18019 " movnti %%edx, 4(%3)\n"
18020 - "3: movl 8(%4), %%eax\n"
18021 - "31: movl 12(%4),%%edx\n"
18022 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18023 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18024 " movnti %%eax, 8(%3)\n"
18025 " movnti %%edx, 12(%3)\n"
18026 - "4: movl 16(%4), %%eax\n"
18027 - "41: movl 20(%4), %%edx\n"
18028 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18029 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18030 " movnti %%eax, 16(%3)\n"
18031 " movnti %%edx, 20(%3)\n"
18032 - "10: movl 24(%4), %%eax\n"
18033 - "51: movl 28(%4), %%edx\n"
18034 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18035 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18036 " movnti %%eax, 24(%3)\n"
18037 " movnti %%edx, 28(%3)\n"
18038 - "11: movl 32(%4), %%eax\n"
18039 - "61: movl 36(%4), %%edx\n"
18040 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18041 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18042 " movnti %%eax, 32(%3)\n"
18043 " movnti %%edx, 36(%3)\n"
18044 - "12: movl 40(%4), %%eax\n"
18045 - "71: movl 44(%4), %%edx\n"
18046 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18047 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18048 " movnti %%eax, 40(%3)\n"
18049 " movnti %%edx, 44(%3)\n"
18050 - "13: movl 48(%4), %%eax\n"
18051 - "81: movl 52(%4), %%edx\n"
18052 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18053 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18054 " movnti %%eax, 48(%3)\n"
18055 " movnti %%edx, 52(%3)\n"
18056 - "14: movl 56(%4), %%eax\n"
18057 - "91: movl 60(%4), %%edx\n"
18058 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18059 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18060 " movnti %%eax, 56(%3)\n"
18061 " movnti %%edx, 60(%3)\n"
18062 " addl $-64, %0\n"
18063 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18064 " shrl $2, %0\n"
18065 " andl $3, %%eax\n"
18066 " cld\n"
18067 - "6: rep; movsl\n"
18068 + "6: rep; "__copyuser_seg" movsl\n"
18069 " movl %%eax,%0\n"
18070 - "7: rep; movsb\n"
18071 + "7: rep; "__copyuser_seg" movsb\n"
18072 "8:\n"
18073 ".section .fixup,\"ax\"\n"
18074 "9: lea 0(%%eax,%0,4),%0\n"
18075 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18076
18077 __asm__ __volatile__(
18078 " .align 2,0x90\n"
18079 - "0: movl 32(%4), %%eax\n"
18080 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18081 " cmpl $67, %0\n"
18082 " jbe 2f\n"
18083 - "1: movl 64(%4), %%eax\n"
18084 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18085 " .align 2,0x90\n"
18086 - "2: movl 0(%4), %%eax\n"
18087 - "21: movl 4(%4), %%edx\n"
18088 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18089 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18090 " movnti %%eax, 0(%3)\n"
18091 " movnti %%edx, 4(%3)\n"
18092 - "3: movl 8(%4), %%eax\n"
18093 - "31: movl 12(%4),%%edx\n"
18094 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18095 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18096 " movnti %%eax, 8(%3)\n"
18097 " movnti %%edx, 12(%3)\n"
18098 - "4: movl 16(%4), %%eax\n"
18099 - "41: movl 20(%4), %%edx\n"
18100 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18101 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18102 " movnti %%eax, 16(%3)\n"
18103 " movnti %%edx, 20(%3)\n"
18104 - "10: movl 24(%4), %%eax\n"
18105 - "51: movl 28(%4), %%edx\n"
18106 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18107 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18108 " movnti %%eax, 24(%3)\n"
18109 " movnti %%edx, 28(%3)\n"
18110 - "11: movl 32(%4), %%eax\n"
18111 - "61: movl 36(%4), %%edx\n"
18112 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18113 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18114 " movnti %%eax, 32(%3)\n"
18115 " movnti %%edx, 36(%3)\n"
18116 - "12: movl 40(%4), %%eax\n"
18117 - "71: movl 44(%4), %%edx\n"
18118 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18119 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18120 " movnti %%eax, 40(%3)\n"
18121 " movnti %%edx, 44(%3)\n"
18122 - "13: movl 48(%4), %%eax\n"
18123 - "81: movl 52(%4), %%edx\n"
18124 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18125 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18126 " movnti %%eax, 48(%3)\n"
18127 " movnti %%edx, 52(%3)\n"
18128 - "14: movl 56(%4), %%eax\n"
18129 - "91: movl 60(%4), %%edx\n"
18130 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18131 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18132 " movnti %%eax, 56(%3)\n"
18133 " movnti %%edx, 60(%3)\n"
18134 " addl $-64, %0\n"
18135 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18136 " shrl $2, %0\n"
18137 " andl $3, %%eax\n"
18138 " cld\n"
18139 - "6: rep; movsl\n"
18140 + "6: rep; "__copyuser_seg" movsl\n"
18141 " movl %%eax,%0\n"
18142 - "7: rep; movsb\n"
18143 + "7: rep; "__copyuser_seg" movsb\n"
18144 "8:\n"
18145 ".section .fixup,\"ax\"\n"
18146 "9: lea 0(%%eax,%0,4),%0\n"
18147 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18148 */
18149 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18150 unsigned long size);
18151 -unsigned long __copy_user_intel(void __user *to, const void *from,
18152 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18153 + unsigned long size);
18154 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18155 unsigned long size);
18156 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18157 const void __user *from, unsigned long size);
18158 #endif /* CONFIG_X86_INTEL_USERCOPY */
18159
18160 /* Generic arbitrary sized copy. */
18161 -#define __copy_user(to, from, size) \
18162 +#define __copy_user(to, from, size, prefix, set, restore) \
18163 do { \
18164 int __d0, __d1, __d2; \
18165 __asm__ __volatile__( \
18166 + set \
18167 " cmp $7,%0\n" \
18168 " jbe 1f\n" \
18169 " movl %1,%0\n" \
18170 " negl %0\n" \
18171 " andl $7,%0\n" \
18172 " subl %0,%3\n" \
18173 - "4: rep; movsb\n" \
18174 + "4: rep; "prefix"movsb\n" \
18175 " movl %3,%0\n" \
18176 " shrl $2,%0\n" \
18177 " andl $3,%3\n" \
18178 " .align 2,0x90\n" \
18179 - "0: rep; movsl\n" \
18180 + "0: rep; "prefix"movsl\n" \
18181 " movl %3,%0\n" \
18182 - "1: rep; movsb\n" \
18183 + "1: rep; "prefix"movsb\n" \
18184 "2:\n" \
18185 + restore \
18186 ".section .fixup,\"ax\"\n" \
18187 "5: addl %3,%0\n" \
18188 " jmp 2b\n" \
18189 @@ -682,14 +799,14 @@ do { \
18190 " negl %0\n" \
18191 " andl $7,%0\n" \
18192 " subl %0,%3\n" \
18193 - "4: rep; movsb\n" \
18194 + "4: rep; "__copyuser_seg"movsb\n" \
18195 " movl %3,%0\n" \
18196 " shrl $2,%0\n" \
18197 " andl $3,%3\n" \
18198 " .align 2,0x90\n" \
18199 - "0: rep; movsl\n" \
18200 + "0: rep; "__copyuser_seg"movsl\n" \
18201 " movl %3,%0\n" \
18202 - "1: rep; movsb\n" \
18203 + "1: rep; "__copyuser_seg"movsb\n" \
18204 "2:\n" \
18205 ".section .fixup,\"ax\"\n" \
18206 "5: addl %3,%0\n" \
18207 @@ -775,9 +892,9 @@ survive:
18208 }
18209 #endif
18210 if (movsl_is_ok(to, from, n))
18211 - __copy_user(to, from, n);
18212 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18213 else
18214 - n = __copy_user_intel(to, from, n);
18215 + n = __generic_copy_to_user_intel(to, from, n);
18216 return n;
18217 }
18218 EXPORT_SYMBOL(__copy_to_user_ll);
18219 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18220 unsigned long n)
18221 {
18222 if (movsl_is_ok(to, from, n))
18223 - __copy_user(to, from, n);
18224 + __copy_user(to, from, n, __copyuser_seg, "", "");
18225 else
18226 - n = __copy_user_intel((void __user *)to,
18227 - (const void *)from, n);
18228 + n = __generic_copy_from_user_intel(to, from, n);
18229 return n;
18230 }
18231 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18232 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18233 if (n > 64 && cpu_has_xmm2)
18234 n = __copy_user_intel_nocache(to, from, n);
18235 else
18236 - __copy_user(to, from, n);
18237 + __copy_user(to, from, n, __copyuser_seg, "", "");
18238 #else
18239 - __copy_user(to, from, n);
18240 + __copy_user(to, from, n, __copyuser_seg, "", "");
18241 #endif
18242 return n;
18243 }
18244 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18245
18246 -/**
18247 - * copy_to_user: - Copy a block of data into user space.
18248 - * @to: Destination address, in user space.
18249 - * @from: Source address, in kernel space.
18250 - * @n: Number of bytes to copy.
18251 - *
18252 - * Context: User context only. This function may sleep.
18253 - *
18254 - * Copy data from kernel space to user space.
18255 - *
18256 - * Returns number of bytes that could not be copied.
18257 - * On success, this will be zero.
18258 - */
18259 -unsigned long
18260 -copy_to_user(void __user *to, const void *from, unsigned long n)
18261 +void copy_from_user_overflow(void)
18262 {
18263 - if (access_ok(VERIFY_WRITE, to, n))
18264 - n = __copy_to_user(to, from, n);
18265 - return n;
18266 + WARN(1, "Buffer overflow detected!\n");
18267 }
18268 -EXPORT_SYMBOL(copy_to_user);
18269 +EXPORT_SYMBOL(copy_from_user_overflow);
18270
18271 -/**
18272 - * copy_from_user: - Copy a block of data from user space.
18273 - * @to: Destination address, in kernel space.
18274 - * @from: Source address, in user space.
18275 - * @n: Number of bytes to copy.
18276 - *
18277 - * Context: User context only. This function may sleep.
18278 - *
18279 - * Copy data from user space to kernel space.
18280 - *
18281 - * Returns number of bytes that could not be copied.
18282 - * On success, this will be zero.
18283 - *
18284 - * If some data could not be copied, this function will pad the copied
18285 - * data to the requested size using zero bytes.
18286 - */
18287 -unsigned long
18288 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18289 +void copy_to_user_overflow(void)
18290 {
18291 - if (access_ok(VERIFY_READ, from, n))
18292 - n = __copy_from_user(to, from, n);
18293 - else
18294 - memset(to, 0, n);
18295 - return n;
18296 + WARN(1, "Buffer overflow detected!\n");
18297 }
18298 -EXPORT_SYMBOL(_copy_from_user);
18299 +EXPORT_SYMBOL(copy_to_user_overflow);
18300
18301 -void copy_from_user_overflow(void)
18302 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18303 +void __set_fs(mm_segment_t x)
18304 {
18305 - WARN(1, "Buffer overflow detected!\n");
18306 + switch (x.seg) {
18307 + case 0:
18308 + loadsegment(gs, 0);
18309 + break;
18310 + case TASK_SIZE_MAX:
18311 + loadsegment(gs, __USER_DS);
18312 + break;
18313 + case -1UL:
18314 + loadsegment(gs, __KERNEL_DS);
18315 + break;
18316 + default:
18317 + BUG();
18318 + }
18319 + return;
18320 }
18321 -EXPORT_SYMBOL(copy_from_user_overflow);
18322 +EXPORT_SYMBOL(__set_fs);
18323 +
18324 +void set_fs(mm_segment_t x)
18325 +{
18326 + current_thread_info()->addr_limit = x;
18327 + __set_fs(x);
18328 +}
18329 +EXPORT_SYMBOL(set_fs);
18330 +#endif
18331 diff -urNp linux-3.0.3/arch/x86/lib/usercopy_64.c linux-3.0.3/arch/x86/lib/usercopy_64.c
18332 --- linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18333 +++ linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18334 @@ -42,6 +42,12 @@ long
18335 __strncpy_from_user(char *dst, const char __user *src, long count)
18336 {
18337 long res;
18338 +
18339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18340 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18341 + src += PAX_USER_SHADOW_BASE;
18342 +#endif
18343 +
18344 __do_strncpy_from_user(dst, src, count, res);
18345 return res;
18346 }
18347 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18348 {
18349 long __d0;
18350 might_fault();
18351 +
18352 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18353 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18354 + addr += PAX_USER_SHADOW_BASE;
18355 +#endif
18356 +
18357 /* no memory constraint because it doesn't change any memory gcc knows
18358 about */
18359 asm volatile(
18360 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18361
18362 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18363 {
18364 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18365 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18366 +
18367 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18368 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18369 + to += PAX_USER_SHADOW_BASE;
18370 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18371 + from += PAX_USER_SHADOW_BASE;
18372 +#endif
18373 +
18374 return copy_user_generic((__force void *)to, (__force void *)from, len);
18375 - }
18376 - return len;
18377 + }
18378 + return len;
18379 }
18380 EXPORT_SYMBOL(copy_in_user);
18381
18382 diff -urNp linux-3.0.3/arch/x86/Makefile linux-3.0.3/arch/x86/Makefile
18383 --- linux-3.0.3/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18384 +++ linux-3.0.3/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18385 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18386 else
18387 BITS := 64
18388 UTS_MACHINE := x86_64
18389 + biarch := $(call cc-option,-m64)
18390 CHECKFLAGS += -D__x86_64__ -m64
18391
18392 KBUILD_AFLAGS += -m64
18393 @@ -195,3 +196,12 @@ define archhelp
18394 echo ' FDARGS="..." arguments for the booted kernel'
18395 echo ' FDINITRD=file initrd for the booted kernel'
18396 endef
18397 +
18398 +define OLD_LD
18399 +
18400 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18401 +*** Please upgrade your binutils to 2.18 or newer
18402 +endef
18403 +
18404 +archprepare:
18405 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18406 diff -urNp linux-3.0.3/arch/x86/mm/extable.c linux-3.0.3/arch/x86/mm/extable.c
18407 --- linux-3.0.3/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18408 +++ linux-3.0.3/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18409 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18410 const struct exception_table_entry *fixup;
18411
18412 #ifdef CONFIG_PNPBIOS
18413 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18414 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18415 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18416 extern u32 pnp_bios_is_utter_crap;
18417 pnp_bios_is_utter_crap = 1;
18418 diff -urNp linux-3.0.3/arch/x86/mm/fault.c linux-3.0.3/arch/x86/mm/fault.c
18419 --- linux-3.0.3/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18420 +++ linux-3.0.3/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18421 @@ -13,10 +13,18 @@
18422 #include <linux/perf_event.h> /* perf_sw_event */
18423 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18424 #include <linux/prefetch.h> /* prefetchw */
18425 +#include <linux/unistd.h>
18426 +#include <linux/compiler.h>
18427
18428 #include <asm/traps.h> /* dotraplinkage, ... */
18429 #include <asm/pgalloc.h> /* pgd_*(), ... */
18430 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18431 +#include <asm/vsyscall.h>
18432 +#include <asm/tlbflush.h>
18433 +
18434 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18435 +#include <asm/stacktrace.h>
18436 +#endif
18437
18438 /*
18439 * Page fault error code bits:
18440 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18441 int ret = 0;
18442
18443 /* kprobe_running() needs smp_processor_id() */
18444 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18445 + if (kprobes_built_in() && !user_mode(regs)) {
18446 preempt_disable();
18447 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18448 ret = 1;
18449 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18450 return !instr_lo || (instr_lo>>1) == 1;
18451 case 0x00:
18452 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18453 - if (probe_kernel_address(instr, opcode))
18454 + if (user_mode(regs)) {
18455 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18456 + return 0;
18457 + } else if (probe_kernel_address(instr, opcode))
18458 return 0;
18459
18460 *prefetch = (instr_lo == 0xF) &&
18461 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18462 while (instr < max_instr) {
18463 unsigned char opcode;
18464
18465 - if (probe_kernel_address(instr, opcode))
18466 + if (user_mode(regs)) {
18467 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18468 + break;
18469 + } else if (probe_kernel_address(instr, opcode))
18470 break;
18471
18472 instr++;
18473 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18474 force_sig_info(si_signo, &info, tsk);
18475 }
18476
18477 +#ifdef CONFIG_PAX_EMUTRAMP
18478 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18479 +#endif
18480 +
18481 +#ifdef CONFIG_PAX_PAGEEXEC
18482 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18483 +{
18484 + pgd_t *pgd;
18485 + pud_t *pud;
18486 + pmd_t *pmd;
18487 +
18488 + pgd = pgd_offset(mm, address);
18489 + if (!pgd_present(*pgd))
18490 + return NULL;
18491 + pud = pud_offset(pgd, address);
18492 + if (!pud_present(*pud))
18493 + return NULL;
18494 + pmd = pmd_offset(pud, address);
18495 + if (!pmd_present(*pmd))
18496 + return NULL;
18497 + return pmd;
18498 +}
18499 +#endif
18500 +
18501 DEFINE_SPINLOCK(pgd_lock);
18502 LIST_HEAD(pgd_list);
18503
18504 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18505 for (address = VMALLOC_START & PMD_MASK;
18506 address >= TASK_SIZE && address < FIXADDR_TOP;
18507 address += PMD_SIZE) {
18508 +
18509 +#ifdef CONFIG_PAX_PER_CPU_PGD
18510 + unsigned long cpu;
18511 +#else
18512 struct page *page;
18513 +#endif
18514
18515 spin_lock(&pgd_lock);
18516 +
18517 +#ifdef CONFIG_PAX_PER_CPU_PGD
18518 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18519 + pgd_t *pgd = get_cpu_pgd(cpu);
18520 + pmd_t *ret;
18521 +#else
18522 list_for_each_entry(page, &pgd_list, lru) {
18523 + pgd_t *pgd = page_address(page);
18524 spinlock_t *pgt_lock;
18525 pmd_t *ret;
18526
18527 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18528 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18529
18530 spin_lock(pgt_lock);
18531 - ret = vmalloc_sync_one(page_address(page), address);
18532 +#endif
18533 +
18534 + ret = vmalloc_sync_one(pgd, address);
18535 +
18536 +#ifndef CONFIG_PAX_PER_CPU_PGD
18537 spin_unlock(pgt_lock);
18538 +#endif
18539
18540 if (!ret)
18541 break;
18542 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18543 * an interrupt in the middle of a task switch..
18544 */
18545 pgd_paddr = read_cr3();
18546 +
18547 +#ifdef CONFIG_PAX_PER_CPU_PGD
18548 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18549 +#endif
18550 +
18551 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18552 if (!pmd_k)
18553 return -1;
18554 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18555 * happen within a race in page table update. In the later
18556 * case just flush:
18557 */
18558 +
18559 +#ifdef CONFIG_PAX_PER_CPU_PGD
18560 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18561 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18562 +#else
18563 pgd = pgd_offset(current->active_mm, address);
18564 +#endif
18565 +
18566 pgd_ref = pgd_offset_k(address);
18567 if (pgd_none(*pgd_ref))
18568 return -1;
18569 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18570 static int is_errata100(struct pt_regs *regs, unsigned long address)
18571 {
18572 #ifdef CONFIG_X86_64
18573 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18574 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18575 return 1;
18576 #endif
18577 return 0;
18578 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18579 }
18580
18581 static const char nx_warning[] = KERN_CRIT
18582 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18583 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18584
18585 static void
18586 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18587 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18588 if (!oops_may_print())
18589 return;
18590
18591 - if (error_code & PF_INSTR) {
18592 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18593 unsigned int level;
18594
18595 pte_t *pte = lookup_address(address, &level);
18596
18597 if (pte && pte_present(*pte) && !pte_exec(*pte))
18598 - printk(nx_warning, current_uid());
18599 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18600 + }
18601 +
18602 +#ifdef CONFIG_PAX_KERNEXEC
18603 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18604 + if (current->signal->curr_ip)
18605 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18606 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18607 + else
18608 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18609 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18610 }
18611 +#endif
18612
18613 printk(KERN_ALERT "BUG: unable to handle kernel ");
18614 if (address < PAGE_SIZE)
18615 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18616 unsigned long address, int si_code)
18617 {
18618 struct task_struct *tsk = current;
18619 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18620 + struct mm_struct *mm = tsk->mm;
18621 +#endif
18622 +
18623 +#ifdef CONFIG_X86_64
18624 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18625 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18626 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18627 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18628 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18629 + return;
18630 + }
18631 + }
18632 +#endif
18633 +
18634 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18635 + if (mm && (error_code & PF_USER)) {
18636 + unsigned long ip = regs->ip;
18637 +
18638 + if (v8086_mode(regs))
18639 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18640 +
18641 + /*
18642 + * It's possible to have interrupts off here:
18643 + */
18644 + local_irq_enable();
18645 +
18646 +#ifdef CONFIG_PAX_PAGEEXEC
18647 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18648 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18649 +
18650 +#ifdef CONFIG_PAX_EMUTRAMP
18651 + switch (pax_handle_fetch_fault(regs)) {
18652 + case 2:
18653 + return;
18654 + }
18655 +#endif
18656 +
18657 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18658 + do_group_exit(SIGKILL);
18659 + }
18660 +#endif
18661 +
18662 +#ifdef CONFIG_PAX_SEGMEXEC
18663 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18664 +
18665 +#ifdef CONFIG_PAX_EMUTRAMP
18666 + switch (pax_handle_fetch_fault(regs)) {
18667 + case 2:
18668 + return;
18669 + }
18670 +#endif
18671 +
18672 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18673 + do_group_exit(SIGKILL);
18674 + }
18675 +#endif
18676 +
18677 + }
18678 +#endif
18679
18680 /* User mode accesses just cause a SIGSEGV */
18681 if (error_code & PF_USER) {
18682 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18683 return 1;
18684 }
18685
18686 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18687 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18688 +{
18689 + pte_t *pte;
18690 + pmd_t *pmd;
18691 + spinlock_t *ptl;
18692 + unsigned char pte_mask;
18693 +
18694 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18695 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18696 + return 0;
18697 +
18698 + /* PaX: it's our fault, let's handle it if we can */
18699 +
18700 + /* PaX: take a look at read faults before acquiring any locks */
18701 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18702 + /* instruction fetch attempt from a protected page in user mode */
18703 + up_read(&mm->mmap_sem);
18704 +
18705 +#ifdef CONFIG_PAX_EMUTRAMP
18706 + switch (pax_handle_fetch_fault(regs)) {
18707 + case 2:
18708 + return 1;
18709 + }
18710 +#endif
18711 +
18712 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18713 + do_group_exit(SIGKILL);
18714 + }
18715 +
18716 + pmd = pax_get_pmd(mm, address);
18717 + if (unlikely(!pmd))
18718 + return 0;
18719 +
18720 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18721 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18722 + pte_unmap_unlock(pte, ptl);
18723 + return 0;
18724 + }
18725 +
18726 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18727 + /* write attempt to a protected page in user mode */
18728 + pte_unmap_unlock(pte, ptl);
18729 + return 0;
18730 + }
18731 +
18732 +#ifdef CONFIG_SMP
18733 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18734 +#else
18735 + if (likely(address > get_limit(regs->cs)))
18736 +#endif
18737 + {
18738 + set_pte(pte, pte_mkread(*pte));
18739 + __flush_tlb_one(address);
18740 + pte_unmap_unlock(pte, ptl);
18741 + up_read(&mm->mmap_sem);
18742 + return 1;
18743 + }
18744 +
18745 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18746 +
18747 + /*
18748 + * PaX: fill DTLB with user rights and retry
18749 + */
18750 + __asm__ __volatile__ (
18751 + "orb %2,(%1)\n"
18752 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18753 +/*
18754 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18755 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18756 + * page fault when examined during a TLB load attempt. this is true not only
18757 + * for PTEs holding a non-present entry but also present entries that will
18758 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18759 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18760 + * for our target pages since their PTEs are simply not in the TLBs at all.
18761 +
18762 + * the best thing in omitting it is that we gain around 15-20% speed in the
18763 + * fast path of the page fault handler and can get rid of tracing since we
18764 + * can no longer flush unintended entries.
18765 + */
18766 + "invlpg (%0)\n"
18767 +#endif
18768 + __copyuser_seg"testb $0,(%0)\n"
18769 + "xorb %3,(%1)\n"
18770 + :
18771 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18772 + : "memory", "cc");
18773 + pte_unmap_unlock(pte, ptl);
18774 + up_read(&mm->mmap_sem);
18775 + return 1;
18776 +}
18777 +#endif
18778 +
18779 /*
18780 * Handle a spurious fault caused by a stale TLB entry.
18781 *
18782 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18783 static inline int
18784 access_error(unsigned long error_code, struct vm_area_struct *vma)
18785 {
18786 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18787 + return 1;
18788 +
18789 if (error_code & PF_WRITE) {
18790 /* write, present and write, not present: */
18791 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18792 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18793 {
18794 struct vm_area_struct *vma;
18795 struct task_struct *tsk;
18796 - unsigned long address;
18797 struct mm_struct *mm;
18798 int fault;
18799 int write = error_code & PF_WRITE;
18800 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18801 (write ? FAULT_FLAG_WRITE : 0);
18802
18803 + /* Get the faulting address: */
18804 + unsigned long address = read_cr2();
18805 +
18806 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18807 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18808 + if (!search_exception_tables(regs->ip)) {
18809 + bad_area_nosemaphore(regs, error_code, address);
18810 + return;
18811 + }
18812 + if (address < PAX_USER_SHADOW_BASE) {
18813 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18814 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18815 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18816 + } else
18817 + address -= PAX_USER_SHADOW_BASE;
18818 + }
18819 +#endif
18820 +
18821 tsk = current;
18822 mm = tsk->mm;
18823
18824 - /* Get the faulting address: */
18825 - address = read_cr2();
18826 -
18827 /*
18828 * Detect and handle instructions that would cause a page fault for
18829 * both a tracked kernel page and a userspace page.
18830 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18831 * User-mode registers count as a user access even for any
18832 * potential system fault or CPU buglet:
18833 */
18834 - if (user_mode_vm(regs)) {
18835 + if (user_mode(regs)) {
18836 local_irq_enable();
18837 error_code |= PF_USER;
18838 } else {
18839 @@ -1103,6 +1351,11 @@ retry:
18840 might_sleep();
18841 }
18842
18843 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18844 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18845 + return;
18846 +#endif
18847 +
18848 vma = find_vma(mm, address);
18849 if (unlikely(!vma)) {
18850 bad_area(regs, error_code, address);
18851 @@ -1114,18 +1367,24 @@ retry:
18852 bad_area(regs, error_code, address);
18853 return;
18854 }
18855 - if (error_code & PF_USER) {
18856 - /*
18857 - * Accessing the stack below %sp is always a bug.
18858 - * The large cushion allows instructions like enter
18859 - * and pusha to work. ("enter $65535, $31" pushes
18860 - * 32 pointers and then decrements %sp by 65535.)
18861 - */
18862 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18863 - bad_area(regs, error_code, address);
18864 - return;
18865 - }
18866 + /*
18867 + * Accessing the stack below %sp is always a bug.
18868 + * The large cushion allows instructions like enter
18869 + * and pusha to work. ("enter $65535, $31" pushes
18870 + * 32 pointers and then decrements %sp by 65535.)
18871 + */
18872 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18873 + bad_area(regs, error_code, address);
18874 + return;
18875 }
18876 +
18877 +#ifdef CONFIG_PAX_SEGMEXEC
18878 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18879 + bad_area(regs, error_code, address);
18880 + return;
18881 + }
18882 +#endif
18883 +
18884 if (unlikely(expand_stack(vma, address))) {
18885 bad_area(regs, error_code, address);
18886 return;
18887 @@ -1180,3 +1439,199 @@ good_area:
18888
18889 up_read(&mm->mmap_sem);
18890 }
18891 +
18892 +#ifdef CONFIG_PAX_EMUTRAMP
18893 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18894 +{
18895 + int err;
18896 +
18897 + do { /* PaX: gcc trampoline emulation #1 */
18898 + unsigned char mov1, mov2;
18899 + unsigned short jmp;
18900 + unsigned int addr1, addr2;
18901 +
18902 +#ifdef CONFIG_X86_64
18903 + if ((regs->ip + 11) >> 32)
18904 + break;
18905 +#endif
18906 +
18907 + err = get_user(mov1, (unsigned char __user *)regs->ip);
18908 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18909 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18910 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18911 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18912 +
18913 + if (err)
18914 + break;
18915 +
18916 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18917 + regs->cx = addr1;
18918 + regs->ax = addr2;
18919 + regs->ip = addr2;
18920 + return 2;
18921 + }
18922 + } while (0);
18923 +
18924 + do { /* PaX: gcc trampoline emulation #2 */
18925 + unsigned char mov, jmp;
18926 + unsigned int addr1, addr2;
18927 +
18928 +#ifdef CONFIG_X86_64
18929 + if ((regs->ip + 9) >> 32)
18930 + break;
18931 +#endif
18932 +
18933 + err = get_user(mov, (unsigned char __user *)regs->ip);
18934 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18935 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18936 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18937 +
18938 + if (err)
18939 + break;
18940 +
18941 + if (mov == 0xB9 && jmp == 0xE9) {
18942 + regs->cx = addr1;
18943 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18944 + return 2;
18945 + }
18946 + } while (0);
18947 +
18948 + return 1; /* PaX in action */
18949 +}
18950 +
18951 +#ifdef CONFIG_X86_64
18952 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18953 +{
18954 + int err;
18955 +
18956 + do { /* PaX: gcc trampoline emulation #1 */
18957 + unsigned short mov1, mov2, jmp1;
18958 + unsigned char jmp2;
18959 + unsigned int addr1;
18960 + unsigned long addr2;
18961 +
18962 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18963 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18964 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18965 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18966 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18967 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18968 +
18969 + if (err)
18970 + break;
18971 +
18972 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18973 + regs->r11 = addr1;
18974 + regs->r10 = addr2;
18975 + regs->ip = addr1;
18976 + return 2;
18977 + }
18978 + } while (0);
18979 +
18980 + do { /* PaX: gcc trampoline emulation #2 */
18981 + unsigned short mov1, mov2, jmp1;
18982 + unsigned char jmp2;
18983 + unsigned long addr1, addr2;
18984 +
18985 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18986 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18987 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18988 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18989 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18990 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18991 +
18992 + if (err)
18993 + break;
18994 +
18995 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18996 + regs->r11 = addr1;
18997 + regs->r10 = addr2;
18998 + regs->ip = addr1;
18999 + return 2;
19000 + }
19001 + } while (0);
19002 +
19003 + return 1; /* PaX in action */
19004 +}
19005 +#endif
19006 +
19007 +/*
19008 + * PaX: decide what to do with offenders (regs->ip = fault address)
19009 + *
19010 + * returns 1 when task should be killed
19011 + * 2 when gcc trampoline was detected
19012 + */
19013 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19014 +{
19015 + if (v8086_mode(regs))
19016 + return 1;
19017 +
19018 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19019 + return 1;
19020 +
19021 +#ifdef CONFIG_X86_32
19022 + return pax_handle_fetch_fault_32(regs);
19023 +#else
19024 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19025 + return pax_handle_fetch_fault_32(regs);
19026 + else
19027 + return pax_handle_fetch_fault_64(regs);
19028 +#endif
19029 +}
19030 +#endif
19031 +
19032 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19033 +void pax_report_insns(void *pc, void *sp)
19034 +{
19035 + long i;
19036 +
19037 + printk(KERN_ERR "PAX: bytes at PC: ");
19038 + for (i = 0; i < 20; i++) {
19039 + unsigned char c;
19040 + if (get_user(c, (__force unsigned char __user *)pc+i))
19041 + printk(KERN_CONT "?? ");
19042 + else
19043 + printk(KERN_CONT "%02x ", c);
19044 + }
19045 + printk("\n");
19046 +
19047 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19048 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19049 + unsigned long c;
19050 + if (get_user(c, (__force unsigned long __user *)sp+i))
19051 +#ifdef CONFIG_X86_32
19052 + printk(KERN_CONT "???????? ");
19053 +#else
19054 + printk(KERN_CONT "???????????????? ");
19055 +#endif
19056 + else
19057 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19058 + }
19059 + printk("\n");
19060 +}
19061 +#endif
19062 +
19063 +/**
19064 + * probe_kernel_write(): safely attempt to write to a location
19065 + * @dst: address to write to
19066 + * @src: pointer to the data that shall be written
19067 + * @size: size of the data chunk
19068 + *
19069 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19070 + * happens, handle that and return -EFAULT.
19071 + */
19072 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19073 +{
19074 + long ret;
19075 + mm_segment_t old_fs = get_fs();
19076 +
19077 + set_fs(KERNEL_DS);
19078 + pagefault_disable();
19079 + pax_open_kernel();
19080 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19081 + pax_close_kernel();
19082 + pagefault_enable();
19083 + set_fs(old_fs);
19084 +
19085 + return ret ? -EFAULT : 0;
19086 +}
19087 diff -urNp linux-3.0.3/arch/x86/mm/gup.c linux-3.0.3/arch/x86/mm/gup.c
19088 --- linux-3.0.3/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19089 +++ linux-3.0.3/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19090 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19091 addr = start;
19092 len = (unsigned long) nr_pages << PAGE_SHIFT;
19093 end = start + len;
19094 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19095 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19096 (void __user *)start, len)))
19097 return 0;
19098
19099 diff -urNp linux-3.0.3/arch/x86/mm/highmem_32.c linux-3.0.3/arch/x86/mm/highmem_32.c
19100 --- linux-3.0.3/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19101 +++ linux-3.0.3/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19102 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19103 idx = type + KM_TYPE_NR*smp_processor_id();
19104 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19105 BUG_ON(!pte_none(*(kmap_pte-idx)));
19106 +
19107 + pax_open_kernel();
19108 set_pte(kmap_pte-idx, mk_pte(page, prot));
19109 + pax_close_kernel();
19110
19111 return (void *)vaddr;
19112 }
19113 diff -urNp linux-3.0.3/arch/x86/mm/hugetlbpage.c linux-3.0.3/arch/x86/mm/hugetlbpage.c
19114 --- linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19115 +++ linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19116 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19117 struct hstate *h = hstate_file(file);
19118 struct mm_struct *mm = current->mm;
19119 struct vm_area_struct *vma;
19120 - unsigned long start_addr;
19121 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19122 +
19123 +#ifdef CONFIG_PAX_SEGMEXEC
19124 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19125 + pax_task_size = SEGMEXEC_TASK_SIZE;
19126 +#endif
19127 +
19128 + pax_task_size -= PAGE_SIZE;
19129
19130 if (len > mm->cached_hole_size) {
19131 - start_addr = mm->free_area_cache;
19132 + start_addr = mm->free_area_cache;
19133 } else {
19134 - start_addr = TASK_UNMAPPED_BASE;
19135 - mm->cached_hole_size = 0;
19136 + start_addr = mm->mmap_base;
19137 + mm->cached_hole_size = 0;
19138 }
19139
19140 full_search:
19141 @@ -280,26 +287,27 @@ full_search:
19142
19143 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19144 /* At this point: (!vma || addr < vma->vm_end). */
19145 - if (TASK_SIZE - len < addr) {
19146 + if (pax_task_size - len < addr) {
19147 /*
19148 * Start a new search - just in case we missed
19149 * some holes.
19150 */
19151 - if (start_addr != TASK_UNMAPPED_BASE) {
19152 - start_addr = TASK_UNMAPPED_BASE;
19153 + if (start_addr != mm->mmap_base) {
19154 + start_addr = mm->mmap_base;
19155 mm->cached_hole_size = 0;
19156 goto full_search;
19157 }
19158 return -ENOMEM;
19159 }
19160 - if (!vma || addr + len <= vma->vm_start) {
19161 - mm->free_area_cache = addr + len;
19162 - return addr;
19163 - }
19164 + if (check_heap_stack_gap(vma, addr, len))
19165 + break;
19166 if (addr + mm->cached_hole_size < vma->vm_start)
19167 mm->cached_hole_size = vma->vm_start - addr;
19168 addr = ALIGN(vma->vm_end, huge_page_size(h));
19169 }
19170 +
19171 + mm->free_area_cache = addr + len;
19172 + return addr;
19173 }
19174
19175 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19176 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19177 {
19178 struct hstate *h = hstate_file(file);
19179 struct mm_struct *mm = current->mm;
19180 - struct vm_area_struct *vma, *prev_vma;
19181 - unsigned long base = mm->mmap_base, addr = addr0;
19182 + struct vm_area_struct *vma;
19183 + unsigned long base = mm->mmap_base, addr;
19184 unsigned long largest_hole = mm->cached_hole_size;
19185 - int first_time = 1;
19186
19187 /* don't allow allocations above current base */
19188 if (mm->free_area_cache > base)
19189 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19190 largest_hole = 0;
19191 mm->free_area_cache = base;
19192 }
19193 -try_again:
19194 +
19195 /* make sure it can fit in the remaining address space */
19196 if (mm->free_area_cache < len)
19197 goto fail;
19198
19199 /* either no address requested or can't fit in requested address hole */
19200 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19201 + addr = (mm->free_area_cache - len);
19202 do {
19203 + addr &= huge_page_mask(h);
19204 + vma = find_vma(mm, addr);
19205 /*
19206 * Lookup failure means no vma is above this address,
19207 * i.e. return with success:
19208 - */
19209 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19210 - return addr;
19211 -
19212 - /*
19213 * new region fits between prev_vma->vm_end and
19214 * vma->vm_start, use it:
19215 */
19216 - if (addr + len <= vma->vm_start &&
19217 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19218 + if (check_heap_stack_gap(vma, addr, len)) {
19219 /* remember the address as a hint for next time */
19220 - mm->cached_hole_size = largest_hole;
19221 - return (mm->free_area_cache = addr);
19222 - } else {
19223 - /* pull free_area_cache down to the first hole */
19224 - if (mm->free_area_cache == vma->vm_end) {
19225 - mm->free_area_cache = vma->vm_start;
19226 - mm->cached_hole_size = largest_hole;
19227 - }
19228 + mm->cached_hole_size = largest_hole;
19229 + return (mm->free_area_cache = addr);
19230 + }
19231 + /* pull free_area_cache down to the first hole */
19232 + if (mm->free_area_cache == vma->vm_end) {
19233 + mm->free_area_cache = vma->vm_start;
19234 + mm->cached_hole_size = largest_hole;
19235 }
19236
19237 /* remember the largest hole we saw so far */
19238 if (addr + largest_hole < vma->vm_start)
19239 - largest_hole = vma->vm_start - addr;
19240 + largest_hole = vma->vm_start - addr;
19241
19242 /* try just below the current vma->vm_start */
19243 - addr = (vma->vm_start - len) & huge_page_mask(h);
19244 - } while (len <= vma->vm_start);
19245 + addr = skip_heap_stack_gap(vma, len);
19246 + } while (!IS_ERR_VALUE(addr));
19247
19248 fail:
19249 /*
19250 - * if hint left us with no space for the requested
19251 - * mapping then try again:
19252 - */
19253 - if (first_time) {
19254 - mm->free_area_cache = base;
19255 - largest_hole = 0;
19256 - first_time = 0;
19257 - goto try_again;
19258 - }
19259 - /*
19260 * A failed mmap() very likely causes application failure,
19261 * so fall back to the bottom-up function here. This scenario
19262 * can happen with large stack limits and large mmap()
19263 * allocations.
19264 */
19265 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19266 +
19267 +#ifdef CONFIG_PAX_SEGMEXEC
19268 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19269 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19270 + else
19271 +#endif
19272 +
19273 + mm->mmap_base = TASK_UNMAPPED_BASE;
19274 +
19275 +#ifdef CONFIG_PAX_RANDMMAP
19276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19277 + mm->mmap_base += mm->delta_mmap;
19278 +#endif
19279 +
19280 + mm->free_area_cache = mm->mmap_base;
19281 mm->cached_hole_size = ~0UL;
19282 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19283 len, pgoff, flags);
19284 @@ -386,6 +392,7 @@ fail:
19285 /*
19286 * Restore the topdown base:
19287 */
19288 + mm->mmap_base = base;
19289 mm->free_area_cache = base;
19290 mm->cached_hole_size = ~0UL;
19291
19292 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19293 struct hstate *h = hstate_file(file);
19294 struct mm_struct *mm = current->mm;
19295 struct vm_area_struct *vma;
19296 + unsigned long pax_task_size = TASK_SIZE;
19297
19298 if (len & ~huge_page_mask(h))
19299 return -EINVAL;
19300 - if (len > TASK_SIZE)
19301 +
19302 +#ifdef CONFIG_PAX_SEGMEXEC
19303 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19304 + pax_task_size = SEGMEXEC_TASK_SIZE;
19305 +#endif
19306 +
19307 + pax_task_size -= PAGE_SIZE;
19308 +
19309 + if (len > pax_task_size)
19310 return -ENOMEM;
19311
19312 if (flags & MAP_FIXED) {
19313 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19314 if (addr) {
19315 addr = ALIGN(addr, huge_page_size(h));
19316 vma = find_vma(mm, addr);
19317 - if (TASK_SIZE - len >= addr &&
19318 - (!vma || addr + len <= vma->vm_start))
19319 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19320 return addr;
19321 }
19322 if (mm->get_unmapped_area == arch_get_unmapped_area)
19323 diff -urNp linux-3.0.3/arch/x86/mm/init_32.c linux-3.0.3/arch/x86/mm/init_32.c
19324 --- linux-3.0.3/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19325 +++ linux-3.0.3/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19326 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19327 }
19328
19329 /*
19330 - * Creates a middle page table and puts a pointer to it in the
19331 - * given global directory entry. This only returns the gd entry
19332 - * in non-PAE compilation mode, since the middle layer is folded.
19333 - */
19334 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19335 -{
19336 - pud_t *pud;
19337 - pmd_t *pmd_table;
19338 -
19339 -#ifdef CONFIG_X86_PAE
19340 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19341 - if (after_bootmem)
19342 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19343 - else
19344 - pmd_table = (pmd_t *)alloc_low_page();
19345 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19346 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19347 - pud = pud_offset(pgd, 0);
19348 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19349 -
19350 - return pmd_table;
19351 - }
19352 -#endif
19353 - pud = pud_offset(pgd, 0);
19354 - pmd_table = pmd_offset(pud, 0);
19355 -
19356 - return pmd_table;
19357 -}
19358 -
19359 -/*
19360 * Create a page table and place a pointer to it in a middle page
19361 * directory entry:
19362 */
19363 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19364 page_table = (pte_t *)alloc_low_page();
19365
19366 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19367 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19368 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19369 +#else
19370 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19371 +#endif
19372 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19373 }
19374
19375 return pte_offset_kernel(pmd, 0);
19376 }
19377
19378 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19379 +{
19380 + pud_t *pud;
19381 + pmd_t *pmd_table;
19382 +
19383 + pud = pud_offset(pgd, 0);
19384 + pmd_table = pmd_offset(pud, 0);
19385 +
19386 + return pmd_table;
19387 +}
19388 +
19389 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19390 {
19391 int pgd_idx = pgd_index(vaddr);
19392 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19393 int pgd_idx, pmd_idx;
19394 unsigned long vaddr;
19395 pgd_t *pgd;
19396 + pud_t *pud;
19397 pmd_t *pmd;
19398 pte_t *pte = NULL;
19399
19400 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19401 pgd = pgd_base + pgd_idx;
19402
19403 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19404 - pmd = one_md_table_init(pgd);
19405 - pmd = pmd + pmd_index(vaddr);
19406 + pud = pud_offset(pgd, vaddr);
19407 + pmd = pmd_offset(pud, vaddr);
19408 +
19409 +#ifdef CONFIG_X86_PAE
19410 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19411 +#endif
19412 +
19413 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19414 pmd++, pmd_idx++) {
19415 pte = page_table_kmap_check(one_page_table_init(pmd),
19416 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19417 }
19418 }
19419
19420 -static inline int is_kernel_text(unsigned long addr)
19421 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19422 {
19423 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19424 - return 1;
19425 - return 0;
19426 + if ((start > ktla_ktva((unsigned long)_etext) ||
19427 + end <= ktla_ktva((unsigned long)_stext)) &&
19428 + (start > ktla_ktva((unsigned long)_einittext) ||
19429 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19430 +
19431 +#ifdef CONFIG_ACPI_SLEEP
19432 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19433 +#endif
19434 +
19435 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19436 + return 0;
19437 + return 1;
19438 }
19439
19440 /*
19441 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19442 unsigned long last_map_addr = end;
19443 unsigned long start_pfn, end_pfn;
19444 pgd_t *pgd_base = swapper_pg_dir;
19445 - int pgd_idx, pmd_idx, pte_ofs;
19446 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19447 unsigned long pfn;
19448 pgd_t *pgd;
19449 + pud_t *pud;
19450 pmd_t *pmd;
19451 pte_t *pte;
19452 unsigned pages_2m, pages_4k;
19453 @@ -281,8 +282,13 @@ repeat:
19454 pfn = start_pfn;
19455 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19456 pgd = pgd_base + pgd_idx;
19457 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19458 - pmd = one_md_table_init(pgd);
19459 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19460 + pud = pud_offset(pgd, 0);
19461 + pmd = pmd_offset(pud, 0);
19462 +
19463 +#ifdef CONFIG_X86_PAE
19464 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19465 +#endif
19466
19467 if (pfn >= end_pfn)
19468 continue;
19469 @@ -294,14 +300,13 @@ repeat:
19470 #endif
19471 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19472 pmd++, pmd_idx++) {
19473 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19474 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19475
19476 /*
19477 * Map with big pages if possible, otherwise
19478 * create normal page tables:
19479 */
19480 if (use_pse) {
19481 - unsigned int addr2;
19482 pgprot_t prot = PAGE_KERNEL_LARGE;
19483 /*
19484 * first pass will use the same initial
19485 @@ -311,11 +316,7 @@ repeat:
19486 __pgprot(PTE_IDENT_ATTR |
19487 _PAGE_PSE);
19488
19489 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19490 - PAGE_OFFSET + PAGE_SIZE-1;
19491 -
19492 - if (is_kernel_text(addr) ||
19493 - is_kernel_text(addr2))
19494 + if (is_kernel_text(address, address + PMD_SIZE))
19495 prot = PAGE_KERNEL_LARGE_EXEC;
19496
19497 pages_2m++;
19498 @@ -332,7 +333,7 @@ repeat:
19499 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19500 pte += pte_ofs;
19501 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19502 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19503 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19504 pgprot_t prot = PAGE_KERNEL;
19505 /*
19506 * first pass will use the same initial
19507 @@ -340,7 +341,7 @@ repeat:
19508 */
19509 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19510
19511 - if (is_kernel_text(addr))
19512 + if (is_kernel_text(address, address + PAGE_SIZE))
19513 prot = PAGE_KERNEL_EXEC;
19514
19515 pages_4k++;
19516 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19517
19518 pud = pud_offset(pgd, va);
19519 pmd = pmd_offset(pud, va);
19520 - if (!pmd_present(*pmd))
19521 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19522 break;
19523
19524 pte = pte_offset_kernel(pmd, va);
19525 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19526
19527 static void __init pagetable_init(void)
19528 {
19529 - pgd_t *pgd_base = swapper_pg_dir;
19530 -
19531 - permanent_kmaps_init(pgd_base);
19532 + permanent_kmaps_init(swapper_pg_dir);
19533 }
19534
19535 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19536 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19537 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19538
19539 /* user-defined highmem size */
19540 @@ -757,6 +756,12 @@ void __init mem_init(void)
19541
19542 pci_iommu_alloc();
19543
19544 +#ifdef CONFIG_PAX_PER_CPU_PGD
19545 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19546 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19547 + KERNEL_PGD_PTRS);
19548 +#endif
19549 +
19550 #ifdef CONFIG_FLATMEM
19551 BUG_ON(!mem_map);
19552 #endif
19553 @@ -774,7 +779,7 @@ void __init mem_init(void)
19554 set_highmem_pages_init();
19555
19556 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19557 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19558 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19559 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19560
19561 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19562 @@ -815,10 +820,10 @@ void __init mem_init(void)
19563 ((unsigned long)&__init_end -
19564 (unsigned long)&__init_begin) >> 10,
19565
19566 - (unsigned long)&_etext, (unsigned long)&_edata,
19567 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19568 + (unsigned long)&_sdata, (unsigned long)&_edata,
19569 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19570
19571 - (unsigned long)&_text, (unsigned long)&_etext,
19572 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19573 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19574
19575 /*
19576 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19577 if (!kernel_set_to_readonly)
19578 return;
19579
19580 + start = ktla_ktva(start);
19581 pr_debug("Set kernel text: %lx - %lx for read write\n",
19582 start, start+size);
19583
19584 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19585 if (!kernel_set_to_readonly)
19586 return;
19587
19588 + start = ktla_ktva(start);
19589 pr_debug("Set kernel text: %lx - %lx for read only\n",
19590 start, start+size);
19591
19592 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19593 unsigned long start = PFN_ALIGN(_text);
19594 unsigned long size = PFN_ALIGN(_etext) - start;
19595
19596 + start = ktla_ktva(start);
19597 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19598 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19599 size >> 10);
19600 diff -urNp linux-3.0.3/arch/x86/mm/init_64.c linux-3.0.3/arch/x86/mm/init_64.c
19601 --- linux-3.0.3/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19602 +++ linux-3.0.3/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19603 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19604 * around without checking the pgd every time.
19605 */
19606
19607 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19608 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19609 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19610
19611 int force_personality32;
19612 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19613
19614 for (address = start; address <= end; address += PGDIR_SIZE) {
19615 const pgd_t *pgd_ref = pgd_offset_k(address);
19616 +
19617 +#ifdef CONFIG_PAX_PER_CPU_PGD
19618 + unsigned long cpu;
19619 +#else
19620 struct page *page;
19621 +#endif
19622
19623 if (pgd_none(*pgd_ref))
19624 continue;
19625
19626 spin_lock(&pgd_lock);
19627 +
19628 +#ifdef CONFIG_PAX_PER_CPU_PGD
19629 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19630 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19631 +#else
19632 list_for_each_entry(page, &pgd_list, lru) {
19633 pgd_t *pgd;
19634 spinlock_t *pgt_lock;
19635 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19636 /* the pgt_lock only for Xen */
19637 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19638 spin_lock(pgt_lock);
19639 +#endif
19640
19641 if (pgd_none(*pgd))
19642 set_pgd(pgd, *pgd_ref);
19643 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19644 BUG_ON(pgd_page_vaddr(*pgd)
19645 != pgd_page_vaddr(*pgd_ref));
19646
19647 +#ifndef CONFIG_PAX_PER_CPU_PGD
19648 spin_unlock(pgt_lock);
19649 +#endif
19650 +
19651 }
19652 spin_unlock(&pgd_lock);
19653 }
19654 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19655 pmd = fill_pmd(pud, vaddr);
19656 pte = fill_pte(pmd, vaddr);
19657
19658 + pax_open_kernel();
19659 set_pte(pte, new_pte);
19660 + pax_close_kernel();
19661
19662 /*
19663 * It's enough to flush this one mapping.
19664 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19665 pgd = pgd_offset_k((unsigned long)__va(phys));
19666 if (pgd_none(*pgd)) {
19667 pud = (pud_t *) spp_getpage();
19668 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19669 - _PAGE_USER));
19670 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19671 }
19672 pud = pud_offset(pgd, (unsigned long)__va(phys));
19673 if (pud_none(*pud)) {
19674 pmd = (pmd_t *) spp_getpage();
19675 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19676 - _PAGE_USER));
19677 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19678 }
19679 pmd = pmd_offset(pud, phys);
19680 BUG_ON(!pmd_none(*pmd));
19681 @@ -693,6 +707,12 @@ void __init mem_init(void)
19682
19683 pci_iommu_alloc();
19684
19685 +#ifdef CONFIG_PAX_PER_CPU_PGD
19686 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19687 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19688 + KERNEL_PGD_PTRS);
19689 +#endif
19690 +
19691 /* clear_bss() already clear the empty_zero_page */
19692
19693 reservedpages = 0;
19694 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19695 static struct vm_area_struct gate_vma = {
19696 .vm_start = VSYSCALL_START,
19697 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19698 - .vm_page_prot = PAGE_READONLY_EXEC,
19699 - .vm_flags = VM_READ | VM_EXEC
19700 + .vm_page_prot = PAGE_READONLY,
19701 + .vm_flags = VM_READ
19702 };
19703
19704 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19705 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19706
19707 const char *arch_vma_name(struct vm_area_struct *vma)
19708 {
19709 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19710 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19711 return "[vdso]";
19712 if (vma == &gate_vma)
19713 return "[vsyscall]";
19714 diff -urNp linux-3.0.3/arch/x86/mm/init.c linux-3.0.3/arch/x86/mm/init.c
19715 --- linux-3.0.3/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19716 +++ linux-3.0.3/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19717 @@ -31,7 +31,7 @@ int direct_gbpages
19718 static void __init find_early_table_space(unsigned long end, int use_pse,
19719 int use_gbpages)
19720 {
19721 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19722 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19723 phys_addr_t base;
19724
19725 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19726 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19727 */
19728 int devmem_is_allowed(unsigned long pagenr)
19729 {
19730 - if (pagenr <= 256)
19731 +#ifdef CONFIG_GRKERNSEC_KMEM
19732 + /* allow BDA */
19733 + if (!pagenr)
19734 + return 1;
19735 + /* allow EBDA */
19736 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19737 + return 1;
19738 +#else
19739 + if (!pagenr)
19740 + return 1;
19741 +#ifdef CONFIG_VM86
19742 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19743 + return 1;
19744 +#endif
19745 +#endif
19746 +
19747 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19748 return 1;
19749 +#ifdef CONFIG_GRKERNSEC_KMEM
19750 + /* throw out everything else below 1MB */
19751 + if (pagenr <= 256)
19752 + return 0;
19753 +#endif
19754 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19755 return 0;
19756 if (!page_is_ram(pagenr))
19757 return 1;
19758 +
19759 return 0;
19760 }
19761
19762 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19763
19764 void free_initmem(void)
19765 {
19766 +
19767 +#ifdef CONFIG_PAX_KERNEXEC
19768 +#ifdef CONFIG_X86_32
19769 + /* PaX: limit KERNEL_CS to actual size */
19770 + unsigned long addr, limit;
19771 + struct desc_struct d;
19772 + int cpu;
19773 +
19774 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19775 + limit = (limit - 1UL) >> PAGE_SHIFT;
19776 +
19777 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19778 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19779 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19780 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19781 + }
19782 +
19783 + /* PaX: make KERNEL_CS read-only */
19784 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19785 + if (!paravirt_enabled())
19786 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19787 +/*
19788 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19789 + pgd = pgd_offset_k(addr);
19790 + pud = pud_offset(pgd, addr);
19791 + pmd = pmd_offset(pud, addr);
19792 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19793 + }
19794 +*/
19795 +#ifdef CONFIG_X86_PAE
19796 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19797 +/*
19798 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19799 + pgd = pgd_offset_k(addr);
19800 + pud = pud_offset(pgd, addr);
19801 + pmd = pmd_offset(pud, addr);
19802 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19803 + }
19804 +*/
19805 +#endif
19806 +
19807 +#ifdef CONFIG_MODULES
19808 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19809 +#endif
19810 +
19811 +#else
19812 + pgd_t *pgd;
19813 + pud_t *pud;
19814 + pmd_t *pmd;
19815 + unsigned long addr, end;
19816 +
19817 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19818 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19819 + pgd = pgd_offset_k(addr);
19820 + pud = pud_offset(pgd, addr);
19821 + pmd = pmd_offset(pud, addr);
19822 + if (!pmd_present(*pmd))
19823 + continue;
19824 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19825 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19826 + else
19827 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19828 + }
19829 +
19830 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19831 + end = addr + KERNEL_IMAGE_SIZE;
19832 + for (; addr < end; addr += PMD_SIZE) {
19833 + pgd = pgd_offset_k(addr);
19834 + pud = pud_offset(pgd, addr);
19835 + pmd = pmd_offset(pud, addr);
19836 + if (!pmd_present(*pmd))
19837 + continue;
19838 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19839 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19840 + }
19841 +#endif
19842 +
19843 + flush_tlb_all();
19844 +#endif
19845 +
19846 free_init_pages("unused kernel memory",
19847 (unsigned long)(&__init_begin),
19848 (unsigned long)(&__init_end));
19849 diff -urNp linux-3.0.3/arch/x86/mm/iomap_32.c linux-3.0.3/arch/x86/mm/iomap_32.c
19850 --- linux-3.0.3/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19851 +++ linux-3.0.3/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19852 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19853 type = kmap_atomic_idx_push();
19854 idx = type + KM_TYPE_NR * smp_processor_id();
19855 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19856 +
19857 + pax_open_kernel();
19858 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19859 + pax_close_kernel();
19860 +
19861 arch_flush_lazy_mmu_mode();
19862
19863 return (void *)vaddr;
19864 diff -urNp linux-3.0.3/arch/x86/mm/ioremap.c linux-3.0.3/arch/x86/mm/ioremap.c
19865 --- linux-3.0.3/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19866 +++ linux-3.0.3/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19867 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19868 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19869 int is_ram = page_is_ram(pfn);
19870
19871 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19872 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19873 return NULL;
19874 WARN_ON_ONCE(is_ram);
19875 }
19876 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19877 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19878
19879 static __initdata int after_paging_init;
19880 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19881 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19882
19883 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19884 {
19885 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19886 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19887
19888 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19889 - memset(bm_pte, 0, sizeof(bm_pte));
19890 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
19891 + pmd_populate_user(&init_mm, pmd, bm_pte);
19892
19893 /*
19894 * The boot-ioremap range spans multiple pmds, for which
19895 diff -urNp linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c
19896 --- linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19897 +++ linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19898 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19899 * memory (e.g. tracked pages)? For now, we need this to avoid
19900 * invoking kmemcheck for PnP BIOS calls.
19901 */
19902 - if (regs->flags & X86_VM_MASK)
19903 + if (v8086_mode(regs))
19904 return false;
19905 - if (regs->cs != __KERNEL_CS)
19906 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19907 return false;
19908
19909 pte = kmemcheck_pte_lookup(address);
19910 diff -urNp linux-3.0.3/arch/x86/mm/mmap.c linux-3.0.3/arch/x86/mm/mmap.c
19911 --- linux-3.0.3/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19912 +++ linux-3.0.3/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19913 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19914 * Leave an at least ~128 MB hole with possible stack randomization.
19915 */
19916 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19917 -#define MAX_GAP (TASK_SIZE/6*5)
19918 +#define MAX_GAP (pax_task_size/6*5)
19919
19920 /*
19921 * True on X86_32 or when emulating IA32 on X86_64
19922 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19923 return rnd << PAGE_SHIFT;
19924 }
19925
19926 -static unsigned long mmap_base(void)
19927 +static unsigned long mmap_base(struct mm_struct *mm)
19928 {
19929 unsigned long gap = rlimit(RLIMIT_STACK);
19930 + unsigned long pax_task_size = TASK_SIZE;
19931 +
19932 +#ifdef CONFIG_PAX_SEGMEXEC
19933 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19934 + pax_task_size = SEGMEXEC_TASK_SIZE;
19935 +#endif
19936
19937 if (gap < MIN_GAP)
19938 gap = MIN_GAP;
19939 else if (gap > MAX_GAP)
19940 gap = MAX_GAP;
19941
19942 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19943 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19944 }
19945
19946 /*
19947 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19948 * does, but not when emulating X86_32
19949 */
19950 -static unsigned long mmap_legacy_base(void)
19951 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
19952 {
19953 - if (mmap_is_ia32())
19954 + if (mmap_is_ia32()) {
19955 +
19956 +#ifdef CONFIG_PAX_SEGMEXEC
19957 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19958 + return SEGMEXEC_TASK_UNMAPPED_BASE;
19959 + else
19960 +#endif
19961 +
19962 return TASK_UNMAPPED_BASE;
19963 - else
19964 + } else
19965 return TASK_UNMAPPED_BASE + mmap_rnd();
19966 }
19967
19968 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19969 void arch_pick_mmap_layout(struct mm_struct *mm)
19970 {
19971 if (mmap_is_legacy()) {
19972 - mm->mmap_base = mmap_legacy_base();
19973 + mm->mmap_base = mmap_legacy_base(mm);
19974 +
19975 +#ifdef CONFIG_PAX_RANDMMAP
19976 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19977 + mm->mmap_base += mm->delta_mmap;
19978 +#endif
19979 +
19980 mm->get_unmapped_area = arch_get_unmapped_area;
19981 mm->unmap_area = arch_unmap_area;
19982 } else {
19983 - mm->mmap_base = mmap_base();
19984 + mm->mmap_base = mmap_base(mm);
19985 +
19986 +#ifdef CONFIG_PAX_RANDMMAP
19987 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19988 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19989 +#endif
19990 +
19991 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19992 mm->unmap_area = arch_unmap_area_topdown;
19993 }
19994 diff -urNp linux-3.0.3/arch/x86/mm/mmio-mod.c linux-3.0.3/arch/x86/mm/mmio-mod.c
19995 --- linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19996 +++ linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19997 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19998 break;
19999 default:
20000 {
20001 - unsigned char *ip = (unsigned char *)instptr;
20002 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20003 my_trace->opcode = MMIO_UNKNOWN_OP;
20004 my_trace->width = 0;
20005 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20006 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20007 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20008 void __iomem *addr)
20009 {
20010 - static atomic_t next_id;
20011 + static atomic_unchecked_t next_id;
20012 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20013 /* These are page-unaligned. */
20014 struct mmiotrace_map map = {
20015 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20016 .private = trace
20017 },
20018 .phys = offset,
20019 - .id = atomic_inc_return(&next_id)
20020 + .id = atomic_inc_return_unchecked(&next_id)
20021 };
20022 map.map_id = trace->id;
20023
20024 diff -urNp linux-3.0.3/arch/x86/mm/pageattr.c linux-3.0.3/arch/x86/mm/pageattr.c
20025 --- linux-3.0.3/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20026 +++ linux-3.0.3/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20027 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20028 */
20029 #ifdef CONFIG_PCI_BIOS
20030 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20031 - pgprot_val(forbidden) |= _PAGE_NX;
20032 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20033 #endif
20034
20035 /*
20036 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20037 * Does not cover __inittext since that is gone later on. On
20038 * 64bit we do not enforce !NX on the low mapping
20039 */
20040 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20041 - pgprot_val(forbidden) |= _PAGE_NX;
20042 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20043 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20044
20045 +#ifdef CONFIG_DEBUG_RODATA
20046 /*
20047 * The .rodata section needs to be read-only. Using the pfn
20048 * catches all aliases.
20049 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20050 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20051 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20052 pgprot_val(forbidden) |= _PAGE_RW;
20053 +#endif
20054
20055 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20056 /*
20057 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20058 }
20059 #endif
20060
20061 +#ifdef CONFIG_PAX_KERNEXEC
20062 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20063 + pgprot_val(forbidden) |= _PAGE_RW;
20064 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20065 + }
20066 +#endif
20067 +
20068 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20069
20070 return prot;
20071 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20072 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20073 {
20074 /* change init_mm */
20075 + pax_open_kernel();
20076 set_pte_atomic(kpte, pte);
20077 +
20078 #ifdef CONFIG_X86_32
20079 if (!SHARED_KERNEL_PMD) {
20080 +
20081 +#ifdef CONFIG_PAX_PER_CPU_PGD
20082 + unsigned long cpu;
20083 +#else
20084 struct page *page;
20085 +#endif
20086
20087 +#ifdef CONFIG_PAX_PER_CPU_PGD
20088 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20089 + pgd_t *pgd = get_cpu_pgd(cpu);
20090 +#else
20091 list_for_each_entry(page, &pgd_list, lru) {
20092 - pgd_t *pgd;
20093 + pgd_t *pgd = (pgd_t *)page_address(page);
20094 +#endif
20095 +
20096 pud_t *pud;
20097 pmd_t *pmd;
20098
20099 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20100 + pgd += pgd_index(address);
20101 pud = pud_offset(pgd, address);
20102 pmd = pmd_offset(pud, address);
20103 set_pte_atomic((pte_t *)pmd, pte);
20104 }
20105 }
20106 #endif
20107 + pax_close_kernel();
20108 }
20109
20110 static int
20111 diff -urNp linux-3.0.3/arch/x86/mm/pageattr-test.c linux-3.0.3/arch/x86/mm/pageattr-test.c
20112 --- linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20113 +++ linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20114 @@ -36,7 +36,7 @@ enum {
20115
20116 static int pte_testbit(pte_t pte)
20117 {
20118 - return pte_flags(pte) & _PAGE_UNUSED1;
20119 + return pte_flags(pte) & _PAGE_CPA_TEST;
20120 }
20121
20122 struct split_state {
20123 diff -urNp linux-3.0.3/arch/x86/mm/pat.c linux-3.0.3/arch/x86/mm/pat.c
20124 --- linux-3.0.3/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20125 +++ linux-3.0.3/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20126 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20127
20128 if (!entry) {
20129 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20130 - current->comm, current->pid, start, end);
20131 + current->comm, task_pid_nr(current), start, end);
20132 return -EINVAL;
20133 }
20134
20135 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20136 while (cursor < to) {
20137 if (!devmem_is_allowed(pfn)) {
20138 printk(KERN_INFO
20139 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20140 - current->comm, from, to);
20141 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20142 + current->comm, from, to, cursor);
20143 return 0;
20144 }
20145 cursor += PAGE_SIZE;
20146 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20147 printk(KERN_INFO
20148 "%s:%d ioremap_change_attr failed %s "
20149 "for %Lx-%Lx\n",
20150 - current->comm, current->pid,
20151 + current->comm, task_pid_nr(current),
20152 cattr_name(flags),
20153 base, (unsigned long long)(base + size));
20154 return -EINVAL;
20155 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20156 if (want_flags != flags) {
20157 printk(KERN_WARNING
20158 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20159 - current->comm, current->pid,
20160 + current->comm, task_pid_nr(current),
20161 cattr_name(want_flags),
20162 (unsigned long long)paddr,
20163 (unsigned long long)(paddr + size),
20164 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20165 free_memtype(paddr, paddr + size);
20166 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20167 " for %Lx-%Lx, got %s\n",
20168 - current->comm, current->pid,
20169 + current->comm, task_pid_nr(current),
20170 cattr_name(want_flags),
20171 (unsigned long long)paddr,
20172 (unsigned long long)(paddr + size),
20173 diff -urNp linux-3.0.3/arch/x86/mm/pf_in.c linux-3.0.3/arch/x86/mm/pf_in.c
20174 --- linux-3.0.3/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20175 +++ linux-3.0.3/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20176 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20177 int i;
20178 enum reason_type rv = OTHERS;
20179
20180 - p = (unsigned char *)ins_addr;
20181 + p = (unsigned char *)ktla_ktva(ins_addr);
20182 p += skip_prefix(p, &prf);
20183 p += get_opcode(p, &opcode);
20184
20185 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20186 struct prefix_bits prf;
20187 int i;
20188
20189 - p = (unsigned char *)ins_addr;
20190 + p = (unsigned char *)ktla_ktva(ins_addr);
20191 p += skip_prefix(p, &prf);
20192 p += get_opcode(p, &opcode);
20193
20194 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20195 struct prefix_bits prf;
20196 int i;
20197
20198 - p = (unsigned char *)ins_addr;
20199 + p = (unsigned char *)ktla_ktva(ins_addr);
20200 p += skip_prefix(p, &prf);
20201 p += get_opcode(p, &opcode);
20202
20203 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20204 struct prefix_bits prf;
20205 int i;
20206
20207 - p = (unsigned char *)ins_addr;
20208 + p = (unsigned char *)ktla_ktva(ins_addr);
20209 p += skip_prefix(p, &prf);
20210 p += get_opcode(p, &opcode);
20211 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20212 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20213 struct prefix_bits prf;
20214 int i;
20215
20216 - p = (unsigned char *)ins_addr;
20217 + p = (unsigned char *)ktla_ktva(ins_addr);
20218 p += skip_prefix(p, &prf);
20219 p += get_opcode(p, &opcode);
20220 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20221 diff -urNp linux-3.0.3/arch/x86/mm/pgtable_32.c linux-3.0.3/arch/x86/mm/pgtable_32.c
20222 --- linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20223 +++ linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20224 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20225 return;
20226 }
20227 pte = pte_offset_kernel(pmd, vaddr);
20228 +
20229 + pax_open_kernel();
20230 if (pte_val(pteval))
20231 set_pte_at(&init_mm, vaddr, pte, pteval);
20232 else
20233 pte_clear(&init_mm, vaddr, pte);
20234 + pax_close_kernel();
20235
20236 /*
20237 * It's enough to flush this one mapping.
20238 diff -urNp linux-3.0.3/arch/x86/mm/pgtable.c linux-3.0.3/arch/x86/mm/pgtable.c
20239 --- linux-3.0.3/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20240 +++ linux-3.0.3/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20241 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20242 list_del(&page->lru);
20243 }
20244
20245 -#define UNSHARED_PTRS_PER_PGD \
20246 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20247 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20248 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20249
20250 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20251 +{
20252 + while (count--)
20253 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20254 +}
20255 +#endif
20256 +
20257 +#ifdef CONFIG_PAX_PER_CPU_PGD
20258 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20259 +{
20260 + while (count--)
20261 +
20262 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20263 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20264 +#else
20265 + *dst++ = *src++;
20266 +#endif
20267
20268 +}
20269 +#endif
20270 +
20271 +#ifdef CONFIG_X86_64
20272 +#define pxd_t pud_t
20273 +#define pyd_t pgd_t
20274 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20275 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20276 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20277 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20278 +#define PYD_SIZE PGDIR_SIZE
20279 +#else
20280 +#define pxd_t pmd_t
20281 +#define pyd_t pud_t
20282 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20283 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20284 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20285 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20286 +#define PYD_SIZE PUD_SIZE
20287 +#endif
20288 +
20289 +#ifdef CONFIG_PAX_PER_CPU_PGD
20290 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20291 +static inline void pgd_dtor(pgd_t *pgd) {}
20292 +#else
20293 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20294 {
20295 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20296 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20297 pgd_list_del(pgd);
20298 spin_unlock(&pgd_lock);
20299 }
20300 +#endif
20301
20302 /*
20303 * List of all pgd's needed for non-PAE so it can invalidate entries
20304 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20305 * -- wli
20306 */
20307
20308 -#ifdef CONFIG_X86_PAE
20309 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20310 /*
20311 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20312 * updating the top-level pagetable entries to guarantee the
20313 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20314 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20315 * and initialize the kernel pmds here.
20316 */
20317 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20318 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20319
20320 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20321 {
20322 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20323 */
20324 flush_tlb_mm(mm);
20325 }
20326 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20327 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20328 #else /* !CONFIG_X86_PAE */
20329
20330 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20331 -#define PREALLOCATED_PMDS 0
20332 +#define PREALLOCATED_PXDS 0
20333
20334 #endif /* CONFIG_X86_PAE */
20335
20336 -static void free_pmds(pmd_t *pmds[])
20337 +static void free_pxds(pxd_t *pxds[])
20338 {
20339 int i;
20340
20341 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20342 - if (pmds[i])
20343 - free_page((unsigned long)pmds[i]);
20344 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20345 + if (pxds[i])
20346 + free_page((unsigned long)pxds[i]);
20347 }
20348
20349 -static int preallocate_pmds(pmd_t *pmds[])
20350 +static int preallocate_pxds(pxd_t *pxds[])
20351 {
20352 int i;
20353 bool failed = false;
20354
20355 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20356 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20357 - if (pmd == NULL)
20358 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20359 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20360 + if (pxd == NULL)
20361 failed = true;
20362 - pmds[i] = pmd;
20363 + pxds[i] = pxd;
20364 }
20365
20366 if (failed) {
20367 - free_pmds(pmds);
20368 + free_pxds(pxds);
20369 return -ENOMEM;
20370 }
20371
20372 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20373 * preallocate which never got a corresponding vma will need to be
20374 * freed manually.
20375 */
20376 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20377 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20378 {
20379 int i;
20380
20381 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20382 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20383 pgd_t pgd = pgdp[i];
20384
20385 if (pgd_val(pgd) != 0) {
20386 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20387 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20388
20389 - pgdp[i] = native_make_pgd(0);
20390 + set_pgd(pgdp + i, native_make_pgd(0));
20391
20392 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20393 - pmd_free(mm, pmd);
20394 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20395 + pxd_free(mm, pxd);
20396 }
20397 }
20398 }
20399
20400 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20401 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20402 {
20403 - pud_t *pud;
20404 + pyd_t *pyd;
20405 unsigned long addr;
20406 int i;
20407
20408 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20409 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20410 return;
20411
20412 - pud = pud_offset(pgd, 0);
20413 +#ifdef CONFIG_X86_64
20414 + pyd = pyd_offset(mm, 0L);
20415 +#else
20416 + pyd = pyd_offset(pgd, 0L);
20417 +#endif
20418
20419 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20420 - i++, pud++, addr += PUD_SIZE) {
20421 - pmd_t *pmd = pmds[i];
20422 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20423 + i++, pyd++, addr += PYD_SIZE) {
20424 + pxd_t *pxd = pxds[i];
20425
20426 if (i >= KERNEL_PGD_BOUNDARY)
20427 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20428 - sizeof(pmd_t) * PTRS_PER_PMD);
20429 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20430 + sizeof(pxd_t) * PTRS_PER_PMD);
20431
20432 - pud_populate(mm, pud, pmd);
20433 + pyd_populate(mm, pyd, pxd);
20434 }
20435 }
20436
20437 pgd_t *pgd_alloc(struct mm_struct *mm)
20438 {
20439 pgd_t *pgd;
20440 - pmd_t *pmds[PREALLOCATED_PMDS];
20441 + pxd_t *pxds[PREALLOCATED_PXDS];
20442
20443 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20444
20445 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20446
20447 mm->pgd = pgd;
20448
20449 - if (preallocate_pmds(pmds) != 0)
20450 + if (preallocate_pxds(pxds) != 0)
20451 goto out_free_pgd;
20452
20453 if (paravirt_pgd_alloc(mm) != 0)
20454 - goto out_free_pmds;
20455 + goto out_free_pxds;
20456
20457 /*
20458 * Make sure that pre-populating the pmds is atomic with
20459 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20460 spin_lock(&pgd_lock);
20461
20462 pgd_ctor(mm, pgd);
20463 - pgd_prepopulate_pmd(mm, pgd, pmds);
20464 + pgd_prepopulate_pxd(mm, pgd, pxds);
20465
20466 spin_unlock(&pgd_lock);
20467
20468 return pgd;
20469
20470 -out_free_pmds:
20471 - free_pmds(pmds);
20472 +out_free_pxds:
20473 + free_pxds(pxds);
20474 out_free_pgd:
20475 free_page((unsigned long)pgd);
20476 out:
20477 @@ -295,7 +344,7 @@ out:
20478
20479 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20480 {
20481 - pgd_mop_up_pmds(mm, pgd);
20482 + pgd_mop_up_pxds(mm, pgd);
20483 pgd_dtor(pgd);
20484 paravirt_pgd_free(mm, pgd);
20485 free_page((unsigned long)pgd);
20486 diff -urNp linux-3.0.3/arch/x86/mm/setup_nx.c linux-3.0.3/arch/x86/mm/setup_nx.c
20487 --- linux-3.0.3/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20488 +++ linux-3.0.3/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20489 @@ -5,8 +5,10 @@
20490 #include <asm/pgtable.h>
20491 #include <asm/proto.h>
20492
20493 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20494 static int disable_nx __cpuinitdata;
20495
20496 +#ifndef CONFIG_PAX_PAGEEXEC
20497 /*
20498 * noexec = on|off
20499 *
20500 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20501 return 0;
20502 }
20503 early_param("noexec", noexec_setup);
20504 +#endif
20505 +
20506 +#endif
20507
20508 void __cpuinit x86_configure_nx(void)
20509 {
20510 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20511 if (cpu_has_nx && !disable_nx)
20512 __supported_pte_mask |= _PAGE_NX;
20513 else
20514 +#endif
20515 __supported_pte_mask &= ~_PAGE_NX;
20516 }
20517
20518 diff -urNp linux-3.0.3/arch/x86/mm/tlb.c linux-3.0.3/arch/x86/mm/tlb.c
20519 --- linux-3.0.3/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20520 +++ linux-3.0.3/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20521 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20522 BUG();
20523 cpumask_clear_cpu(cpu,
20524 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20525 +
20526 +#ifndef CONFIG_PAX_PER_CPU_PGD
20527 load_cr3(swapper_pg_dir);
20528 +#endif
20529 +
20530 }
20531 EXPORT_SYMBOL_GPL(leave_mm);
20532
20533 diff -urNp linux-3.0.3/arch/x86/net/bpf_jit_comp.c linux-3.0.3/arch/x86/net/bpf_jit_comp.c
20534 --- linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20535 +++ linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20536 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20537 module_free(NULL, image);
20538 return;
20539 }
20540 + pax_open_kernel();
20541 memcpy(image + proglen, temp, ilen);
20542 + pax_close_kernel();
20543 }
20544 proglen += ilen;
20545 addrs[i] = proglen;
20546 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20547 break;
20548 }
20549 if (proglen == oldproglen) {
20550 - image = module_alloc(max_t(unsigned int,
20551 + image = module_alloc_exec(max_t(unsigned int,
20552 proglen,
20553 sizeof(struct work_struct)));
20554 if (!image)
20555 diff -urNp linux-3.0.3/arch/x86/oprofile/backtrace.c linux-3.0.3/arch/x86/oprofile/backtrace.c
20556 --- linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20557 +++ linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20558 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20559 {
20560 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20561
20562 - if (!user_mode_vm(regs)) {
20563 + if (!user_mode(regs)) {
20564 unsigned long stack = kernel_stack_pointer(regs);
20565 if (depth)
20566 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20567 diff -urNp linux-3.0.3/arch/x86/pci/mrst.c linux-3.0.3/arch/x86/pci/mrst.c
20568 --- linux-3.0.3/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20569 +++ linux-3.0.3/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20570 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20571 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20572 pci_mmcfg_late_init();
20573 pcibios_enable_irq = mrst_pci_irq_enable;
20574 - pci_root_ops = pci_mrst_ops;
20575 + pax_open_kernel();
20576 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20577 + pax_close_kernel();
20578 /* Continue with standard init */
20579 return 1;
20580 }
20581 diff -urNp linux-3.0.3/arch/x86/pci/pcbios.c linux-3.0.3/arch/x86/pci/pcbios.c
20582 --- linux-3.0.3/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20583 +++ linux-3.0.3/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20584 @@ -79,50 +79,93 @@ union bios32 {
20585 static struct {
20586 unsigned long address;
20587 unsigned short segment;
20588 -} bios32_indirect = { 0, __KERNEL_CS };
20589 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20590
20591 /*
20592 * Returns the entry point for the given service, NULL on error
20593 */
20594
20595 -static unsigned long bios32_service(unsigned long service)
20596 +static unsigned long __devinit bios32_service(unsigned long service)
20597 {
20598 unsigned char return_code; /* %al */
20599 unsigned long address; /* %ebx */
20600 unsigned long length; /* %ecx */
20601 unsigned long entry; /* %edx */
20602 unsigned long flags;
20603 + struct desc_struct d, *gdt;
20604
20605 local_irq_save(flags);
20606 - __asm__("lcall *(%%edi); cld"
20607 +
20608 + gdt = get_cpu_gdt_table(smp_processor_id());
20609 +
20610 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20611 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20612 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20613 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20614 +
20615 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20616 : "=a" (return_code),
20617 "=b" (address),
20618 "=c" (length),
20619 "=d" (entry)
20620 : "0" (service),
20621 "1" (0),
20622 - "D" (&bios32_indirect));
20623 + "D" (&bios32_indirect),
20624 + "r"(__PCIBIOS_DS)
20625 + : "memory");
20626 +
20627 + pax_open_kernel();
20628 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20629 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20630 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20631 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20632 + pax_close_kernel();
20633 +
20634 local_irq_restore(flags);
20635
20636 switch (return_code) {
20637 - case 0:
20638 - return address + entry;
20639 - case 0x80: /* Not present */
20640 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20641 - return 0;
20642 - default: /* Shouldn't happen */
20643 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20644 - service, return_code);
20645 + case 0: {
20646 + int cpu;
20647 + unsigned char flags;
20648 +
20649 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20650 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20651 + printk(KERN_WARNING "bios32_service: not valid\n");
20652 return 0;
20653 + }
20654 + address = address + PAGE_OFFSET;
20655 + length += 16UL; /* some BIOSs underreport this... */
20656 + flags = 4;
20657 + if (length >= 64*1024*1024) {
20658 + length >>= PAGE_SHIFT;
20659 + flags |= 8;
20660 + }
20661 +
20662 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20663 + gdt = get_cpu_gdt_table(cpu);
20664 + pack_descriptor(&d, address, length, 0x9b, flags);
20665 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20666 + pack_descriptor(&d, address, length, 0x93, flags);
20667 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20668 + }
20669 + return entry;
20670 + }
20671 + case 0x80: /* Not present */
20672 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20673 + return 0;
20674 + default: /* Shouldn't happen */
20675 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20676 + service, return_code);
20677 + return 0;
20678 }
20679 }
20680
20681 static struct {
20682 unsigned long address;
20683 unsigned short segment;
20684 -} pci_indirect = { 0, __KERNEL_CS };
20685 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20686
20687 -static int pci_bios_present;
20688 +static int pci_bios_present __read_only;
20689
20690 static int __devinit check_pcibios(void)
20691 {
20692 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20693 unsigned long flags, pcibios_entry;
20694
20695 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20696 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20697 + pci_indirect.address = pcibios_entry;
20698
20699 local_irq_save(flags);
20700 - __asm__(
20701 - "lcall *(%%edi); cld\n\t"
20702 + __asm__("movw %w6, %%ds\n\t"
20703 + "lcall *%%ss:(%%edi); cld\n\t"
20704 + "push %%ss\n\t"
20705 + "pop %%ds\n\t"
20706 "jc 1f\n\t"
20707 "xor %%ah, %%ah\n"
20708 "1:"
20709 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20710 "=b" (ebx),
20711 "=c" (ecx)
20712 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20713 - "D" (&pci_indirect)
20714 + "D" (&pci_indirect),
20715 + "r" (__PCIBIOS_DS)
20716 : "memory");
20717 local_irq_restore(flags);
20718
20719 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20720
20721 switch (len) {
20722 case 1:
20723 - __asm__("lcall *(%%esi); cld\n\t"
20724 + __asm__("movw %w6, %%ds\n\t"
20725 + "lcall *%%ss:(%%esi); cld\n\t"
20726 + "push %%ss\n\t"
20727 + "pop %%ds\n\t"
20728 "jc 1f\n\t"
20729 "xor %%ah, %%ah\n"
20730 "1:"
20731 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20732 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20733 "b" (bx),
20734 "D" ((long)reg),
20735 - "S" (&pci_indirect));
20736 + "S" (&pci_indirect),
20737 + "r" (__PCIBIOS_DS));
20738 /*
20739 * Zero-extend the result beyond 8 bits, do not trust the
20740 * BIOS having done it:
20741 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20742 *value &= 0xff;
20743 break;
20744 case 2:
20745 - __asm__("lcall *(%%esi); cld\n\t"
20746 + __asm__("movw %w6, %%ds\n\t"
20747 + "lcall *%%ss:(%%esi); cld\n\t"
20748 + "push %%ss\n\t"
20749 + "pop %%ds\n\t"
20750 "jc 1f\n\t"
20751 "xor %%ah, %%ah\n"
20752 "1:"
20753 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20754 : "1" (PCIBIOS_READ_CONFIG_WORD),
20755 "b" (bx),
20756 "D" ((long)reg),
20757 - "S" (&pci_indirect));
20758 + "S" (&pci_indirect),
20759 + "r" (__PCIBIOS_DS));
20760 /*
20761 * Zero-extend the result beyond 16 bits, do not trust the
20762 * BIOS having done it:
20763 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20764 *value &= 0xffff;
20765 break;
20766 case 4:
20767 - __asm__("lcall *(%%esi); cld\n\t"
20768 + __asm__("movw %w6, %%ds\n\t"
20769 + "lcall *%%ss:(%%esi); cld\n\t"
20770 + "push %%ss\n\t"
20771 + "pop %%ds\n\t"
20772 "jc 1f\n\t"
20773 "xor %%ah, %%ah\n"
20774 "1:"
20775 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20776 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20777 "b" (bx),
20778 "D" ((long)reg),
20779 - "S" (&pci_indirect));
20780 + "S" (&pci_indirect),
20781 + "r" (__PCIBIOS_DS));
20782 break;
20783 }
20784
20785 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20786
20787 switch (len) {
20788 case 1:
20789 - __asm__("lcall *(%%esi); cld\n\t"
20790 + __asm__("movw %w6, %%ds\n\t"
20791 + "lcall *%%ss:(%%esi); cld\n\t"
20792 + "push %%ss\n\t"
20793 + "pop %%ds\n\t"
20794 "jc 1f\n\t"
20795 "xor %%ah, %%ah\n"
20796 "1:"
20797 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20798 "c" (value),
20799 "b" (bx),
20800 "D" ((long)reg),
20801 - "S" (&pci_indirect));
20802 + "S" (&pci_indirect),
20803 + "r" (__PCIBIOS_DS));
20804 break;
20805 case 2:
20806 - __asm__("lcall *(%%esi); cld\n\t"
20807 + __asm__("movw %w6, %%ds\n\t"
20808 + "lcall *%%ss:(%%esi); cld\n\t"
20809 + "push %%ss\n\t"
20810 + "pop %%ds\n\t"
20811 "jc 1f\n\t"
20812 "xor %%ah, %%ah\n"
20813 "1:"
20814 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20815 "c" (value),
20816 "b" (bx),
20817 "D" ((long)reg),
20818 - "S" (&pci_indirect));
20819 + "S" (&pci_indirect),
20820 + "r" (__PCIBIOS_DS));
20821 break;
20822 case 4:
20823 - __asm__("lcall *(%%esi); cld\n\t"
20824 + __asm__("movw %w6, %%ds\n\t"
20825 + "lcall *%%ss:(%%esi); cld\n\t"
20826 + "push %%ss\n\t"
20827 + "pop %%ds\n\t"
20828 "jc 1f\n\t"
20829 "xor %%ah, %%ah\n"
20830 "1:"
20831 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20832 "c" (value),
20833 "b" (bx),
20834 "D" ((long)reg),
20835 - "S" (&pci_indirect));
20836 + "S" (&pci_indirect),
20837 + "r" (__PCIBIOS_DS));
20838 break;
20839 }
20840
20841 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20842
20843 DBG("PCI: Fetching IRQ routing table... ");
20844 __asm__("push %%es\n\t"
20845 + "movw %w8, %%ds\n\t"
20846 "push %%ds\n\t"
20847 "pop %%es\n\t"
20848 - "lcall *(%%esi); cld\n\t"
20849 + "lcall *%%ss:(%%esi); cld\n\t"
20850 "pop %%es\n\t"
20851 + "push %%ss\n\t"
20852 + "pop %%ds\n"
20853 "jc 1f\n\t"
20854 "xor %%ah, %%ah\n"
20855 "1:"
20856 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20857 "1" (0),
20858 "D" ((long) &opt),
20859 "S" (&pci_indirect),
20860 - "m" (opt)
20861 + "m" (opt),
20862 + "r" (__PCIBIOS_DS)
20863 : "memory");
20864 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20865 if (ret & 0xff00)
20866 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20867 {
20868 int ret;
20869
20870 - __asm__("lcall *(%%esi); cld\n\t"
20871 + __asm__("movw %w5, %%ds\n\t"
20872 + "lcall *%%ss:(%%esi); cld\n\t"
20873 + "push %%ss\n\t"
20874 + "pop %%ds\n"
20875 "jc 1f\n\t"
20876 "xor %%ah, %%ah\n"
20877 "1:"
20878 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20879 : "0" (PCIBIOS_SET_PCI_HW_INT),
20880 "b" ((dev->bus->number << 8) | dev->devfn),
20881 "c" ((irq << 8) | (pin + 10)),
20882 - "S" (&pci_indirect));
20883 + "S" (&pci_indirect),
20884 + "r" (__PCIBIOS_DS));
20885 return !(ret & 0xff00);
20886 }
20887 EXPORT_SYMBOL(pcibios_set_irq_routing);
20888 diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_32.c linux-3.0.3/arch/x86/platform/efi/efi_32.c
20889 --- linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20890 +++ linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20891 @@ -38,70 +38,37 @@
20892 */
20893
20894 static unsigned long efi_rt_eflags;
20895 -static pgd_t efi_bak_pg_dir_pointer[2];
20896 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20897
20898 -void efi_call_phys_prelog(void)
20899 +void __init efi_call_phys_prelog(void)
20900 {
20901 - unsigned long cr4;
20902 - unsigned long temp;
20903 struct desc_ptr gdt_descr;
20904
20905 local_irq_save(efi_rt_eflags);
20906
20907 - /*
20908 - * If I don't have PAE, I should just duplicate two entries in page
20909 - * directory. If I have PAE, I just need to duplicate one entry in
20910 - * page directory.
20911 - */
20912 - cr4 = read_cr4_safe();
20913 -
20914 - if (cr4 & X86_CR4_PAE) {
20915 - efi_bak_pg_dir_pointer[0].pgd =
20916 - swapper_pg_dir[pgd_index(0)].pgd;
20917 - swapper_pg_dir[0].pgd =
20918 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20919 - } else {
20920 - efi_bak_pg_dir_pointer[0].pgd =
20921 - swapper_pg_dir[pgd_index(0)].pgd;
20922 - efi_bak_pg_dir_pointer[1].pgd =
20923 - swapper_pg_dir[pgd_index(0x400000)].pgd;
20924 - swapper_pg_dir[pgd_index(0)].pgd =
20925 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20926 - temp = PAGE_OFFSET + 0x400000;
20927 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20928 - swapper_pg_dir[pgd_index(temp)].pgd;
20929 - }
20930 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20931 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20932 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20933
20934 /*
20935 * After the lock is released, the original page table is restored.
20936 */
20937 __flush_tlb_all();
20938
20939 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
20940 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20941 gdt_descr.size = GDT_SIZE - 1;
20942 load_gdt(&gdt_descr);
20943 }
20944
20945 -void efi_call_phys_epilog(void)
20946 +void __init efi_call_phys_epilog(void)
20947 {
20948 - unsigned long cr4;
20949 struct desc_ptr gdt_descr;
20950
20951 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20952 + gdt_descr.address = get_cpu_gdt_table(0);
20953 gdt_descr.size = GDT_SIZE - 1;
20954 load_gdt(&gdt_descr);
20955
20956 - cr4 = read_cr4_safe();
20957 -
20958 - if (cr4 & X86_CR4_PAE) {
20959 - swapper_pg_dir[pgd_index(0)].pgd =
20960 - efi_bak_pg_dir_pointer[0].pgd;
20961 - } else {
20962 - swapper_pg_dir[pgd_index(0)].pgd =
20963 - efi_bak_pg_dir_pointer[0].pgd;
20964 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20965 - efi_bak_pg_dir_pointer[1].pgd;
20966 - }
20967 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20968
20969 /*
20970 * After the lock is released, the original page table is restored.
20971 diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S
20972 --- linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20973 +++ linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20974 @@ -6,6 +6,7 @@
20975 */
20976
20977 #include <linux/linkage.h>
20978 +#include <linux/init.h>
20979 #include <asm/page_types.h>
20980
20981 /*
20982 @@ -20,7 +21,7 @@
20983 * service functions will comply with gcc calling convention, too.
20984 */
20985
20986 -.text
20987 +__INIT
20988 ENTRY(efi_call_phys)
20989 /*
20990 * 0. The function can only be called in Linux kernel. So CS has been
20991 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20992 * The mapping of lower virtual memory has been created in prelog and
20993 * epilog.
20994 */
20995 - movl $1f, %edx
20996 - subl $__PAGE_OFFSET, %edx
20997 - jmp *%edx
20998 + jmp 1f-__PAGE_OFFSET
20999 1:
21000
21001 /*
21002 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21003 * parameter 2, ..., param n. To make things easy, we save the return
21004 * address of efi_call_phys in a global variable.
21005 */
21006 - popl %edx
21007 - movl %edx, saved_return_addr
21008 - /* get the function pointer into ECX*/
21009 - popl %ecx
21010 - movl %ecx, efi_rt_function_ptr
21011 - movl $2f, %edx
21012 - subl $__PAGE_OFFSET, %edx
21013 - pushl %edx
21014 + popl (saved_return_addr)
21015 + popl (efi_rt_function_ptr)
21016
21017 /*
21018 * 3. Clear PG bit in %CR0.
21019 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21020 /*
21021 * 5. Call the physical function.
21022 */
21023 - jmp *%ecx
21024 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21025
21026 -2:
21027 /*
21028 * 6. After EFI runtime service returns, control will return to
21029 * following instruction. We'd better readjust stack pointer first.
21030 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21031 movl %cr0, %edx
21032 orl $0x80000000, %edx
21033 movl %edx, %cr0
21034 - jmp 1f
21035 -1:
21036 +
21037 /*
21038 * 8. Now restore the virtual mode from flat mode by
21039 * adding EIP with PAGE_OFFSET.
21040 */
21041 - movl $1f, %edx
21042 - jmp *%edx
21043 + jmp 1f+__PAGE_OFFSET
21044 1:
21045
21046 /*
21047 * 9. Balance the stack. And because EAX contain the return value,
21048 * we'd better not clobber it.
21049 */
21050 - leal efi_rt_function_ptr, %edx
21051 - movl (%edx), %ecx
21052 - pushl %ecx
21053 + pushl (efi_rt_function_ptr)
21054
21055 /*
21056 - * 10. Push the saved return address onto the stack and return.
21057 + * 10. Return to the saved return address.
21058 */
21059 - leal saved_return_addr, %edx
21060 - movl (%edx), %ecx
21061 - pushl %ecx
21062 - ret
21063 + jmpl *(saved_return_addr)
21064 ENDPROC(efi_call_phys)
21065 .previous
21066
21067 -.data
21068 +__INITDATA
21069 saved_return_addr:
21070 .long 0
21071 efi_rt_function_ptr:
21072 diff -urNp linux-3.0.3/arch/x86/platform/mrst/mrst.c linux-3.0.3/arch/x86/platform/mrst/mrst.c
21073 --- linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21074 +++ linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21075 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21076 }
21077
21078 /* Reboot and power off are handled by the SCU on a MID device */
21079 -static void mrst_power_off(void)
21080 +static __noreturn void mrst_power_off(void)
21081 {
21082 intel_scu_ipc_simple_command(0xf1, 1);
21083 + BUG();
21084 }
21085
21086 -static void mrst_reboot(void)
21087 +static __noreturn void mrst_reboot(void)
21088 {
21089 intel_scu_ipc_simple_command(0xf1, 0);
21090 + BUG();
21091 }
21092
21093 /*
21094 diff -urNp linux-3.0.3/arch/x86/platform/olpc/olpc_dt.c linux-3.0.3/arch/x86/platform/olpc/olpc_dt.c
21095 --- linux-3.0.3/arch/x86/platform/olpc/olpc_dt.c 2011-07-21 22:17:23.000000000 -0400
21096 +++ linux-3.0.3/arch/x86/platform/olpc/olpc_dt.c 2011-08-29 22:31:19.000000000 -0400
21097 @@ -163,7 +163,7 @@ static struct of_pdt_ops prom_olpc_ops _
21098 .getchild = olpc_dt_getchild,
21099 .getsibling = olpc_dt_getsibling,
21100 .pkg2path = olpc_dt_pkg2path,
21101 -};
21102 +} __no_const;
21103
21104 void __init olpc_dt_build_devicetree(void)
21105 {
21106 diff -urNp linux-3.0.3/arch/x86/platform/uv/tlb_uv.c linux-3.0.3/arch/x86/platform/uv/tlb_uv.c
21107 --- linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21108 +++ linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21109 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21110 cpumask_t mask;
21111 struct reset_args reset_args;
21112
21113 + pax_track_stack();
21114 +
21115 reset_args.sender = sender;
21116 cpus_clear(mask);
21117 /* find a single cpu for each uvhub in this distribution mask */
21118 diff -urNp linux-3.0.3/arch/x86/power/cpu.c linux-3.0.3/arch/x86/power/cpu.c
21119 --- linux-3.0.3/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21120 +++ linux-3.0.3/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21121 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21122 static void fix_processor_context(void)
21123 {
21124 int cpu = smp_processor_id();
21125 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21126 + struct tss_struct *t = init_tss + cpu;
21127
21128 set_tss_desc(cpu, t); /*
21129 * This just modifies memory; should not be
21130 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21131 */
21132
21133 #ifdef CONFIG_X86_64
21134 + pax_open_kernel();
21135 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21136 + pax_close_kernel();
21137
21138 syscall_init(); /* This sets MSR_*STAR and related */
21139 #endif
21140 diff -urNp linux-3.0.3/arch/x86/vdso/Makefile linux-3.0.3/arch/x86/vdso/Makefile
21141 --- linux-3.0.3/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21142 +++ linux-3.0.3/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21143 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21144 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21145 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21146
21147 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21148 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21149 GCOV_PROFILE := n
21150
21151 #
21152 diff -urNp linux-3.0.3/arch/x86/vdso/vdso32-setup.c linux-3.0.3/arch/x86/vdso/vdso32-setup.c
21153 --- linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21154 +++ linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21155 @@ -25,6 +25,7 @@
21156 #include <asm/tlbflush.h>
21157 #include <asm/vdso.h>
21158 #include <asm/proto.h>
21159 +#include <asm/mman.h>
21160
21161 enum {
21162 VDSO_DISABLED = 0,
21163 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21164 void enable_sep_cpu(void)
21165 {
21166 int cpu = get_cpu();
21167 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21168 + struct tss_struct *tss = init_tss + cpu;
21169
21170 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21171 put_cpu();
21172 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21173 gate_vma.vm_start = FIXADDR_USER_START;
21174 gate_vma.vm_end = FIXADDR_USER_END;
21175 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21176 - gate_vma.vm_page_prot = __P101;
21177 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21178 /*
21179 * Make sure the vDSO gets into every core dump.
21180 * Dumping its contents makes post-mortem fully interpretable later
21181 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21182 if (compat)
21183 addr = VDSO_HIGH_BASE;
21184 else {
21185 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21186 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21187 if (IS_ERR_VALUE(addr)) {
21188 ret = addr;
21189 goto up_fail;
21190 }
21191 }
21192
21193 - current->mm->context.vdso = (void *)addr;
21194 + current->mm->context.vdso = addr;
21195
21196 if (compat_uses_vma || !compat) {
21197 /*
21198 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21199 }
21200
21201 current_thread_info()->sysenter_return =
21202 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21203 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21204
21205 up_fail:
21206 if (ret)
21207 - current->mm->context.vdso = NULL;
21208 + current->mm->context.vdso = 0;
21209
21210 up_write(&mm->mmap_sem);
21211
21212 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21213
21214 const char *arch_vma_name(struct vm_area_struct *vma)
21215 {
21216 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21217 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21218 return "[vdso]";
21219 +
21220 +#ifdef CONFIG_PAX_SEGMEXEC
21221 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21222 + return "[vdso]";
21223 +#endif
21224 +
21225 return NULL;
21226 }
21227
21228 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21229 * Check to see if the corresponding task was created in compat vdso
21230 * mode.
21231 */
21232 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21233 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21234 return &gate_vma;
21235 return NULL;
21236 }
21237 diff -urNp linux-3.0.3/arch/x86/vdso/vma.c linux-3.0.3/arch/x86/vdso/vma.c
21238 --- linux-3.0.3/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21239 +++ linux-3.0.3/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21240 @@ -15,18 +15,19 @@
21241 #include <asm/proto.h>
21242 #include <asm/vdso.h>
21243
21244 -unsigned int __read_mostly vdso_enabled = 1;
21245 -
21246 extern char vdso_start[], vdso_end[];
21247 extern unsigned short vdso_sync_cpuid;
21248 +extern char __vsyscall_0;
21249
21250 static struct page **vdso_pages;
21251 +static struct page *vsyscall_page;
21252 static unsigned vdso_size;
21253
21254 static int __init init_vdso_vars(void)
21255 {
21256 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21257 - int i;
21258 + size_t nbytes = vdso_end - vdso_start;
21259 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21260 + size_t i;
21261
21262 vdso_size = npages << PAGE_SHIFT;
21263 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21264 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21265 goto oom;
21266 for (i = 0; i < npages; i++) {
21267 struct page *p;
21268 - p = alloc_page(GFP_KERNEL);
21269 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21270 if (!p)
21271 goto oom;
21272 vdso_pages[i] = p;
21273 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21274 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21275 + nbytes -= PAGE_SIZE;
21276 }
21277 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21278
21279 return 0;
21280
21281 oom:
21282 - printk("Cannot allocate vdso\n");
21283 - vdso_enabled = 0;
21284 - return -ENOMEM;
21285 + panic("Cannot allocate vdso\n");
21286 }
21287 subsys_initcall(init_vdso_vars);
21288
21289 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21290 unsigned long addr;
21291 int ret;
21292
21293 - if (!vdso_enabled)
21294 - return 0;
21295 -
21296 down_write(&mm->mmap_sem);
21297 - addr = vdso_addr(mm->start_stack, vdso_size);
21298 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21299 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21300 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21301 if (IS_ERR_VALUE(addr)) {
21302 ret = addr;
21303 goto up_fail;
21304 }
21305
21306 - current->mm->context.vdso = (void *)addr;
21307 + mm->context.vdso = addr + PAGE_SIZE;
21308
21309 - ret = install_special_mapping(mm, addr, vdso_size,
21310 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
21311 VM_READ|VM_EXEC|
21312 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21313 + VM_MAYREAD|VM_MAYEXEC|
21314 VM_ALWAYSDUMP,
21315 - vdso_pages);
21316 + &vsyscall_page);
21317 if (ret) {
21318 - current->mm->context.vdso = NULL;
21319 + mm->context.vdso = 0;
21320 goto up_fail;
21321 }
21322
21323 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21324 + VM_READ|VM_EXEC|
21325 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21326 + VM_ALWAYSDUMP,
21327 + vdso_pages);
21328 + if (ret)
21329 + mm->context.vdso = 0;
21330 +
21331 up_fail:
21332 up_write(&mm->mmap_sem);
21333 return ret;
21334 }
21335 -
21336 -static __init int vdso_setup(char *s)
21337 -{
21338 - vdso_enabled = simple_strtoul(s, NULL, 0);
21339 - return 0;
21340 -}
21341 -__setup("vdso=", vdso_setup);
21342 diff -urNp linux-3.0.3/arch/x86/xen/enlighten.c linux-3.0.3/arch/x86/xen/enlighten.c
21343 --- linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:44:40.000000000 -0400
21344 +++ linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:47:55.000000000 -0400
21345 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21346
21347 struct shared_info xen_dummy_shared_info;
21348
21349 -void *xen_initial_gdt;
21350 -
21351 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21352 __read_mostly int xen_have_vector_callback;
21353 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21354 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21355 #endif
21356 };
21357
21358 -static void xen_reboot(int reason)
21359 +static __noreturn void xen_reboot(int reason)
21360 {
21361 struct sched_shutdown r = { .reason = reason };
21362
21363 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21364 BUG();
21365 }
21366
21367 -static void xen_restart(char *msg)
21368 +static __noreturn void xen_restart(char *msg)
21369 {
21370 xen_reboot(SHUTDOWN_reboot);
21371 }
21372
21373 -static void xen_emergency_restart(void)
21374 +static __noreturn void xen_emergency_restart(void)
21375 {
21376 xen_reboot(SHUTDOWN_reboot);
21377 }
21378
21379 -static void xen_machine_halt(void)
21380 +static __noreturn void xen_machine_halt(void)
21381 {
21382 xen_reboot(SHUTDOWN_poweroff);
21383 }
21384 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21385 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21386
21387 /* Work out if we support NX */
21388 - x86_configure_nx();
21389 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21390 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21391 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21392 + unsigned l, h;
21393 +
21394 + __supported_pte_mask |= _PAGE_NX;
21395 + rdmsr(MSR_EFER, l, h);
21396 + l |= EFER_NX;
21397 + wrmsr(MSR_EFER, l, h);
21398 + }
21399 +#endif
21400
21401 xen_setup_features();
21402
21403 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21404
21405 machine_ops = xen_machine_ops;
21406
21407 - /*
21408 - * The only reliable way to retain the initial address of the
21409 - * percpu gdt_page is to remember it here, so we can go and
21410 - * mark it RW later, when the initial percpu area is freed.
21411 - */
21412 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21413 -
21414 xen_smp_init();
21415
21416 #ifdef CONFIG_ACPI_NUMA
21417 diff -urNp linux-3.0.3/arch/x86/xen/mmu.c linux-3.0.3/arch/x86/xen/mmu.c
21418 --- linux-3.0.3/arch/x86/xen/mmu.c 2011-07-21 22:17:23.000000000 -0400
21419 +++ linux-3.0.3/arch/x86/xen/mmu.c 2011-08-24 18:10:12.000000000 -0400
21420 @@ -1679,6 +1679,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21421 convert_pfn_mfn(init_level4_pgt);
21422 convert_pfn_mfn(level3_ident_pgt);
21423 convert_pfn_mfn(level3_kernel_pgt);
21424 + convert_pfn_mfn(level3_vmalloc_pgt);
21425 + convert_pfn_mfn(level3_vmemmap_pgt);
21426
21427 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21428 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21429 @@ -1697,7 +1699,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21430 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21431 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21432 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21433 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21434 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21435 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21436 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21437 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21438 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21439
21440 @@ -1909,6 +1914,7 @@ static void __init xen_post_allocator_in
21441 pv_mmu_ops.set_pud = xen_set_pud;
21442 #if PAGETABLE_LEVELS == 4
21443 pv_mmu_ops.set_pgd = xen_set_pgd;
21444 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21445 #endif
21446
21447 /* This will work as long as patching hasn't happened yet
21448 @@ -1990,6 +1996,7 @@ static const struct pv_mmu_ops xen_mmu_o
21449 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21450 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21451 .set_pgd = xen_set_pgd_hyper,
21452 + .set_pgd_batched = xen_set_pgd_hyper,
21453
21454 .alloc_pud = xen_alloc_pmd_init,
21455 .release_pud = xen_release_pmd_init,
21456 diff -urNp linux-3.0.3/arch/x86/xen/smp.c linux-3.0.3/arch/x86/xen/smp.c
21457 --- linux-3.0.3/arch/x86/xen/smp.c 2011-07-21 22:17:23.000000000 -0400
21458 +++ linux-3.0.3/arch/x86/xen/smp.c 2011-08-23 21:47:55.000000000 -0400
21459 @@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21460 {
21461 BUG_ON(smp_processor_id() != 0);
21462 native_smp_prepare_boot_cpu();
21463 -
21464 - /* We've switched to the "real" per-cpu gdt, so make sure the
21465 - old memory can be recycled */
21466 - make_lowmem_page_readwrite(xen_initial_gdt);
21467 -
21468 xen_filter_cpu_maps();
21469 xen_setup_vcpu_info_placement();
21470 }
21471 @@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21472 gdt = get_cpu_gdt_table(cpu);
21473
21474 ctxt->flags = VGCF_IN_KERNEL;
21475 - ctxt->user_regs.ds = __USER_DS;
21476 - ctxt->user_regs.es = __USER_DS;
21477 + ctxt->user_regs.ds = __KERNEL_DS;
21478 + ctxt->user_regs.es = __KERNEL_DS;
21479 ctxt->user_regs.ss = __KERNEL_DS;
21480 #ifdef CONFIG_X86_32
21481 ctxt->user_regs.fs = __KERNEL_PERCPU;
21482 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21483 + savesegment(gs, ctxt->user_regs.gs);
21484 #else
21485 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21486 #endif
21487 @@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21488 int rc;
21489
21490 per_cpu(current_task, cpu) = idle;
21491 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21492 #ifdef CONFIG_X86_32
21493 irq_ctx_init(cpu);
21494 #else
21495 clear_tsk_thread_flag(idle, TIF_FORK);
21496 - per_cpu(kernel_stack, cpu) =
21497 - (unsigned long)task_stack_page(idle) -
21498 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21499 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21500 #endif
21501 xen_setup_runstate_info(cpu);
21502 xen_setup_timer(cpu);
21503 diff -urNp linux-3.0.3/arch/x86/xen/xen-asm_32.S linux-3.0.3/arch/x86/xen/xen-asm_32.S
21504 --- linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21505 +++ linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21506 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21507 ESP_OFFSET=4 # bytes pushed onto stack
21508
21509 /*
21510 - * Store vcpu_info pointer for easy access. Do it this way to
21511 - * avoid having to reload %fs
21512 + * Store vcpu_info pointer for easy access.
21513 */
21514 #ifdef CONFIG_SMP
21515 - GET_THREAD_INFO(%eax)
21516 - movl TI_cpu(%eax), %eax
21517 - movl __per_cpu_offset(,%eax,4), %eax
21518 - mov xen_vcpu(%eax), %eax
21519 + push %fs
21520 + mov $(__KERNEL_PERCPU), %eax
21521 + mov %eax, %fs
21522 + mov PER_CPU_VAR(xen_vcpu), %eax
21523 + pop %fs
21524 #else
21525 movl xen_vcpu, %eax
21526 #endif
21527 diff -urNp linux-3.0.3/arch/x86/xen/xen-head.S linux-3.0.3/arch/x86/xen/xen-head.S
21528 --- linux-3.0.3/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21529 +++ linux-3.0.3/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21530 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21531 #ifdef CONFIG_X86_32
21532 mov %esi,xen_start_info
21533 mov $init_thread_union+THREAD_SIZE,%esp
21534 +#ifdef CONFIG_SMP
21535 + movl $cpu_gdt_table,%edi
21536 + movl $__per_cpu_load,%eax
21537 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21538 + rorl $16,%eax
21539 + movb %al,__KERNEL_PERCPU + 4(%edi)
21540 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21541 + movl $__per_cpu_end - 1,%eax
21542 + subl $__per_cpu_start,%eax
21543 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21544 +#endif
21545 #else
21546 mov %rsi,xen_start_info
21547 mov $init_thread_union+THREAD_SIZE,%rsp
21548 diff -urNp linux-3.0.3/arch/x86/xen/xen-ops.h linux-3.0.3/arch/x86/xen/xen-ops.h
21549 --- linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21550 +++ linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21551 @@ -10,8 +10,6 @@
21552 extern const char xen_hypervisor_callback[];
21553 extern const char xen_failsafe_callback[];
21554
21555 -extern void *xen_initial_gdt;
21556 -
21557 struct trap_info;
21558 void xen_copy_trap_info(struct trap_info *traps);
21559
21560 diff -urNp linux-3.0.3/block/blk-iopoll.c linux-3.0.3/block/blk-iopoll.c
21561 --- linux-3.0.3/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21562 +++ linux-3.0.3/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21563 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21564 }
21565 EXPORT_SYMBOL(blk_iopoll_complete);
21566
21567 -static void blk_iopoll_softirq(struct softirq_action *h)
21568 +static void blk_iopoll_softirq(void)
21569 {
21570 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21571 int rearm = 0, budget = blk_iopoll_budget;
21572 diff -urNp linux-3.0.3/block/blk-map.c linux-3.0.3/block/blk-map.c
21573 --- linux-3.0.3/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21574 +++ linux-3.0.3/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21575 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21576 if (!len || !kbuf)
21577 return -EINVAL;
21578
21579 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21580 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21581 if (do_copy)
21582 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21583 else
21584 diff -urNp linux-3.0.3/block/blk-softirq.c linux-3.0.3/block/blk-softirq.c
21585 --- linux-3.0.3/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21586 +++ linux-3.0.3/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21587 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21588 * Softirq action handler - move entries to local list and loop over them
21589 * while passing them to the queue registered handler.
21590 */
21591 -static void blk_done_softirq(struct softirq_action *h)
21592 +static void blk_done_softirq(void)
21593 {
21594 struct list_head *cpu_list, local_list;
21595
21596 diff -urNp linux-3.0.3/block/bsg.c linux-3.0.3/block/bsg.c
21597 --- linux-3.0.3/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21598 +++ linux-3.0.3/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21599 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21600 struct sg_io_v4 *hdr, struct bsg_device *bd,
21601 fmode_t has_write_perm)
21602 {
21603 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21604 + unsigned char *cmdptr;
21605 +
21606 if (hdr->request_len > BLK_MAX_CDB) {
21607 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21608 if (!rq->cmd)
21609 return -ENOMEM;
21610 - }
21611 + cmdptr = rq->cmd;
21612 + } else
21613 + cmdptr = tmpcmd;
21614
21615 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21616 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21617 hdr->request_len))
21618 return -EFAULT;
21619
21620 + if (cmdptr != rq->cmd)
21621 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21622 +
21623 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21624 if (blk_verify_command(rq->cmd, has_write_perm))
21625 return -EPERM;
21626 diff -urNp linux-3.0.3/block/scsi_ioctl.c linux-3.0.3/block/scsi_ioctl.c
21627 --- linux-3.0.3/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21628 +++ linux-3.0.3/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21629 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21630 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21631 struct sg_io_hdr *hdr, fmode_t mode)
21632 {
21633 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21634 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21635 + unsigned char *cmdptr;
21636 +
21637 + if (rq->cmd != rq->__cmd)
21638 + cmdptr = rq->cmd;
21639 + else
21640 + cmdptr = tmpcmd;
21641 +
21642 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21643 return -EFAULT;
21644 +
21645 + if (cmdptr != rq->cmd)
21646 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21647 +
21648 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21649 return -EPERM;
21650
21651 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21652 int err;
21653 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21654 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21655 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21656 + unsigned char *cmdptr;
21657
21658 if (!sic)
21659 return -EINVAL;
21660 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21661 */
21662 err = -EFAULT;
21663 rq->cmd_len = cmdlen;
21664 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21665 +
21666 + if (rq->cmd != rq->__cmd)
21667 + cmdptr = rq->cmd;
21668 + else
21669 + cmdptr = tmpcmd;
21670 +
21671 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21672 goto error;
21673
21674 + if (rq->cmd != cmdptr)
21675 + memcpy(rq->cmd, cmdptr, cmdlen);
21676 +
21677 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21678 goto error;
21679
21680 diff -urNp linux-3.0.3/crypto/cryptd.c linux-3.0.3/crypto/cryptd.c
21681 --- linux-3.0.3/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21682 +++ linux-3.0.3/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21683 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21684
21685 struct cryptd_blkcipher_request_ctx {
21686 crypto_completion_t complete;
21687 -};
21688 +} __no_const;
21689
21690 struct cryptd_hash_ctx {
21691 struct crypto_shash *child;
21692 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21693
21694 struct cryptd_aead_request_ctx {
21695 crypto_completion_t complete;
21696 -};
21697 +} __no_const;
21698
21699 static void cryptd_queue_worker(struct work_struct *work);
21700
21701 diff -urNp linux-3.0.3/crypto/gf128mul.c linux-3.0.3/crypto/gf128mul.c
21702 --- linux-3.0.3/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21703 +++ linux-3.0.3/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21704 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21705 for (i = 0; i < 7; ++i)
21706 gf128mul_x_lle(&p[i + 1], &p[i]);
21707
21708 - memset(r, 0, sizeof(r));
21709 + memset(r, 0, sizeof(*r));
21710 for (i = 0;;) {
21711 u8 ch = ((u8 *)b)[15 - i];
21712
21713 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21714 for (i = 0; i < 7; ++i)
21715 gf128mul_x_bbe(&p[i + 1], &p[i]);
21716
21717 - memset(r, 0, sizeof(r));
21718 + memset(r, 0, sizeof(*r));
21719 for (i = 0;;) {
21720 u8 ch = ((u8 *)b)[i];
21721
21722 diff -urNp linux-3.0.3/crypto/serpent.c linux-3.0.3/crypto/serpent.c
21723 --- linux-3.0.3/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21724 +++ linux-3.0.3/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21725 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21726 u32 r0,r1,r2,r3,r4;
21727 int i;
21728
21729 + pax_track_stack();
21730 +
21731 /* Copy key, add padding */
21732
21733 for (i = 0; i < keylen; ++i)
21734 diff -urNp linux-3.0.3/Documentation/dontdiff linux-3.0.3/Documentation/dontdiff
21735 --- linux-3.0.3/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21736 +++ linux-3.0.3/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21737 @@ -5,6 +5,7 @@
21738 *.cis
21739 *.cpio
21740 *.csp
21741 +*.dbg
21742 *.dsp
21743 *.dvi
21744 *.elf
21745 @@ -48,9 +49,11 @@
21746 *.tab.h
21747 *.tex
21748 *.ver
21749 +*.vim
21750 *.xml
21751 *.xz
21752 *_MODULES
21753 +*_reg_safe.h
21754 *_vga16.c
21755 *~
21756 \#*#
21757 @@ -70,6 +73,7 @@ Kerntypes
21758 Module.markers
21759 Module.symvers
21760 PENDING
21761 +PERF*
21762 SCCS
21763 System.map*
21764 TAGS
21765 @@ -98,6 +102,8 @@ bzImage*
21766 capability_names.h
21767 capflags.c
21768 classlist.h*
21769 +clut_vga16.c
21770 +common-cmds.h
21771 comp*.log
21772 compile.h*
21773 conf
21774 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21775 gconf
21776 gconf.glade.h
21777 gen-devlist
21778 +gen-kdb_cmds.c
21779 gen_crc32table
21780 gen_init_cpio
21781 generated
21782 genheaders
21783 genksyms
21784 *_gray256.c
21785 +hash
21786 hpet_example
21787 hugepage-mmap
21788 hugepage-shm
21789 @@ -146,7 +154,6 @@ int32.c
21790 int4.c
21791 int8.c
21792 kallsyms
21793 -kconfig
21794 keywords.c
21795 ksym.c*
21796 ksym.h*
21797 @@ -154,7 +161,6 @@ kxgettext
21798 lkc_defs.h
21799 lex.c
21800 lex.*.c
21801 -linux
21802 logo_*.c
21803 logo_*_clut224.c
21804 logo_*_mono.c
21805 @@ -174,6 +180,7 @@ mkboot
21806 mkbugboot
21807 mkcpustr
21808 mkdep
21809 +mkpiggy
21810 mkprep
21811 mkregtable
21812 mktables
21813 @@ -209,6 +216,7 @@ r300_reg_safe.h
21814 r420_reg_safe.h
21815 r600_reg_safe.h
21816 recordmcount
21817 +regdb.c
21818 relocs
21819 rlim_names.h
21820 rn50_reg_safe.h
21821 @@ -219,6 +227,7 @@ setup
21822 setup.bin
21823 setup.elf
21824 sImage
21825 +slabinfo
21826 sm_tbl*
21827 split-include
21828 syscalltab.h
21829 @@ -246,7 +255,9 @@ vmlinux
21830 vmlinux-*
21831 vmlinux.aout
21832 vmlinux.bin.all
21833 +vmlinux.bin.bz2
21834 vmlinux.lds
21835 +vmlinux.relocs
21836 vmlinuz
21837 voffset.h
21838 vsyscall.lds
21839 @@ -254,6 +265,7 @@ vsyscall_32.lds
21840 wanxlfw.inc
21841 uImage
21842 unifdef
21843 +utsrelease.h
21844 wakeup.bin
21845 wakeup.elf
21846 wakeup.lds
21847 diff -urNp linux-3.0.3/Documentation/kernel-parameters.txt linux-3.0.3/Documentation/kernel-parameters.txt
21848 --- linux-3.0.3/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21849 +++ linux-3.0.3/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21850 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21851 the specified number of seconds. This is to be used if
21852 your oopses keep scrolling off the screen.
21853
21854 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21855 + virtualization environments that don't cope well with the
21856 + expand down segment used by UDEREF on X86-32 or the frequent
21857 + page table updates on X86-64.
21858 +
21859 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21860 +
21861 pcbit= [HW,ISDN]
21862
21863 pcd. [PARIDE]
21864 diff -urNp linux-3.0.3/drivers/acpi/apei/cper.c linux-3.0.3/drivers/acpi/apei/cper.c
21865 --- linux-3.0.3/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21866 +++ linux-3.0.3/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21867 @@ -38,12 +38,12 @@
21868 */
21869 u64 cper_next_record_id(void)
21870 {
21871 - static atomic64_t seq;
21872 + static atomic64_unchecked_t seq;
21873
21874 - if (!atomic64_read(&seq))
21875 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
21876 + if (!atomic64_read_unchecked(&seq))
21877 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21878
21879 - return atomic64_inc_return(&seq);
21880 + return atomic64_inc_return_unchecked(&seq);
21881 }
21882 EXPORT_SYMBOL_GPL(cper_next_record_id);
21883
21884 diff -urNp linux-3.0.3/drivers/acpi/ec_sys.c linux-3.0.3/drivers/acpi/ec_sys.c
21885 --- linux-3.0.3/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21886 +++ linux-3.0.3/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21887 @@ -11,6 +11,7 @@
21888 #include <linux/kernel.h>
21889 #include <linux/acpi.h>
21890 #include <linux/debugfs.h>
21891 +#include <asm/uaccess.h>
21892 #include "internal.h"
21893
21894 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21895 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21896 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21897 */
21898 unsigned int size = EC_SPACE_SIZE;
21899 - u8 *data = (u8 *) buf;
21900 + u8 data;
21901 loff_t init_off = *off;
21902 int err = 0;
21903
21904 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21905 size = count;
21906
21907 while (size) {
21908 - err = ec_read(*off, &data[*off - init_off]);
21909 + err = ec_read(*off, &data);
21910 if (err)
21911 return err;
21912 + if (put_user(data, &buf[*off - init_off]))
21913 + return -EFAULT;
21914 *off += 1;
21915 size--;
21916 }
21917 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21918
21919 unsigned int size = count;
21920 loff_t init_off = *off;
21921 - u8 *data = (u8 *) buf;
21922 int err = 0;
21923
21924 if (*off >= EC_SPACE_SIZE)
21925 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21926 }
21927
21928 while (size) {
21929 - u8 byte_write = data[*off - init_off];
21930 + u8 byte_write;
21931 + if (get_user(byte_write, &buf[*off - init_off]))
21932 + return -EFAULT;
21933 err = ec_write(*off, byte_write);
21934 if (err)
21935 return err;
21936 diff -urNp linux-3.0.3/drivers/acpi/proc.c linux-3.0.3/drivers/acpi/proc.c
21937 --- linux-3.0.3/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21938 +++ linux-3.0.3/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21939 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21940 size_t count, loff_t * ppos)
21941 {
21942 struct list_head *node, *next;
21943 - char strbuf[5];
21944 - char str[5] = "";
21945 - unsigned int len = count;
21946 -
21947 - if (len > 4)
21948 - len = 4;
21949 - if (len < 0)
21950 - return -EFAULT;
21951 + char strbuf[5] = {0};
21952
21953 - if (copy_from_user(strbuf, buffer, len))
21954 + if (count > 4)
21955 + count = 4;
21956 + if (copy_from_user(strbuf, buffer, count))
21957 return -EFAULT;
21958 - strbuf[len] = '\0';
21959 - sscanf(strbuf, "%s", str);
21960 + strbuf[count] = '\0';
21961
21962 mutex_lock(&acpi_device_lock);
21963 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21964 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21965 if (!dev->wakeup.flags.valid)
21966 continue;
21967
21968 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
21969 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21970 if (device_can_wakeup(&dev->dev)) {
21971 bool enable = !device_may_wakeup(&dev->dev);
21972 device_set_wakeup_enable(&dev->dev, enable);
21973 diff -urNp linux-3.0.3/drivers/acpi/processor_driver.c linux-3.0.3/drivers/acpi/processor_driver.c
21974 --- linux-3.0.3/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21975 +++ linux-3.0.3/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21976 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21977 return 0;
21978 #endif
21979
21980 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21981 + BUG_ON(pr->id >= nr_cpu_ids);
21982
21983 /*
21984 * Buggy BIOS check
21985 diff -urNp linux-3.0.3/drivers/ata/libata-core.c linux-3.0.3/drivers/ata/libata-core.c
21986 --- linux-3.0.3/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21987 +++ linux-3.0.3/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21988 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21989 struct ata_port *ap;
21990 unsigned int tag;
21991
21992 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21993 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21994 ap = qc->ap;
21995
21996 qc->flags = 0;
21997 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21998 struct ata_port *ap;
21999 struct ata_link *link;
22000
22001 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22002 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22003 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22004 ap = qc->ap;
22005 link = qc->dev->link;
22006 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
22007 return;
22008
22009 spin_lock(&lock);
22010 + pax_open_kernel();
22011
22012 for (cur = ops->inherits; cur; cur = cur->inherits) {
22013 void **inherit = (void **)cur;
22014 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22015 if (IS_ERR(*pp))
22016 *pp = NULL;
22017
22018 - ops->inherits = NULL;
22019 + *(struct ata_port_operations **)&ops->inherits = NULL;
22020
22021 + pax_close_kernel();
22022 spin_unlock(&lock);
22023 }
22024
22025 diff -urNp linux-3.0.3/drivers/ata/libata-eh.c linux-3.0.3/drivers/ata/libata-eh.c
22026 --- linux-3.0.3/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22027 +++ linux-3.0.3/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22028 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22029 {
22030 struct ata_link *link;
22031
22032 + pax_track_stack();
22033 +
22034 ata_for_each_link(link, ap, HOST_FIRST)
22035 ata_eh_link_report(link);
22036 }
22037 diff -urNp linux-3.0.3/drivers/ata/pata_arasan_cf.c linux-3.0.3/drivers/ata/pata_arasan_cf.c
22038 --- linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22039 +++ linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22040 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22041 /* Handle platform specific quirks */
22042 if (pdata->quirk) {
22043 if (pdata->quirk & CF_BROKEN_PIO) {
22044 - ap->ops->set_piomode = NULL;
22045 + pax_open_kernel();
22046 + *(void **)&ap->ops->set_piomode = NULL;
22047 + pax_close_kernel();
22048 ap->pio_mask = 0;
22049 }
22050 if (pdata->quirk & CF_BROKEN_MWDMA)
22051 diff -urNp linux-3.0.3/drivers/atm/adummy.c linux-3.0.3/drivers/atm/adummy.c
22052 --- linux-3.0.3/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22053 +++ linux-3.0.3/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22054 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22055 vcc->pop(vcc, skb);
22056 else
22057 dev_kfree_skb_any(skb);
22058 - atomic_inc(&vcc->stats->tx);
22059 + atomic_inc_unchecked(&vcc->stats->tx);
22060
22061 return 0;
22062 }
22063 diff -urNp linux-3.0.3/drivers/atm/ambassador.c linux-3.0.3/drivers/atm/ambassador.c
22064 --- linux-3.0.3/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22065 +++ linux-3.0.3/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22066 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22067 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22068
22069 // VC layer stats
22070 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22071 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22072
22073 // free the descriptor
22074 kfree (tx_descr);
22075 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22076 dump_skb ("<<<", vc, skb);
22077
22078 // VC layer stats
22079 - atomic_inc(&atm_vcc->stats->rx);
22080 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22081 __net_timestamp(skb);
22082 // end of our responsibility
22083 atm_vcc->push (atm_vcc, skb);
22084 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22085 } else {
22086 PRINTK (KERN_INFO, "dropped over-size frame");
22087 // should we count this?
22088 - atomic_inc(&atm_vcc->stats->rx_drop);
22089 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22090 }
22091
22092 } else {
22093 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22094 }
22095
22096 if (check_area (skb->data, skb->len)) {
22097 - atomic_inc(&atm_vcc->stats->tx_err);
22098 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22099 return -ENOMEM; // ?
22100 }
22101
22102 diff -urNp linux-3.0.3/drivers/atm/atmtcp.c linux-3.0.3/drivers/atm/atmtcp.c
22103 --- linux-3.0.3/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22104 +++ linux-3.0.3/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22105 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22106 if (vcc->pop) vcc->pop(vcc,skb);
22107 else dev_kfree_skb(skb);
22108 if (dev_data) return 0;
22109 - atomic_inc(&vcc->stats->tx_err);
22110 + atomic_inc_unchecked(&vcc->stats->tx_err);
22111 return -ENOLINK;
22112 }
22113 size = skb->len+sizeof(struct atmtcp_hdr);
22114 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22115 if (!new_skb) {
22116 if (vcc->pop) vcc->pop(vcc,skb);
22117 else dev_kfree_skb(skb);
22118 - atomic_inc(&vcc->stats->tx_err);
22119 + atomic_inc_unchecked(&vcc->stats->tx_err);
22120 return -ENOBUFS;
22121 }
22122 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22123 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22124 if (vcc->pop) vcc->pop(vcc,skb);
22125 else dev_kfree_skb(skb);
22126 out_vcc->push(out_vcc,new_skb);
22127 - atomic_inc(&vcc->stats->tx);
22128 - atomic_inc(&out_vcc->stats->rx);
22129 + atomic_inc_unchecked(&vcc->stats->tx);
22130 + atomic_inc_unchecked(&out_vcc->stats->rx);
22131 return 0;
22132 }
22133
22134 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22135 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22136 read_unlock(&vcc_sklist_lock);
22137 if (!out_vcc) {
22138 - atomic_inc(&vcc->stats->tx_err);
22139 + atomic_inc_unchecked(&vcc->stats->tx_err);
22140 goto done;
22141 }
22142 skb_pull(skb,sizeof(struct atmtcp_hdr));
22143 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22144 __net_timestamp(new_skb);
22145 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22146 out_vcc->push(out_vcc,new_skb);
22147 - atomic_inc(&vcc->stats->tx);
22148 - atomic_inc(&out_vcc->stats->rx);
22149 + atomic_inc_unchecked(&vcc->stats->tx);
22150 + atomic_inc_unchecked(&out_vcc->stats->rx);
22151 done:
22152 if (vcc->pop) vcc->pop(vcc,skb);
22153 else dev_kfree_skb(skb);
22154 diff -urNp linux-3.0.3/drivers/atm/eni.c linux-3.0.3/drivers/atm/eni.c
22155 --- linux-3.0.3/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22156 +++ linux-3.0.3/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22157 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22158 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22159 vcc->dev->number);
22160 length = 0;
22161 - atomic_inc(&vcc->stats->rx_err);
22162 + atomic_inc_unchecked(&vcc->stats->rx_err);
22163 }
22164 else {
22165 length = ATM_CELL_SIZE-1; /* no HEC */
22166 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22167 size);
22168 }
22169 eff = length = 0;
22170 - atomic_inc(&vcc->stats->rx_err);
22171 + atomic_inc_unchecked(&vcc->stats->rx_err);
22172 }
22173 else {
22174 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22175 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22176 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22177 vcc->dev->number,vcc->vci,length,size << 2,descr);
22178 length = eff = 0;
22179 - atomic_inc(&vcc->stats->rx_err);
22180 + atomic_inc_unchecked(&vcc->stats->rx_err);
22181 }
22182 }
22183 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22184 @@ -771,7 +771,7 @@ rx_dequeued++;
22185 vcc->push(vcc,skb);
22186 pushed++;
22187 }
22188 - atomic_inc(&vcc->stats->rx);
22189 + atomic_inc_unchecked(&vcc->stats->rx);
22190 }
22191 wake_up(&eni_dev->rx_wait);
22192 }
22193 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22194 PCI_DMA_TODEVICE);
22195 if (vcc->pop) vcc->pop(vcc,skb);
22196 else dev_kfree_skb_irq(skb);
22197 - atomic_inc(&vcc->stats->tx);
22198 + atomic_inc_unchecked(&vcc->stats->tx);
22199 wake_up(&eni_dev->tx_wait);
22200 dma_complete++;
22201 }
22202 diff -urNp linux-3.0.3/drivers/atm/firestream.c linux-3.0.3/drivers/atm/firestream.c
22203 --- linux-3.0.3/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22204 +++ linux-3.0.3/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22205 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22206 }
22207 }
22208
22209 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22210 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22211
22212 fs_dprintk (FS_DEBUG_TXMEM, "i");
22213 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22214 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22215 #endif
22216 skb_put (skb, qe->p1 & 0xffff);
22217 ATM_SKB(skb)->vcc = atm_vcc;
22218 - atomic_inc(&atm_vcc->stats->rx);
22219 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22220 __net_timestamp(skb);
22221 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22222 atm_vcc->push (atm_vcc, skb);
22223 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22224 kfree (pe);
22225 }
22226 if (atm_vcc)
22227 - atomic_inc(&atm_vcc->stats->rx_drop);
22228 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22229 break;
22230 case 0x1f: /* Reassembly abort: no buffers. */
22231 /* Silently increment error counter. */
22232 if (atm_vcc)
22233 - atomic_inc(&atm_vcc->stats->rx_drop);
22234 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22235 break;
22236 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22237 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22238 diff -urNp linux-3.0.3/drivers/atm/fore200e.c linux-3.0.3/drivers/atm/fore200e.c
22239 --- linux-3.0.3/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22240 +++ linux-3.0.3/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22241 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22242 #endif
22243 /* check error condition */
22244 if (*entry->status & STATUS_ERROR)
22245 - atomic_inc(&vcc->stats->tx_err);
22246 + atomic_inc_unchecked(&vcc->stats->tx_err);
22247 else
22248 - atomic_inc(&vcc->stats->tx);
22249 + atomic_inc_unchecked(&vcc->stats->tx);
22250 }
22251 }
22252
22253 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22254 if (skb == NULL) {
22255 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22256
22257 - atomic_inc(&vcc->stats->rx_drop);
22258 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22259 return -ENOMEM;
22260 }
22261
22262 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22263
22264 dev_kfree_skb_any(skb);
22265
22266 - atomic_inc(&vcc->stats->rx_drop);
22267 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22268 return -ENOMEM;
22269 }
22270
22271 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22272
22273 vcc->push(vcc, skb);
22274 - atomic_inc(&vcc->stats->rx);
22275 + atomic_inc_unchecked(&vcc->stats->rx);
22276
22277 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22278
22279 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22280 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22281 fore200e->atm_dev->number,
22282 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22283 - atomic_inc(&vcc->stats->rx_err);
22284 + atomic_inc_unchecked(&vcc->stats->rx_err);
22285 }
22286 }
22287
22288 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22289 goto retry_here;
22290 }
22291
22292 - atomic_inc(&vcc->stats->tx_err);
22293 + atomic_inc_unchecked(&vcc->stats->tx_err);
22294
22295 fore200e->tx_sat++;
22296 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22297 diff -urNp linux-3.0.3/drivers/atm/he.c linux-3.0.3/drivers/atm/he.c
22298 --- linux-3.0.3/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22299 +++ linux-3.0.3/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22300 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22301
22302 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22303 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22304 - atomic_inc(&vcc->stats->rx_drop);
22305 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22306 goto return_host_buffers;
22307 }
22308
22309 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22310 RBRQ_LEN_ERR(he_dev->rbrq_head)
22311 ? "LEN_ERR" : "",
22312 vcc->vpi, vcc->vci);
22313 - atomic_inc(&vcc->stats->rx_err);
22314 + atomic_inc_unchecked(&vcc->stats->rx_err);
22315 goto return_host_buffers;
22316 }
22317
22318 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22319 vcc->push(vcc, skb);
22320 spin_lock(&he_dev->global_lock);
22321
22322 - atomic_inc(&vcc->stats->rx);
22323 + atomic_inc_unchecked(&vcc->stats->rx);
22324
22325 return_host_buffers:
22326 ++pdus_assembled;
22327 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22328 tpd->vcc->pop(tpd->vcc, tpd->skb);
22329 else
22330 dev_kfree_skb_any(tpd->skb);
22331 - atomic_inc(&tpd->vcc->stats->tx_err);
22332 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22333 }
22334 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22335 return;
22336 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22337 vcc->pop(vcc, skb);
22338 else
22339 dev_kfree_skb_any(skb);
22340 - atomic_inc(&vcc->stats->tx_err);
22341 + atomic_inc_unchecked(&vcc->stats->tx_err);
22342 return -EINVAL;
22343 }
22344
22345 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22346 vcc->pop(vcc, skb);
22347 else
22348 dev_kfree_skb_any(skb);
22349 - atomic_inc(&vcc->stats->tx_err);
22350 + atomic_inc_unchecked(&vcc->stats->tx_err);
22351 return -EINVAL;
22352 }
22353 #endif
22354 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22355 vcc->pop(vcc, skb);
22356 else
22357 dev_kfree_skb_any(skb);
22358 - atomic_inc(&vcc->stats->tx_err);
22359 + atomic_inc_unchecked(&vcc->stats->tx_err);
22360 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22361 return -ENOMEM;
22362 }
22363 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22364 vcc->pop(vcc, skb);
22365 else
22366 dev_kfree_skb_any(skb);
22367 - atomic_inc(&vcc->stats->tx_err);
22368 + atomic_inc_unchecked(&vcc->stats->tx_err);
22369 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22370 return -ENOMEM;
22371 }
22372 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22373 __enqueue_tpd(he_dev, tpd, cid);
22374 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22375
22376 - atomic_inc(&vcc->stats->tx);
22377 + atomic_inc_unchecked(&vcc->stats->tx);
22378
22379 return 0;
22380 }
22381 diff -urNp linux-3.0.3/drivers/atm/horizon.c linux-3.0.3/drivers/atm/horizon.c
22382 --- linux-3.0.3/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22383 +++ linux-3.0.3/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22384 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22385 {
22386 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22387 // VC layer stats
22388 - atomic_inc(&vcc->stats->rx);
22389 + atomic_inc_unchecked(&vcc->stats->rx);
22390 __net_timestamp(skb);
22391 // end of our responsibility
22392 vcc->push (vcc, skb);
22393 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22394 dev->tx_iovec = NULL;
22395
22396 // VC layer stats
22397 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22398 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22399
22400 // free the skb
22401 hrz_kfree_skb (skb);
22402 diff -urNp linux-3.0.3/drivers/atm/idt77252.c linux-3.0.3/drivers/atm/idt77252.c
22403 --- linux-3.0.3/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22404 +++ linux-3.0.3/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22405 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22406 else
22407 dev_kfree_skb(skb);
22408
22409 - atomic_inc(&vcc->stats->tx);
22410 + atomic_inc_unchecked(&vcc->stats->tx);
22411 }
22412
22413 atomic_dec(&scq->used);
22414 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22415 if ((sb = dev_alloc_skb(64)) == NULL) {
22416 printk("%s: Can't allocate buffers for aal0.\n",
22417 card->name);
22418 - atomic_add(i, &vcc->stats->rx_drop);
22419 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22420 break;
22421 }
22422 if (!atm_charge(vcc, sb->truesize)) {
22423 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22424 card->name);
22425 - atomic_add(i - 1, &vcc->stats->rx_drop);
22426 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22427 dev_kfree_skb(sb);
22428 break;
22429 }
22430 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22431 ATM_SKB(sb)->vcc = vcc;
22432 __net_timestamp(sb);
22433 vcc->push(vcc, sb);
22434 - atomic_inc(&vcc->stats->rx);
22435 + atomic_inc_unchecked(&vcc->stats->rx);
22436
22437 cell += ATM_CELL_PAYLOAD;
22438 }
22439 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22440 "(CDC: %08x)\n",
22441 card->name, len, rpp->len, readl(SAR_REG_CDC));
22442 recycle_rx_pool_skb(card, rpp);
22443 - atomic_inc(&vcc->stats->rx_err);
22444 + atomic_inc_unchecked(&vcc->stats->rx_err);
22445 return;
22446 }
22447 if (stat & SAR_RSQE_CRC) {
22448 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22449 recycle_rx_pool_skb(card, rpp);
22450 - atomic_inc(&vcc->stats->rx_err);
22451 + atomic_inc_unchecked(&vcc->stats->rx_err);
22452 return;
22453 }
22454 if (skb_queue_len(&rpp->queue) > 1) {
22455 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22456 RXPRINTK("%s: Can't alloc RX skb.\n",
22457 card->name);
22458 recycle_rx_pool_skb(card, rpp);
22459 - atomic_inc(&vcc->stats->rx_err);
22460 + atomic_inc_unchecked(&vcc->stats->rx_err);
22461 return;
22462 }
22463 if (!atm_charge(vcc, skb->truesize)) {
22464 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22465 __net_timestamp(skb);
22466
22467 vcc->push(vcc, skb);
22468 - atomic_inc(&vcc->stats->rx);
22469 + atomic_inc_unchecked(&vcc->stats->rx);
22470
22471 return;
22472 }
22473 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22474 __net_timestamp(skb);
22475
22476 vcc->push(vcc, skb);
22477 - atomic_inc(&vcc->stats->rx);
22478 + atomic_inc_unchecked(&vcc->stats->rx);
22479
22480 if (skb->truesize > SAR_FB_SIZE_3)
22481 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22482 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22483 if (vcc->qos.aal != ATM_AAL0) {
22484 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22485 card->name, vpi, vci);
22486 - atomic_inc(&vcc->stats->rx_drop);
22487 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22488 goto drop;
22489 }
22490
22491 if ((sb = dev_alloc_skb(64)) == NULL) {
22492 printk("%s: Can't allocate buffers for AAL0.\n",
22493 card->name);
22494 - atomic_inc(&vcc->stats->rx_err);
22495 + atomic_inc_unchecked(&vcc->stats->rx_err);
22496 goto drop;
22497 }
22498
22499 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22500 ATM_SKB(sb)->vcc = vcc;
22501 __net_timestamp(sb);
22502 vcc->push(vcc, sb);
22503 - atomic_inc(&vcc->stats->rx);
22504 + atomic_inc_unchecked(&vcc->stats->rx);
22505
22506 drop:
22507 skb_pull(queue, 64);
22508 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22509
22510 if (vc == NULL) {
22511 printk("%s: NULL connection in send().\n", card->name);
22512 - atomic_inc(&vcc->stats->tx_err);
22513 + atomic_inc_unchecked(&vcc->stats->tx_err);
22514 dev_kfree_skb(skb);
22515 return -EINVAL;
22516 }
22517 if (!test_bit(VCF_TX, &vc->flags)) {
22518 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22519 - atomic_inc(&vcc->stats->tx_err);
22520 + atomic_inc_unchecked(&vcc->stats->tx_err);
22521 dev_kfree_skb(skb);
22522 return -EINVAL;
22523 }
22524 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22525 break;
22526 default:
22527 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22528 - atomic_inc(&vcc->stats->tx_err);
22529 + atomic_inc_unchecked(&vcc->stats->tx_err);
22530 dev_kfree_skb(skb);
22531 return -EINVAL;
22532 }
22533
22534 if (skb_shinfo(skb)->nr_frags != 0) {
22535 printk("%s: No scatter-gather yet.\n", card->name);
22536 - atomic_inc(&vcc->stats->tx_err);
22537 + atomic_inc_unchecked(&vcc->stats->tx_err);
22538 dev_kfree_skb(skb);
22539 return -EINVAL;
22540 }
22541 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22542
22543 err = queue_skb(card, vc, skb, oam);
22544 if (err) {
22545 - atomic_inc(&vcc->stats->tx_err);
22546 + atomic_inc_unchecked(&vcc->stats->tx_err);
22547 dev_kfree_skb(skb);
22548 return err;
22549 }
22550 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22551 skb = dev_alloc_skb(64);
22552 if (!skb) {
22553 printk("%s: Out of memory in send_oam().\n", card->name);
22554 - atomic_inc(&vcc->stats->tx_err);
22555 + atomic_inc_unchecked(&vcc->stats->tx_err);
22556 return -ENOMEM;
22557 }
22558 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22559 diff -urNp linux-3.0.3/drivers/atm/iphase.c linux-3.0.3/drivers/atm/iphase.c
22560 --- linux-3.0.3/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22561 +++ linux-3.0.3/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22562 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22563 status = (u_short) (buf_desc_ptr->desc_mode);
22564 if (status & (RX_CER | RX_PTE | RX_OFL))
22565 {
22566 - atomic_inc(&vcc->stats->rx_err);
22567 + atomic_inc_unchecked(&vcc->stats->rx_err);
22568 IF_ERR(printk("IA: bad packet, dropping it");)
22569 if (status & RX_CER) {
22570 IF_ERR(printk(" cause: packet CRC error\n");)
22571 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22572 len = dma_addr - buf_addr;
22573 if (len > iadev->rx_buf_sz) {
22574 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22575 - atomic_inc(&vcc->stats->rx_err);
22576 + atomic_inc_unchecked(&vcc->stats->rx_err);
22577 goto out_free_desc;
22578 }
22579
22580 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22581 ia_vcc = INPH_IA_VCC(vcc);
22582 if (ia_vcc == NULL)
22583 {
22584 - atomic_inc(&vcc->stats->rx_err);
22585 + atomic_inc_unchecked(&vcc->stats->rx_err);
22586 dev_kfree_skb_any(skb);
22587 atm_return(vcc, atm_guess_pdu2truesize(len));
22588 goto INCR_DLE;
22589 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22590 if ((length > iadev->rx_buf_sz) || (length >
22591 (skb->len - sizeof(struct cpcs_trailer))))
22592 {
22593 - atomic_inc(&vcc->stats->rx_err);
22594 + atomic_inc_unchecked(&vcc->stats->rx_err);
22595 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22596 length, skb->len);)
22597 dev_kfree_skb_any(skb);
22598 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22599
22600 IF_RX(printk("rx_dle_intr: skb push");)
22601 vcc->push(vcc,skb);
22602 - atomic_inc(&vcc->stats->rx);
22603 + atomic_inc_unchecked(&vcc->stats->rx);
22604 iadev->rx_pkt_cnt++;
22605 }
22606 INCR_DLE:
22607 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22608 {
22609 struct k_sonet_stats *stats;
22610 stats = &PRIV(_ia_dev[board])->sonet_stats;
22611 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22612 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22613 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22614 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22615 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22616 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22617 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22618 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22619 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22620 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22621 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22622 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22623 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22624 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22625 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22626 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22627 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22628 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22629 }
22630 ia_cmds.status = 0;
22631 break;
22632 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22633 if ((desc == 0) || (desc > iadev->num_tx_desc))
22634 {
22635 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22636 - atomic_inc(&vcc->stats->tx);
22637 + atomic_inc_unchecked(&vcc->stats->tx);
22638 if (vcc->pop)
22639 vcc->pop(vcc, skb);
22640 else
22641 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22642 ATM_DESC(skb) = vcc->vci;
22643 skb_queue_tail(&iadev->tx_dma_q, skb);
22644
22645 - atomic_inc(&vcc->stats->tx);
22646 + atomic_inc_unchecked(&vcc->stats->tx);
22647 iadev->tx_pkt_cnt++;
22648 /* Increment transaction counter */
22649 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22650
22651 #if 0
22652 /* add flow control logic */
22653 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22654 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22655 if (iavcc->vc_desc_cnt > 10) {
22656 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22657 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22658 diff -urNp linux-3.0.3/drivers/atm/lanai.c linux-3.0.3/drivers/atm/lanai.c
22659 --- linux-3.0.3/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22660 +++ linux-3.0.3/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22661 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22662 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22663 lanai_endtx(lanai, lvcc);
22664 lanai_free_skb(lvcc->tx.atmvcc, skb);
22665 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22666 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22667 }
22668
22669 /* Try to fill the buffer - don't call unless there is backlog */
22670 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22671 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22672 __net_timestamp(skb);
22673 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22674 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22675 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22676 out:
22677 lvcc->rx.buf.ptr = end;
22678 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22679 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22680 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22681 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22682 lanai->stats.service_rxnotaal5++;
22683 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22684 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22685 return 0;
22686 }
22687 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22688 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22689 int bytes;
22690 read_unlock(&vcc_sklist_lock);
22691 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22692 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22693 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22694 lvcc->stats.x.aal5.service_trash++;
22695 bytes = (SERVICE_GET_END(s) * 16) -
22696 (((unsigned long) lvcc->rx.buf.ptr) -
22697 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22698 }
22699 if (s & SERVICE_STREAM) {
22700 read_unlock(&vcc_sklist_lock);
22701 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22702 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22703 lvcc->stats.x.aal5.service_stream++;
22704 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22705 "PDU on VCI %d!\n", lanai->number, vci);
22706 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22707 return 0;
22708 }
22709 DPRINTK("got rx crc error on vci %d\n", vci);
22710 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22711 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22712 lvcc->stats.x.aal5.service_rxcrc++;
22713 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22714 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22715 diff -urNp linux-3.0.3/drivers/atm/nicstar.c linux-3.0.3/drivers/atm/nicstar.c
22716 --- linux-3.0.3/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22717 +++ linux-3.0.3/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22718 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22719 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22720 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22721 card->index);
22722 - atomic_inc(&vcc->stats->tx_err);
22723 + atomic_inc_unchecked(&vcc->stats->tx_err);
22724 dev_kfree_skb_any(skb);
22725 return -EINVAL;
22726 }
22727 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22728 if (!vc->tx) {
22729 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22730 card->index);
22731 - atomic_inc(&vcc->stats->tx_err);
22732 + atomic_inc_unchecked(&vcc->stats->tx_err);
22733 dev_kfree_skb_any(skb);
22734 return -EINVAL;
22735 }
22736 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22737 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22738 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22739 card->index);
22740 - atomic_inc(&vcc->stats->tx_err);
22741 + atomic_inc_unchecked(&vcc->stats->tx_err);
22742 dev_kfree_skb_any(skb);
22743 return -EINVAL;
22744 }
22745
22746 if (skb_shinfo(skb)->nr_frags != 0) {
22747 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22748 - atomic_inc(&vcc->stats->tx_err);
22749 + atomic_inc_unchecked(&vcc->stats->tx_err);
22750 dev_kfree_skb_any(skb);
22751 return -EINVAL;
22752 }
22753 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22754 }
22755
22756 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22757 - atomic_inc(&vcc->stats->tx_err);
22758 + atomic_inc_unchecked(&vcc->stats->tx_err);
22759 dev_kfree_skb_any(skb);
22760 return -EIO;
22761 }
22762 - atomic_inc(&vcc->stats->tx);
22763 + atomic_inc_unchecked(&vcc->stats->tx);
22764
22765 return 0;
22766 }
22767 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22768 printk
22769 ("nicstar%d: Can't allocate buffers for aal0.\n",
22770 card->index);
22771 - atomic_add(i, &vcc->stats->rx_drop);
22772 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22773 break;
22774 }
22775 if (!atm_charge(vcc, sb->truesize)) {
22776 RXPRINTK
22777 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22778 card->index);
22779 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22780 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22781 dev_kfree_skb_any(sb);
22782 break;
22783 }
22784 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22785 ATM_SKB(sb)->vcc = vcc;
22786 __net_timestamp(sb);
22787 vcc->push(vcc, sb);
22788 - atomic_inc(&vcc->stats->rx);
22789 + atomic_inc_unchecked(&vcc->stats->rx);
22790 cell += ATM_CELL_PAYLOAD;
22791 }
22792
22793 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22794 if (iovb == NULL) {
22795 printk("nicstar%d: Out of iovec buffers.\n",
22796 card->index);
22797 - atomic_inc(&vcc->stats->rx_drop);
22798 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22799 recycle_rx_buf(card, skb);
22800 return;
22801 }
22802 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22803 small or large buffer itself. */
22804 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22805 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22806 - atomic_inc(&vcc->stats->rx_err);
22807 + atomic_inc_unchecked(&vcc->stats->rx_err);
22808 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22809 NS_MAX_IOVECS);
22810 NS_PRV_IOVCNT(iovb) = 0;
22811 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22812 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22813 card->index);
22814 which_list(card, skb);
22815 - atomic_inc(&vcc->stats->rx_err);
22816 + atomic_inc_unchecked(&vcc->stats->rx_err);
22817 recycle_rx_buf(card, skb);
22818 vc->rx_iov = NULL;
22819 recycle_iov_buf(card, iovb);
22820 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22821 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22822 card->index);
22823 which_list(card, skb);
22824 - atomic_inc(&vcc->stats->rx_err);
22825 + atomic_inc_unchecked(&vcc->stats->rx_err);
22826 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22827 NS_PRV_IOVCNT(iovb));
22828 vc->rx_iov = NULL;
22829 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22830 printk(" - PDU size mismatch.\n");
22831 else
22832 printk(".\n");
22833 - atomic_inc(&vcc->stats->rx_err);
22834 + atomic_inc_unchecked(&vcc->stats->rx_err);
22835 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22836 NS_PRV_IOVCNT(iovb));
22837 vc->rx_iov = NULL;
22838 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22839 /* skb points to a small buffer */
22840 if (!atm_charge(vcc, skb->truesize)) {
22841 push_rxbufs(card, skb);
22842 - atomic_inc(&vcc->stats->rx_drop);
22843 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22844 } else {
22845 skb_put(skb, len);
22846 dequeue_sm_buf(card, skb);
22847 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22848 ATM_SKB(skb)->vcc = vcc;
22849 __net_timestamp(skb);
22850 vcc->push(vcc, skb);
22851 - atomic_inc(&vcc->stats->rx);
22852 + atomic_inc_unchecked(&vcc->stats->rx);
22853 }
22854 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22855 struct sk_buff *sb;
22856 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22857 if (len <= NS_SMBUFSIZE) {
22858 if (!atm_charge(vcc, sb->truesize)) {
22859 push_rxbufs(card, sb);
22860 - atomic_inc(&vcc->stats->rx_drop);
22861 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22862 } else {
22863 skb_put(sb, len);
22864 dequeue_sm_buf(card, sb);
22865 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22866 ATM_SKB(sb)->vcc = vcc;
22867 __net_timestamp(sb);
22868 vcc->push(vcc, sb);
22869 - atomic_inc(&vcc->stats->rx);
22870 + atomic_inc_unchecked(&vcc->stats->rx);
22871 }
22872
22873 push_rxbufs(card, skb);
22874 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22875
22876 if (!atm_charge(vcc, skb->truesize)) {
22877 push_rxbufs(card, skb);
22878 - atomic_inc(&vcc->stats->rx_drop);
22879 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22880 } else {
22881 dequeue_lg_buf(card, skb);
22882 #ifdef NS_USE_DESTRUCTORS
22883 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22884 ATM_SKB(skb)->vcc = vcc;
22885 __net_timestamp(skb);
22886 vcc->push(vcc, skb);
22887 - atomic_inc(&vcc->stats->rx);
22888 + atomic_inc_unchecked(&vcc->stats->rx);
22889 }
22890
22891 push_rxbufs(card, sb);
22892 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22893 printk
22894 ("nicstar%d: Out of huge buffers.\n",
22895 card->index);
22896 - atomic_inc(&vcc->stats->rx_drop);
22897 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22898 recycle_iovec_rx_bufs(card,
22899 (struct iovec *)
22900 iovb->data,
22901 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22902 card->hbpool.count++;
22903 } else
22904 dev_kfree_skb_any(hb);
22905 - atomic_inc(&vcc->stats->rx_drop);
22906 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22907 } else {
22908 /* Copy the small buffer to the huge buffer */
22909 sb = (struct sk_buff *)iov->iov_base;
22910 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22911 #endif /* NS_USE_DESTRUCTORS */
22912 __net_timestamp(hb);
22913 vcc->push(vcc, hb);
22914 - atomic_inc(&vcc->stats->rx);
22915 + atomic_inc_unchecked(&vcc->stats->rx);
22916 }
22917 }
22918
22919 diff -urNp linux-3.0.3/drivers/atm/solos-pci.c linux-3.0.3/drivers/atm/solos-pci.c
22920 --- linux-3.0.3/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22921 +++ linux-3.0.3/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22922 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22923 }
22924 atm_charge(vcc, skb->truesize);
22925 vcc->push(vcc, skb);
22926 - atomic_inc(&vcc->stats->rx);
22927 + atomic_inc_unchecked(&vcc->stats->rx);
22928 break;
22929
22930 case PKT_STATUS:
22931 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22932 char msg[500];
22933 char item[10];
22934
22935 + pax_track_stack();
22936 +
22937 len = buf->len;
22938 for (i = 0; i < len; i++){
22939 if(i % 8 == 0)
22940 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22941 vcc = SKB_CB(oldskb)->vcc;
22942
22943 if (vcc) {
22944 - atomic_inc(&vcc->stats->tx);
22945 + atomic_inc_unchecked(&vcc->stats->tx);
22946 solos_pop(vcc, oldskb);
22947 } else
22948 dev_kfree_skb_irq(oldskb);
22949 diff -urNp linux-3.0.3/drivers/atm/suni.c linux-3.0.3/drivers/atm/suni.c
22950 --- linux-3.0.3/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22951 +++ linux-3.0.3/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22952 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22953
22954
22955 #define ADD_LIMITED(s,v) \
22956 - atomic_add((v),&stats->s); \
22957 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22958 + atomic_add_unchecked((v),&stats->s); \
22959 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22960
22961
22962 static void suni_hz(unsigned long from_timer)
22963 diff -urNp linux-3.0.3/drivers/atm/uPD98402.c linux-3.0.3/drivers/atm/uPD98402.c
22964 --- linux-3.0.3/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22965 +++ linux-3.0.3/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22966 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22967 struct sonet_stats tmp;
22968 int error = 0;
22969
22970 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22971 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22972 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22973 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22974 if (zero && !error) {
22975 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22976
22977
22978 #define ADD_LIMITED(s,v) \
22979 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22980 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22981 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22982 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22983 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22984 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22985
22986
22987 static void stat_event(struct atm_dev *dev)
22988 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22989 if (reason & uPD98402_INT_PFM) stat_event(dev);
22990 if (reason & uPD98402_INT_PCO) {
22991 (void) GET(PCOCR); /* clear interrupt cause */
22992 - atomic_add(GET(HECCT),
22993 + atomic_add_unchecked(GET(HECCT),
22994 &PRIV(dev)->sonet_stats.uncorr_hcs);
22995 }
22996 if ((reason & uPD98402_INT_RFO) &&
22997 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22998 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22999 uPD98402_INT_LOS),PIMR); /* enable them */
23000 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23001 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23002 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23003 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23004 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23005 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23006 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23007 return 0;
23008 }
23009
23010 diff -urNp linux-3.0.3/drivers/atm/zatm.c linux-3.0.3/drivers/atm/zatm.c
23011 --- linux-3.0.3/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23012 +++ linux-3.0.3/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23013 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23014 }
23015 if (!size) {
23016 dev_kfree_skb_irq(skb);
23017 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23018 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23019 continue;
23020 }
23021 if (!atm_charge(vcc,skb->truesize)) {
23022 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23023 skb->len = size;
23024 ATM_SKB(skb)->vcc = vcc;
23025 vcc->push(vcc,skb);
23026 - atomic_inc(&vcc->stats->rx);
23027 + atomic_inc_unchecked(&vcc->stats->rx);
23028 }
23029 zout(pos & 0xffff,MTA(mbx));
23030 #if 0 /* probably a stupid idea */
23031 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23032 skb_queue_head(&zatm_vcc->backlog,skb);
23033 break;
23034 }
23035 - atomic_inc(&vcc->stats->tx);
23036 + atomic_inc_unchecked(&vcc->stats->tx);
23037 wake_up(&zatm_vcc->tx_wait);
23038 }
23039
23040 diff -urNp linux-3.0.3/drivers/base/power/wakeup.c linux-3.0.3/drivers/base/power/wakeup.c
23041 --- linux-3.0.3/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23042 +++ linux-3.0.3/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23043 @@ -29,14 +29,14 @@ bool events_check_enabled;
23044 * They need to be modified together atomically, so it's better to use one
23045 * atomic variable to hold them both.
23046 */
23047 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23048 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23049
23050 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23051 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23052
23053 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23054 {
23055 - unsigned int comb = atomic_read(&combined_event_count);
23056 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23057
23058 *cnt = (comb >> IN_PROGRESS_BITS);
23059 *inpr = comb & MAX_IN_PROGRESS;
23060 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23061 ws->last_time = ktime_get();
23062
23063 /* Increment the counter of events in progress. */
23064 - atomic_inc(&combined_event_count);
23065 + atomic_inc_unchecked(&combined_event_count);
23066 }
23067
23068 /**
23069 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23070 * Increment the counter of registered wakeup events and decrement the
23071 * couter of wakeup events in progress simultaneously.
23072 */
23073 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23074 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23075 }
23076
23077 /**
23078 diff -urNp linux-3.0.3/drivers/block/cciss.c linux-3.0.3/drivers/block/cciss.c
23079 --- linux-3.0.3/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23080 +++ linux-3.0.3/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23081 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23082 int err;
23083 u32 cp;
23084
23085 + memset(&arg64, 0, sizeof(arg64));
23086 +
23087 err = 0;
23088 err |=
23089 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23090 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23091 while (!list_empty(&h->reqQ)) {
23092 c = list_entry(h->reqQ.next, CommandList_struct, list);
23093 /* can't do anything if fifo is full */
23094 - if ((h->access.fifo_full(h))) {
23095 + if ((h->access->fifo_full(h))) {
23096 dev_warn(&h->pdev->dev, "fifo full\n");
23097 break;
23098 }
23099 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23100 h->Qdepth--;
23101
23102 /* Tell the controller execute command */
23103 - h->access.submit_command(h, c);
23104 + h->access->submit_command(h, c);
23105
23106 /* Put job onto the completed Q */
23107 addQ(&h->cmpQ, c);
23108 @@ -3422,17 +3424,17 @@ startio:
23109
23110 static inline unsigned long get_next_completion(ctlr_info_t *h)
23111 {
23112 - return h->access.command_completed(h);
23113 + return h->access->command_completed(h);
23114 }
23115
23116 static inline int interrupt_pending(ctlr_info_t *h)
23117 {
23118 - return h->access.intr_pending(h);
23119 + return h->access->intr_pending(h);
23120 }
23121
23122 static inline long interrupt_not_for_us(ctlr_info_t *h)
23123 {
23124 - return ((h->access.intr_pending(h) == 0) ||
23125 + return ((h->access->intr_pending(h) == 0) ||
23126 (h->interrupts_enabled == 0));
23127 }
23128
23129 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23130 u32 a;
23131
23132 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23133 - return h->access.command_completed(h);
23134 + return h->access->command_completed(h);
23135
23136 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23137 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23138 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23139 trans_support & CFGTBL_Trans_use_short_tags);
23140
23141 /* Change the access methods to the performant access methods */
23142 - h->access = SA5_performant_access;
23143 + h->access = &SA5_performant_access;
23144 h->transMethod = CFGTBL_Trans_Performant;
23145
23146 return;
23147 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23148 if (prod_index < 0)
23149 return -ENODEV;
23150 h->product_name = products[prod_index].product_name;
23151 - h->access = *(products[prod_index].access);
23152 + h->access = products[prod_index].access;
23153
23154 if (cciss_board_disabled(h)) {
23155 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23156 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23157 }
23158
23159 /* make sure the board interrupts are off */
23160 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23161 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23162 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23163 if (rc)
23164 goto clean2;
23165 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23166 * fake ones to scoop up any residual completions.
23167 */
23168 spin_lock_irqsave(&h->lock, flags);
23169 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23170 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23171 spin_unlock_irqrestore(&h->lock, flags);
23172 free_irq(h->intr[PERF_MODE_INT], h);
23173 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23174 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23175 dev_info(&h->pdev->dev, "Board READY.\n");
23176 dev_info(&h->pdev->dev,
23177 "Waiting for stale completions to drain.\n");
23178 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23179 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23180 msleep(10000);
23181 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23182 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23183
23184 rc = controller_reset_failed(h->cfgtable);
23185 if (rc)
23186 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23187 cciss_scsi_setup(h);
23188
23189 /* Turn the interrupts on so we can service requests */
23190 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23191 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23192
23193 /* Get the firmware version */
23194 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23195 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23196 kfree(flush_buf);
23197 if (return_code != IO_OK)
23198 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23199 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23200 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23201 free_irq(h->intr[PERF_MODE_INT], h);
23202 }
23203
23204 diff -urNp linux-3.0.3/drivers/block/cciss.h linux-3.0.3/drivers/block/cciss.h
23205 --- linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23206 +++ linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23207 @@ -100,7 +100,7 @@ struct ctlr_info
23208 /* information about each logical volume */
23209 drive_info_struct *drv[CISS_MAX_LUN];
23210
23211 - struct access_method access;
23212 + struct access_method *access;
23213
23214 /* queue and queue Info */
23215 struct list_head reqQ;
23216 diff -urNp linux-3.0.3/drivers/block/cpqarray.c linux-3.0.3/drivers/block/cpqarray.c
23217 --- linux-3.0.3/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23218 +++ linux-3.0.3/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23219 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23220 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23221 goto Enomem4;
23222 }
23223 - hba[i]->access.set_intr_mask(hba[i], 0);
23224 + hba[i]->access->set_intr_mask(hba[i], 0);
23225 if (request_irq(hba[i]->intr, do_ida_intr,
23226 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23227 {
23228 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23229 add_timer(&hba[i]->timer);
23230
23231 /* Enable IRQ now that spinlock and rate limit timer are set up */
23232 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23233 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23234
23235 for(j=0; j<NWD; j++) {
23236 struct gendisk *disk = ida_gendisk[i][j];
23237 @@ -694,7 +694,7 @@ DBGINFO(
23238 for(i=0; i<NR_PRODUCTS; i++) {
23239 if (board_id == products[i].board_id) {
23240 c->product_name = products[i].product_name;
23241 - c->access = *(products[i].access);
23242 + c->access = products[i].access;
23243 break;
23244 }
23245 }
23246 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23247 hba[ctlr]->intr = intr;
23248 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23249 hba[ctlr]->product_name = products[j].product_name;
23250 - hba[ctlr]->access = *(products[j].access);
23251 + hba[ctlr]->access = products[j].access;
23252 hba[ctlr]->ctlr = ctlr;
23253 hba[ctlr]->board_id = board_id;
23254 hba[ctlr]->pci_dev = NULL; /* not PCI */
23255 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23256 struct scatterlist tmp_sg[SG_MAX];
23257 int i, dir, seg;
23258
23259 + pax_track_stack();
23260 +
23261 queue_next:
23262 creq = blk_peek_request(q);
23263 if (!creq)
23264 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23265
23266 while((c = h->reqQ) != NULL) {
23267 /* Can't do anything if we're busy */
23268 - if (h->access.fifo_full(h) == 0)
23269 + if (h->access->fifo_full(h) == 0)
23270 return;
23271
23272 /* Get the first entry from the request Q */
23273 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23274 h->Qdepth--;
23275
23276 /* Tell the controller to do our bidding */
23277 - h->access.submit_command(h, c);
23278 + h->access->submit_command(h, c);
23279
23280 /* Get onto the completion Q */
23281 addQ(&h->cmpQ, c);
23282 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23283 unsigned long flags;
23284 __u32 a,a1;
23285
23286 - istat = h->access.intr_pending(h);
23287 + istat = h->access->intr_pending(h);
23288 /* Is this interrupt for us? */
23289 if (istat == 0)
23290 return IRQ_NONE;
23291 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23292 */
23293 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23294 if (istat & FIFO_NOT_EMPTY) {
23295 - while((a = h->access.command_completed(h))) {
23296 + while((a = h->access->command_completed(h))) {
23297 a1 = a; a &= ~3;
23298 if ((c = h->cmpQ) == NULL)
23299 {
23300 @@ -1449,11 +1451,11 @@ static int sendcmd(
23301 /*
23302 * Disable interrupt
23303 */
23304 - info_p->access.set_intr_mask(info_p, 0);
23305 + info_p->access->set_intr_mask(info_p, 0);
23306 /* Make sure there is room in the command FIFO */
23307 /* Actually it should be completely empty at this time. */
23308 for (i = 200000; i > 0; i--) {
23309 - temp = info_p->access.fifo_full(info_p);
23310 + temp = info_p->access->fifo_full(info_p);
23311 if (temp != 0) {
23312 break;
23313 }
23314 @@ -1466,7 +1468,7 @@ DBG(
23315 /*
23316 * Send the cmd
23317 */
23318 - info_p->access.submit_command(info_p, c);
23319 + info_p->access->submit_command(info_p, c);
23320 complete = pollcomplete(ctlr);
23321
23322 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23323 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23324 * we check the new geometry. Then turn interrupts back on when
23325 * we're done.
23326 */
23327 - host->access.set_intr_mask(host, 0);
23328 + host->access->set_intr_mask(host, 0);
23329 getgeometry(ctlr);
23330 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23331 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23332
23333 for(i=0; i<NWD; i++) {
23334 struct gendisk *disk = ida_gendisk[ctlr][i];
23335 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23336 /* Wait (up to 2 seconds) for a command to complete */
23337
23338 for (i = 200000; i > 0; i--) {
23339 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23340 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23341 if (done == 0) {
23342 udelay(10); /* a short fixed delay */
23343 } else
23344 diff -urNp linux-3.0.3/drivers/block/cpqarray.h linux-3.0.3/drivers/block/cpqarray.h
23345 --- linux-3.0.3/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23346 +++ linux-3.0.3/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23347 @@ -99,7 +99,7 @@ struct ctlr_info {
23348 drv_info_t drv[NWD];
23349 struct proc_dir_entry *proc;
23350
23351 - struct access_method access;
23352 + struct access_method *access;
23353
23354 cmdlist_t *reqQ;
23355 cmdlist_t *cmpQ;
23356 diff -urNp linux-3.0.3/drivers/block/DAC960.c linux-3.0.3/drivers/block/DAC960.c
23357 --- linux-3.0.3/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23358 +++ linux-3.0.3/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23359 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23360 unsigned long flags;
23361 int Channel, TargetID;
23362
23363 + pax_track_stack();
23364 +
23365 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23366 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23367 sizeof(DAC960_SCSI_Inquiry_T) +
23368 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_int.h linux-3.0.3/drivers/block/drbd/drbd_int.h
23369 --- linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23370 +++ linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23371 @@ -737,7 +737,7 @@ struct drbd_request;
23372 struct drbd_epoch {
23373 struct list_head list;
23374 unsigned int barrier_nr;
23375 - atomic_t epoch_size; /* increased on every request added. */
23376 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23377 atomic_t active; /* increased on every req. added, and dec on every finished. */
23378 unsigned long flags;
23379 };
23380 @@ -1109,7 +1109,7 @@ struct drbd_conf {
23381 void *int_dig_in;
23382 void *int_dig_vv;
23383 wait_queue_head_t seq_wait;
23384 - atomic_t packet_seq;
23385 + atomic_unchecked_t packet_seq;
23386 unsigned int peer_seq;
23387 spinlock_t peer_seq_lock;
23388 unsigned int minor;
23389 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_main.c linux-3.0.3/drivers/block/drbd/drbd_main.c
23390 --- linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23391 +++ linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23392 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23393 p.sector = sector;
23394 p.block_id = block_id;
23395 p.blksize = blksize;
23396 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23397 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23398
23399 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23400 return false;
23401 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23402 p.sector = cpu_to_be64(req->sector);
23403 p.block_id = (unsigned long)req;
23404 p.seq_num = cpu_to_be32(req->seq_num =
23405 - atomic_add_return(1, &mdev->packet_seq));
23406 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23407
23408 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23409
23410 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23411 atomic_set(&mdev->unacked_cnt, 0);
23412 atomic_set(&mdev->local_cnt, 0);
23413 atomic_set(&mdev->net_cnt, 0);
23414 - atomic_set(&mdev->packet_seq, 0);
23415 + atomic_set_unchecked(&mdev->packet_seq, 0);
23416 atomic_set(&mdev->pp_in_use, 0);
23417 atomic_set(&mdev->pp_in_use_by_net, 0);
23418 atomic_set(&mdev->rs_sect_in, 0);
23419 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23420 mdev->receiver.t_state);
23421
23422 /* no need to lock it, I'm the only thread alive */
23423 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23424 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23425 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23426 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23427 mdev->al_writ_cnt =
23428 mdev->bm_writ_cnt =
23429 mdev->read_cnt =
23430 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_nl.c linux-3.0.3/drivers/block/drbd/drbd_nl.c
23431 --- linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23432 +++ linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23433 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23434 module_put(THIS_MODULE);
23435 }
23436
23437 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23438 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23439
23440 static unsigned short *
23441 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23442 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23443 cn_reply->id.idx = CN_IDX_DRBD;
23444 cn_reply->id.val = CN_VAL_DRBD;
23445
23446 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23447 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23448 cn_reply->ack = 0; /* not used here. */
23449 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23450 (int)((char *)tl - (char *)reply->tag_list);
23451 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23452 cn_reply->id.idx = CN_IDX_DRBD;
23453 cn_reply->id.val = CN_VAL_DRBD;
23454
23455 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23456 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23457 cn_reply->ack = 0; /* not used here. */
23458 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23459 (int)((char *)tl - (char *)reply->tag_list);
23460 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23461 cn_reply->id.idx = CN_IDX_DRBD;
23462 cn_reply->id.val = CN_VAL_DRBD;
23463
23464 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23465 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23466 cn_reply->ack = 0; // not used here.
23467 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23468 (int)((char*)tl - (char*)reply->tag_list);
23469 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23470 cn_reply->id.idx = CN_IDX_DRBD;
23471 cn_reply->id.val = CN_VAL_DRBD;
23472
23473 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23474 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23475 cn_reply->ack = 0; /* not used here. */
23476 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23477 (int)((char *)tl - (char *)reply->tag_list);
23478 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_receiver.c linux-3.0.3/drivers/block/drbd/drbd_receiver.c
23479 --- linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23480 +++ linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23481 @@ -894,7 +894,7 @@ retry:
23482 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23483 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23484
23485 - atomic_set(&mdev->packet_seq, 0);
23486 + atomic_set_unchecked(&mdev->packet_seq, 0);
23487 mdev->peer_seq = 0;
23488
23489 drbd_thread_start(&mdev->asender);
23490 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23491 do {
23492 next_epoch = NULL;
23493
23494 - epoch_size = atomic_read(&epoch->epoch_size);
23495 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23496
23497 switch (ev & ~EV_CLEANUP) {
23498 case EV_PUT:
23499 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23500 rv = FE_DESTROYED;
23501 } else {
23502 epoch->flags = 0;
23503 - atomic_set(&epoch->epoch_size, 0);
23504 + atomic_set_unchecked(&epoch->epoch_size, 0);
23505 /* atomic_set(&epoch->active, 0); is already zero */
23506 if (rv == FE_STILL_LIVE)
23507 rv = FE_RECYCLED;
23508 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23509 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23510 drbd_flush(mdev);
23511
23512 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23513 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23514 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23515 if (epoch)
23516 break;
23517 }
23518
23519 epoch = mdev->current_epoch;
23520 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23521 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23522
23523 D_ASSERT(atomic_read(&epoch->active) == 0);
23524 D_ASSERT(epoch->flags == 0);
23525 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23526 }
23527
23528 epoch->flags = 0;
23529 - atomic_set(&epoch->epoch_size, 0);
23530 + atomic_set_unchecked(&epoch->epoch_size, 0);
23531 atomic_set(&epoch->active, 0);
23532
23533 spin_lock(&mdev->epoch_lock);
23534 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23535 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23536 list_add(&epoch->list, &mdev->current_epoch->list);
23537 mdev->current_epoch = epoch;
23538 mdev->epochs++;
23539 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23540 spin_unlock(&mdev->peer_seq_lock);
23541
23542 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23543 - atomic_inc(&mdev->current_epoch->epoch_size);
23544 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23545 return drbd_drain_block(mdev, data_size);
23546 }
23547
23548 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23549
23550 spin_lock(&mdev->epoch_lock);
23551 e->epoch = mdev->current_epoch;
23552 - atomic_inc(&e->epoch->epoch_size);
23553 + atomic_inc_unchecked(&e->epoch->epoch_size);
23554 atomic_inc(&e->epoch->active);
23555 spin_unlock(&mdev->epoch_lock);
23556
23557 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23558 D_ASSERT(list_empty(&mdev->done_ee));
23559
23560 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23561 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23562 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23563 D_ASSERT(list_empty(&mdev->current_epoch->list));
23564 }
23565
23566 diff -urNp linux-3.0.3/drivers/block/nbd.c linux-3.0.3/drivers/block/nbd.c
23567 --- linux-3.0.3/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23568 +++ linux-3.0.3/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23569 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23570 struct kvec iov;
23571 sigset_t blocked, oldset;
23572
23573 + pax_track_stack();
23574 +
23575 if (unlikely(!sock)) {
23576 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23577 lo->disk->disk_name, (send ? "send" : "recv"));
23578 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23579 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23580 unsigned int cmd, unsigned long arg)
23581 {
23582 + pax_track_stack();
23583 +
23584 switch (cmd) {
23585 case NBD_DISCONNECT: {
23586 struct request sreq;
23587 diff -urNp linux-3.0.3/drivers/char/agp/frontend.c linux-3.0.3/drivers/char/agp/frontend.c
23588 --- linux-3.0.3/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23589 +++ linux-3.0.3/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23590 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23591 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23592 return -EFAULT;
23593
23594 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23595 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23596 return -EFAULT;
23597
23598 client = agp_find_client_by_pid(reserve.pid);
23599 diff -urNp linux-3.0.3/drivers/char/briq_panel.c linux-3.0.3/drivers/char/briq_panel.c
23600 --- linux-3.0.3/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23601 +++ linux-3.0.3/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23602 @@ -9,6 +9,7 @@
23603 #include <linux/types.h>
23604 #include <linux/errno.h>
23605 #include <linux/tty.h>
23606 +#include <linux/mutex.h>
23607 #include <linux/timer.h>
23608 #include <linux/kernel.h>
23609 #include <linux/wait.h>
23610 @@ -34,6 +35,7 @@ static int vfd_is_open;
23611 static unsigned char vfd[40];
23612 static int vfd_cursor;
23613 static unsigned char ledpb, led;
23614 +static DEFINE_MUTEX(vfd_mutex);
23615
23616 static void update_vfd(void)
23617 {
23618 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23619 if (!vfd_is_open)
23620 return -EBUSY;
23621
23622 + mutex_lock(&vfd_mutex);
23623 for (;;) {
23624 char c;
23625 if (!indx)
23626 break;
23627 - if (get_user(c, buf))
23628 + if (get_user(c, buf)) {
23629 + mutex_unlock(&vfd_mutex);
23630 return -EFAULT;
23631 + }
23632 if (esc) {
23633 set_led(c);
23634 esc = 0;
23635 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23636 buf++;
23637 }
23638 update_vfd();
23639 + mutex_unlock(&vfd_mutex);
23640
23641 return len;
23642 }
23643 diff -urNp linux-3.0.3/drivers/char/genrtc.c linux-3.0.3/drivers/char/genrtc.c
23644 --- linux-3.0.3/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23645 +++ linux-3.0.3/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23646 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23647 switch (cmd) {
23648
23649 case RTC_PLL_GET:
23650 + memset(&pll, 0, sizeof(pll));
23651 if (get_rtc_pll(&pll))
23652 return -EINVAL;
23653 else
23654 diff -urNp linux-3.0.3/drivers/char/hpet.c linux-3.0.3/drivers/char/hpet.c
23655 --- linux-3.0.3/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23656 +++ linux-3.0.3/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23657 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23658 }
23659
23660 static int
23661 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23662 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23663 struct hpet_info *info)
23664 {
23665 struct hpet_timer __iomem *timer;
23666 diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c
23667 --- linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23668 +++ linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23669 @@ -415,7 +415,7 @@ struct ipmi_smi {
23670 struct proc_dir_entry *proc_dir;
23671 char proc_dir_name[10];
23672
23673 - atomic_t stats[IPMI_NUM_STATS];
23674 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23675
23676 /*
23677 * run_to_completion duplicate of smb_info, smi_info
23678 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23679
23680
23681 #define ipmi_inc_stat(intf, stat) \
23682 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23683 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23684 #define ipmi_get_stat(intf, stat) \
23685 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23686 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23687
23688 static int is_lan_addr(struct ipmi_addr *addr)
23689 {
23690 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23691 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23692 init_waitqueue_head(&intf->waitq);
23693 for (i = 0; i < IPMI_NUM_STATS; i++)
23694 - atomic_set(&intf->stats[i], 0);
23695 + atomic_set_unchecked(&intf->stats[i], 0);
23696
23697 intf->proc_dir = NULL;
23698
23699 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23700 struct ipmi_smi_msg smi_msg;
23701 struct ipmi_recv_msg recv_msg;
23702
23703 + pax_track_stack();
23704 +
23705 si = (struct ipmi_system_interface_addr *) &addr;
23706 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23707 si->channel = IPMI_BMC_CHANNEL;
23708 diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c
23709 --- linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23710 +++ linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23711 @@ -277,7 +277,7 @@ struct smi_info {
23712 unsigned char slave_addr;
23713
23714 /* Counters and things for the proc filesystem. */
23715 - atomic_t stats[SI_NUM_STATS];
23716 + atomic_unchecked_t stats[SI_NUM_STATS];
23717
23718 struct task_struct *thread;
23719
23720 @@ -286,9 +286,9 @@ struct smi_info {
23721 };
23722
23723 #define smi_inc_stat(smi, stat) \
23724 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23725 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23726 #define smi_get_stat(smi, stat) \
23727 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23728 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23729
23730 #define SI_MAX_PARMS 4
23731
23732 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23733 atomic_set(&new_smi->req_events, 0);
23734 new_smi->run_to_completion = 0;
23735 for (i = 0; i < SI_NUM_STATS; i++)
23736 - atomic_set(&new_smi->stats[i], 0);
23737 + atomic_set_unchecked(&new_smi->stats[i], 0);
23738
23739 new_smi->interrupt_disabled = 1;
23740 atomic_set(&new_smi->stop_operation, 0);
23741 diff -urNp linux-3.0.3/drivers/char/Kconfig linux-3.0.3/drivers/char/Kconfig
23742 --- linux-3.0.3/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23743 +++ linux-3.0.3/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23744 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23745
23746 config DEVKMEM
23747 bool "/dev/kmem virtual device support"
23748 - default y
23749 + default n
23750 + depends on !GRKERNSEC_KMEM
23751 help
23752 Say Y here if you want to support the /dev/kmem device. The
23753 /dev/kmem device is rarely used, but can be used for certain
23754 @@ -596,6 +597,7 @@ config DEVPORT
23755 bool
23756 depends on !M68K
23757 depends on ISA || PCI
23758 + depends on !GRKERNSEC_KMEM
23759 default y
23760
23761 source "drivers/s390/char/Kconfig"
23762 diff -urNp linux-3.0.3/drivers/char/mem.c linux-3.0.3/drivers/char/mem.c
23763 --- linux-3.0.3/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23764 +++ linux-3.0.3/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23765 @@ -18,6 +18,7 @@
23766 #include <linux/raw.h>
23767 #include <linux/tty.h>
23768 #include <linux/capability.h>
23769 +#include <linux/security.h>
23770 #include <linux/ptrace.h>
23771 #include <linux/device.h>
23772 #include <linux/highmem.h>
23773 @@ -34,6 +35,10 @@
23774 # include <linux/efi.h>
23775 #endif
23776
23777 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23778 +extern struct file_operations grsec_fops;
23779 +#endif
23780 +
23781 static inline unsigned long size_inside_page(unsigned long start,
23782 unsigned long size)
23783 {
23784 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23785
23786 while (cursor < to) {
23787 if (!devmem_is_allowed(pfn)) {
23788 +#ifdef CONFIG_GRKERNSEC_KMEM
23789 + gr_handle_mem_readwrite(from, to);
23790 +#else
23791 printk(KERN_INFO
23792 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23793 current->comm, from, to);
23794 +#endif
23795 return 0;
23796 }
23797 cursor += PAGE_SIZE;
23798 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23799 }
23800 return 1;
23801 }
23802 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23803 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23804 +{
23805 + return 0;
23806 +}
23807 #else
23808 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23809 {
23810 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23811
23812 while (count > 0) {
23813 unsigned long remaining;
23814 + char *temp;
23815
23816 sz = size_inside_page(p, count);
23817
23818 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23819 if (!ptr)
23820 return -EFAULT;
23821
23822 - remaining = copy_to_user(buf, ptr, sz);
23823 +#ifdef CONFIG_PAX_USERCOPY
23824 + temp = kmalloc(sz, GFP_KERNEL);
23825 + if (!temp) {
23826 + unxlate_dev_mem_ptr(p, ptr);
23827 + return -ENOMEM;
23828 + }
23829 + memcpy(temp, ptr, sz);
23830 +#else
23831 + temp = ptr;
23832 +#endif
23833 +
23834 + remaining = copy_to_user(buf, temp, sz);
23835 +
23836 +#ifdef CONFIG_PAX_USERCOPY
23837 + kfree(temp);
23838 +#endif
23839 +
23840 unxlate_dev_mem_ptr(p, ptr);
23841 if (remaining)
23842 return -EFAULT;
23843 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23844 size_t count, loff_t *ppos)
23845 {
23846 unsigned long p = *ppos;
23847 - ssize_t low_count, read, sz;
23848 + ssize_t low_count, read, sz, err = 0;
23849 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23850 - int err = 0;
23851
23852 read = 0;
23853 if (p < (unsigned long) high_memory) {
23854 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23855 }
23856 #endif
23857 while (low_count > 0) {
23858 + char *temp;
23859 +
23860 sz = size_inside_page(p, low_count);
23861
23862 /*
23863 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23864 */
23865 kbuf = xlate_dev_kmem_ptr((char *)p);
23866
23867 - if (copy_to_user(buf, kbuf, sz))
23868 +#ifdef CONFIG_PAX_USERCOPY
23869 + temp = kmalloc(sz, GFP_KERNEL);
23870 + if (!temp)
23871 + return -ENOMEM;
23872 + memcpy(temp, kbuf, sz);
23873 +#else
23874 + temp = kbuf;
23875 +#endif
23876 +
23877 + err = copy_to_user(buf, temp, sz);
23878 +
23879 +#ifdef CONFIG_PAX_USERCOPY
23880 + kfree(temp);
23881 +#endif
23882 +
23883 + if (err)
23884 return -EFAULT;
23885 buf += sz;
23886 p += sz;
23887 @@ -866,6 +913,9 @@ static const struct memdev {
23888 #ifdef CONFIG_CRASH_DUMP
23889 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23890 #endif
23891 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23892 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23893 +#endif
23894 };
23895
23896 static int memory_open(struct inode *inode, struct file *filp)
23897 diff -urNp linux-3.0.3/drivers/char/nvram.c linux-3.0.3/drivers/char/nvram.c
23898 --- linux-3.0.3/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23899 +++ linux-3.0.3/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23900 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23901
23902 spin_unlock_irq(&rtc_lock);
23903
23904 - if (copy_to_user(buf, contents, tmp - contents))
23905 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23906 return -EFAULT;
23907
23908 *ppos = i;
23909 diff -urNp linux-3.0.3/drivers/char/random.c linux-3.0.3/drivers/char/random.c
23910 --- linux-3.0.3/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23911 +++ linux-3.0.3/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23912 @@ -261,8 +261,13 @@
23913 /*
23914 * Configuration information
23915 */
23916 +#ifdef CONFIG_GRKERNSEC_RANDNET
23917 +#define INPUT_POOL_WORDS 512
23918 +#define OUTPUT_POOL_WORDS 128
23919 +#else
23920 #define INPUT_POOL_WORDS 128
23921 #define OUTPUT_POOL_WORDS 32
23922 +#endif
23923 #define SEC_XFER_SIZE 512
23924 #define EXTRACT_SIZE 10
23925
23926 @@ -300,10 +305,17 @@ static struct poolinfo {
23927 int poolwords;
23928 int tap1, tap2, tap3, tap4, tap5;
23929 } poolinfo_table[] = {
23930 +#ifdef CONFIG_GRKERNSEC_RANDNET
23931 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23932 + { 512, 411, 308, 208, 104, 1 },
23933 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23934 + { 128, 103, 76, 51, 25, 1 },
23935 +#else
23936 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23937 { 128, 103, 76, 51, 25, 1 },
23938 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23939 { 32, 26, 20, 14, 7, 1 },
23940 +#endif
23941 #if 0
23942 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23943 { 2048, 1638, 1231, 819, 411, 1 },
23944 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23945
23946 extract_buf(r, tmp);
23947 i = min_t(int, nbytes, EXTRACT_SIZE);
23948 - if (copy_to_user(buf, tmp, i)) {
23949 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23950 ret = -EFAULT;
23951 break;
23952 }
23953 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23954 #include <linux/sysctl.h>
23955
23956 static int min_read_thresh = 8, min_write_thresh;
23957 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
23958 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23959 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23960 static char sysctl_bootid[16];
23961
23962 diff -urNp linux-3.0.3/drivers/char/sonypi.c linux-3.0.3/drivers/char/sonypi.c
23963 --- linux-3.0.3/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23964 +++ linux-3.0.3/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23965 @@ -55,6 +55,7 @@
23966 #include <asm/uaccess.h>
23967 #include <asm/io.h>
23968 #include <asm/system.h>
23969 +#include <asm/local.h>
23970
23971 #include <linux/sonypi.h>
23972
23973 @@ -491,7 +492,7 @@ static struct sonypi_device {
23974 spinlock_t fifo_lock;
23975 wait_queue_head_t fifo_proc_list;
23976 struct fasync_struct *fifo_async;
23977 - int open_count;
23978 + local_t open_count;
23979 int model;
23980 struct input_dev *input_jog_dev;
23981 struct input_dev *input_key_dev;
23982 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23983 static int sonypi_misc_release(struct inode *inode, struct file *file)
23984 {
23985 mutex_lock(&sonypi_device.lock);
23986 - sonypi_device.open_count--;
23987 + local_dec(&sonypi_device.open_count);
23988 mutex_unlock(&sonypi_device.lock);
23989 return 0;
23990 }
23991 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23992 {
23993 mutex_lock(&sonypi_device.lock);
23994 /* Flush input queue on first open */
23995 - if (!sonypi_device.open_count)
23996 + if (!local_read(&sonypi_device.open_count))
23997 kfifo_reset(&sonypi_device.fifo);
23998 - sonypi_device.open_count++;
23999 + local_inc(&sonypi_device.open_count);
24000 mutex_unlock(&sonypi_device.lock);
24001
24002 return 0;
24003 diff -urNp linux-3.0.3/drivers/char/tpm/tpm_bios.c linux-3.0.3/drivers/char/tpm/tpm_bios.c
24004 --- linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
24005 +++ linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
24006 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24007 event = addr;
24008
24009 if ((event->event_type == 0 && event->event_size == 0) ||
24010 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24011 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24012 return NULL;
24013
24014 return addr;
24015 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24016 return NULL;
24017
24018 if ((event->event_type == 0 && event->event_size == 0) ||
24019 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24020 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24021 return NULL;
24022
24023 (*pos)++;
24024 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24025 int i;
24026
24027 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24028 - seq_putc(m, data[i]);
24029 + if (!seq_putc(m, data[i]))
24030 + return -EFAULT;
24031
24032 return 0;
24033 }
24034 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24035 log->bios_event_log_end = log->bios_event_log + len;
24036
24037 virt = acpi_os_map_memory(start, len);
24038 + if (!virt) {
24039 + kfree(log->bios_event_log);
24040 + log->bios_event_log = NULL;
24041 + return -EFAULT;
24042 + }
24043
24044 memcpy(log->bios_event_log, virt, len);
24045
24046 diff -urNp linux-3.0.3/drivers/char/tpm/tpm.c linux-3.0.3/drivers/char/tpm/tpm.c
24047 --- linux-3.0.3/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24048 +++ linux-3.0.3/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24049 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24050 chip->vendor.req_complete_val)
24051 goto out_recv;
24052
24053 - if ((status == chip->vendor.req_canceled)) {
24054 + if (status == chip->vendor.req_canceled) {
24055 dev_err(chip->dev, "Operation Canceled\n");
24056 rc = -ECANCELED;
24057 goto out;
24058 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24059
24060 struct tpm_chip *chip = dev_get_drvdata(dev);
24061
24062 + pax_track_stack();
24063 +
24064 tpm_cmd.header.in = tpm_readpubek_header;
24065 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24066 "attempting to read the PUBEK");
24067 diff -urNp linux-3.0.3/drivers/crypto/hifn_795x.c linux-3.0.3/drivers/crypto/hifn_795x.c
24068 --- linux-3.0.3/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24069 +++ linux-3.0.3/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24070 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24071 0xCA, 0x34, 0x2B, 0x2E};
24072 struct scatterlist sg;
24073
24074 + pax_track_stack();
24075 +
24076 memset(src, 0, sizeof(src));
24077 memset(ctx.key, 0, sizeof(ctx.key));
24078
24079 diff -urNp linux-3.0.3/drivers/crypto/padlock-aes.c linux-3.0.3/drivers/crypto/padlock-aes.c
24080 --- linux-3.0.3/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24081 +++ linux-3.0.3/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24082 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24083 struct crypto_aes_ctx gen_aes;
24084 int cpu;
24085
24086 + pax_track_stack();
24087 +
24088 if (key_len % 8) {
24089 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24090 return -EINVAL;
24091 diff -urNp linux-3.0.3/drivers/edac/edac_pci_sysfs.c linux-3.0.3/drivers/edac/edac_pci_sysfs.c
24092 --- linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24093 +++ linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24094 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24095 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24096 static int edac_pci_poll_msec = 1000; /* one second workq period */
24097
24098 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24099 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24100 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24101 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24102
24103 static struct kobject *edac_pci_top_main_kobj;
24104 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24105 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24106 edac_printk(KERN_CRIT, EDAC_PCI,
24107 "Signaled System Error on %s\n",
24108 pci_name(dev));
24109 - atomic_inc(&pci_nonparity_count);
24110 + atomic_inc_unchecked(&pci_nonparity_count);
24111 }
24112
24113 if (status & (PCI_STATUS_PARITY)) {
24114 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24115 "Master Data Parity Error on %s\n",
24116 pci_name(dev));
24117
24118 - atomic_inc(&pci_parity_count);
24119 + atomic_inc_unchecked(&pci_parity_count);
24120 }
24121
24122 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24123 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24124 "Detected Parity Error on %s\n",
24125 pci_name(dev));
24126
24127 - atomic_inc(&pci_parity_count);
24128 + atomic_inc_unchecked(&pci_parity_count);
24129 }
24130 }
24131
24132 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24133 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24134 "Signaled System Error on %s\n",
24135 pci_name(dev));
24136 - atomic_inc(&pci_nonparity_count);
24137 + atomic_inc_unchecked(&pci_nonparity_count);
24138 }
24139
24140 if (status & (PCI_STATUS_PARITY)) {
24141 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24142 "Master Data Parity Error on "
24143 "%s\n", pci_name(dev));
24144
24145 - atomic_inc(&pci_parity_count);
24146 + atomic_inc_unchecked(&pci_parity_count);
24147 }
24148
24149 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24150 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24151 "Detected Parity Error on %s\n",
24152 pci_name(dev));
24153
24154 - atomic_inc(&pci_parity_count);
24155 + atomic_inc_unchecked(&pci_parity_count);
24156 }
24157 }
24158 }
24159 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24160 if (!check_pci_errors)
24161 return;
24162
24163 - before_count = atomic_read(&pci_parity_count);
24164 + before_count = atomic_read_unchecked(&pci_parity_count);
24165
24166 /* scan all PCI devices looking for a Parity Error on devices and
24167 * bridges.
24168 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24169 /* Only if operator has selected panic on PCI Error */
24170 if (edac_pci_get_panic_on_pe()) {
24171 /* If the count is different 'after' from 'before' */
24172 - if (before_count != atomic_read(&pci_parity_count))
24173 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24174 panic("EDAC: PCI Parity Error");
24175 }
24176 }
24177 diff -urNp linux-3.0.3/drivers/edac/i7core_edac.c linux-3.0.3/drivers/edac/i7core_edac.c
24178 --- linux-3.0.3/drivers/edac/i7core_edac.c 2011-07-21 22:17:23.000000000 -0400
24179 +++ linux-3.0.3/drivers/edac/i7core_edac.c 2011-08-23 21:47:55.000000000 -0400
24180 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24181 char *type, *optype, *err, *msg;
24182 unsigned long error = m->status & 0x1ff0000l;
24183 u32 optypenum = (m->status >> 4) & 0x07;
24184 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24185 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24186 u32 dimm = (m->misc >> 16) & 0x3;
24187 u32 channel = (m->misc >> 18) & 0x3;
24188 u32 syndrome = m->misc >> 32;
24189 diff -urNp linux-3.0.3/drivers/edac/mce_amd.h linux-3.0.3/drivers/edac/mce_amd.h
24190 --- linux-3.0.3/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24191 +++ linux-3.0.3/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24192 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24193 bool (*dc_mce)(u16, u8);
24194 bool (*ic_mce)(u16, u8);
24195 bool (*nb_mce)(u16, u8);
24196 -};
24197 +} __no_const;
24198
24199 void amd_report_gart_errors(bool);
24200 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24201 diff -urNp linux-3.0.3/drivers/firewire/core-card.c linux-3.0.3/drivers/firewire/core-card.c
24202 --- linux-3.0.3/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24203 +++ linux-3.0.3/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24204 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24205
24206 void fw_core_remove_card(struct fw_card *card)
24207 {
24208 - struct fw_card_driver dummy_driver = dummy_driver_template;
24209 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24210
24211 card->driver->update_phy_reg(card, 4,
24212 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24213 diff -urNp linux-3.0.3/drivers/firewire/core-cdev.c linux-3.0.3/drivers/firewire/core-cdev.c
24214 --- linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24215 +++ linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24216 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24217 int ret;
24218
24219 if ((request->channels == 0 && request->bandwidth == 0) ||
24220 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24221 - request->bandwidth < 0)
24222 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24223 return -EINVAL;
24224
24225 r = kmalloc(sizeof(*r), GFP_KERNEL);
24226 diff -urNp linux-3.0.3/drivers/firewire/core.h linux-3.0.3/drivers/firewire/core.h
24227 --- linux-3.0.3/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24228 +++ linux-3.0.3/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24229 @@ -101,6 +101,7 @@ struct fw_card_driver {
24230
24231 int (*stop_iso)(struct fw_iso_context *ctx);
24232 };
24233 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24234
24235 void fw_card_initialize(struct fw_card *card,
24236 const struct fw_card_driver *driver, struct device *device);
24237 diff -urNp linux-3.0.3/drivers/firewire/core-transaction.c linux-3.0.3/drivers/firewire/core-transaction.c
24238 --- linux-3.0.3/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24239 +++ linux-3.0.3/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24240 @@ -37,6 +37,7 @@
24241 #include <linux/timer.h>
24242 #include <linux/types.h>
24243 #include <linux/workqueue.h>
24244 +#include <linux/sched.h>
24245
24246 #include <asm/byteorder.h>
24247
24248 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24249 struct transaction_callback_data d;
24250 struct fw_transaction t;
24251
24252 + pax_track_stack();
24253 +
24254 init_timer_on_stack(&t.split_timeout_timer);
24255 init_completion(&d.done);
24256 d.payload = payload;
24257 diff -urNp linux-3.0.3/drivers/firmware/dmi_scan.c linux-3.0.3/drivers/firmware/dmi_scan.c
24258 --- linux-3.0.3/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24259 +++ linux-3.0.3/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24260 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24261 }
24262 }
24263 else {
24264 - /*
24265 - * no iounmap() for that ioremap(); it would be a no-op, but
24266 - * it's so early in setup that sucker gets confused into doing
24267 - * what it shouldn't if we actually call it.
24268 - */
24269 p = dmi_ioremap(0xF0000, 0x10000);
24270 if (p == NULL)
24271 goto error;
24272 diff -urNp linux-3.0.3/drivers/gpio/vr41xx_giu.c linux-3.0.3/drivers/gpio/vr41xx_giu.c
24273 --- linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24274 +++ linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24275 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24276 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24277 maskl, pendl, maskh, pendh);
24278
24279 - atomic_inc(&irq_err_count);
24280 + atomic_inc_unchecked(&irq_err_count);
24281
24282 return -EINVAL;
24283 }
24284 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c
24285 --- linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24286 +++ linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24287 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24288 struct drm_crtc *tmp;
24289 int crtc_mask = 1;
24290
24291 - WARN(!crtc, "checking null crtc?\n");
24292 + BUG_ON(!crtc);
24293
24294 dev = crtc->dev;
24295
24296 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24297 struct drm_encoder *encoder;
24298 bool ret = true;
24299
24300 + pax_track_stack();
24301 +
24302 crtc->enabled = drm_helper_crtc_in_use(crtc);
24303 if (!crtc->enabled)
24304 return true;
24305 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_drv.c linux-3.0.3/drivers/gpu/drm/drm_drv.c
24306 --- linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24307 +++ linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24308 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24309
24310 dev = file_priv->minor->dev;
24311 atomic_inc(&dev->ioctl_count);
24312 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24313 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24314 ++file_priv->ioctl_count;
24315
24316 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24317 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_fops.c linux-3.0.3/drivers/gpu/drm/drm_fops.c
24318 --- linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24319 +++ linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24320 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24321 }
24322
24323 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24324 - atomic_set(&dev->counts[i], 0);
24325 + atomic_set_unchecked(&dev->counts[i], 0);
24326
24327 dev->sigdata.lock = NULL;
24328
24329 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24330
24331 retcode = drm_open_helper(inode, filp, dev);
24332 if (!retcode) {
24333 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24334 - if (!dev->open_count++)
24335 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24336 + if (local_inc_return(&dev->open_count) == 1)
24337 retcode = drm_setup(dev);
24338 }
24339 if (!retcode) {
24340 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24341
24342 mutex_lock(&drm_global_mutex);
24343
24344 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24345 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24346
24347 if (dev->driver->preclose)
24348 dev->driver->preclose(dev, file_priv);
24349 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24350 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24351 task_pid_nr(current),
24352 (long)old_encode_dev(file_priv->minor->device),
24353 - dev->open_count);
24354 + local_read(&dev->open_count));
24355
24356 /* if the master has gone away we can't do anything with the lock */
24357 if (file_priv->minor->master)
24358 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24359 * End inline drm_release
24360 */
24361
24362 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24363 - if (!--dev->open_count) {
24364 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24365 + if (local_dec_and_test(&dev->open_count)) {
24366 if (atomic_read(&dev->ioctl_count)) {
24367 DRM_ERROR("Device busy: %d\n",
24368 atomic_read(&dev->ioctl_count));
24369 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_global.c linux-3.0.3/drivers/gpu/drm/drm_global.c
24370 --- linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24371 +++ linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24372 @@ -36,7 +36,7 @@
24373 struct drm_global_item {
24374 struct mutex mutex;
24375 void *object;
24376 - int refcount;
24377 + atomic_t refcount;
24378 };
24379
24380 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24381 @@ -49,7 +49,7 @@ void drm_global_init(void)
24382 struct drm_global_item *item = &glob[i];
24383 mutex_init(&item->mutex);
24384 item->object = NULL;
24385 - item->refcount = 0;
24386 + atomic_set(&item->refcount, 0);
24387 }
24388 }
24389
24390 @@ -59,7 +59,7 @@ void drm_global_release(void)
24391 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24392 struct drm_global_item *item = &glob[i];
24393 BUG_ON(item->object != NULL);
24394 - BUG_ON(item->refcount != 0);
24395 + BUG_ON(atomic_read(&item->refcount) != 0);
24396 }
24397 }
24398
24399 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24400 void *object;
24401
24402 mutex_lock(&item->mutex);
24403 - if (item->refcount == 0) {
24404 + if (atomic_read(&item->refcount) == 0) {
24405 item->object = kzalloc(ref->size, GFP_KERNEL);
24406 if (unlikely(item->object == NULL)) {
24407 ret = -ENOMEM;
24408 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24409 goto out_err;
24410
24411 }
24412 - ++item->refcount;
24413 + atomic_inc(&item->refcount);
24414 ref->object = item->object;
24415 object = item->object;
24416 mutex_unlock(&item->mutex);
24417 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24418 struct drm_global_item *item = &glob[ref->global_type];
24419
24420 mutex_lock(&item->mutex);
24421 - BUG_ON(item->refcount == 0);
24422 + BUG_ON(atomic_read(&item->refcount) == 0);
24423 BUG_ON(ref->object != item->object);
24424 - if (--item->refcount == 0) {
24425 + if (atomic_dec_and_test(&item->refcount)) {
24426 ref->release(ref);
24427 item->object = NULL;
24428 }
24429 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_info.c linux-3.0.3/drivers/gpu/drm/drm_info.c
24430 --- linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24431 +++ linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24432 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24433 struct drm_local_map *map;
24434 struct drm_map_list *r_list;
24435
24436 - /* Hardcoded from _DRM_FRAME_BUFFER,
24437 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24438 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24439 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24440 + static const char * const types[] = {
24441 + [_DRM_FRAME_BUFFER] = "FB",
24442 + [_DRM_REGISTERS] = "REG",
24443 + [_DRM_SHM] = "SHM",
24444 + [_DRM_AGP] = "AGP",
24445 + [_DRM_SCATTER_GATHER] = "SG",
24446 + [_DRM_CONSISTENT] = "PCI",
24447 + [_DRM_GEM] = "GEM" };
24448 const char *type;
24449 int i;
24450
24451 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24452 map = r_list->map;
24453 if (!map)
24454 continue;
24455 - if (map->type < 0 || map->type > 5)
24456 + if (map->type >= ARRAY_SIZE(types))
24457 type = "??";
24458 else
24459 type = types[map->type];
24460 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24461 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24462 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24463 vma->vm_flags & VM_IO ? 'i' : '-',
24464 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24465 + 0);
24466 +#else
24467 vma->vm_pgoff);
24468 +#endif
24469
24470 #if defined(__i386__)
24471 pgprot = pgprot_val(vma->vm_page_prot);
24472 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_ioctl.c linux-3.0.3/drivers/gpu/drm/drm_ioctl.c
24473 --- linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24474 +++ linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24475 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24476 stats->data[i].value =
24477 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24478 else
24479 - stats->data[i].value = atomic_read(&dev->counts[i]);
24480 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24481 stats->data[i].type = dev->types[i];
24482 }
24483
24484 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_lock.c linux-3.0.3/drivers/gpu/drm/drm_lock.c
24485 --- linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24486 +++ linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24487 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24488 if (drm_lock_take(&master->lock, lock->context)) {
24489 master->lock.file_priv = file_priv;
24490 master->lock.lock_time = jiffies;
24491 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24492 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24493 break; /* Got lock */
24494 }
24495
24496 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24497 return -EINVAL;
24498 }
24499
24500 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24501 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24502
24503 if (drm_lock_free(&master->lock, lock->context)) {
24504 /* FIXME: Should really bail out here. */
24505 diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c
24506 --- linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24507 +++ linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24508 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24509 dma->buflist[vertex->idx],
24510 vertex->discard, vertex->used);
24511
24512 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24513 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24514 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24515 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24516 sarea_priv->last_enqueue = dev_priv->counter - 1;
24517 sarea_priv->last_dispatch = (int)hw_status[5];
24518
24519 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24520 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24521 mc->last_render);
24522
24523 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24524 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24525 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24526 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24527 sarea_priv->last_enqueue = dev_priv->counter - 1;
24528 sarea_priv->last_dispatch = (int)hw_status[5];
24529
24530 diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h
24531 --- linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24532 +++ linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24533 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24534 int page_flipping;
24535
24536 wait_queue_head_t irq_queue;
24537 - atomic_t irq_received;
24538 - atomic_t irq_emitted;
24539 + atomic_unchecked_t irq_received;
24540 + atomic_unchecked_t irq_emitted;
24541
24542 int front_offset;
24543 } drm_i810_private_t;
24544 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c
24545 --- linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24546 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24547 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24548 I915_READ(GTIMR));
24549 }
24550 seq_printf(m, "Interrupts received: %d\n",
24551 - atomic_read(&dev_priv->irq_received));
24552 + atomic_read_unchecked(&dev_priv->irq_received));
24553 for (i = 0; i < I915_NUM_RINGS; i++) {
24554 if (IS_GEN6(dev)) {
24555 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24556 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c
24557 --- linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24558 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24559 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24560 bool can_switch;
24561
24562 spin_lock(&dev->count_lock);
24563 - can_switch = (dev->open_count == 0);
24564 + can_switch = (local_read(&dev->open_count) == 0);
24565 spin_unlock(&dev->count_lock);
24566 return can_switch;
24567 }
24568 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h
24569 --- linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24570 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24571 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24572 /* render clock increase/decrease */
24573 /* display clock increase/decrease */
24574 /* pll clock increase/decrease */
24575 -};
24576 +} __no_const;
24577
24578 struct intel_device_info {
24579 u8 gen;
24580 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24581 int current_page;
24582 int page_flipping;
24583
24584 - atomic_t irq_received;
24585 + atomic_unchecked_t irq_received;
24586
24587 /* protects the irq masks */
24588 spinlock_t irq_lock;
24589 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24590 * will be page flipped away on the next vblank. When it
24591 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24592 */
24593 - atomic_t pending_flip;
24594 + atomic_unchecked_t pending_flip;
24595 };
24596
24597 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24598 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24599 extern void intel_teardown_gmbus(struct drm_device *dev);
24600 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24601 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24602 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24603 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24604 {
24605 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24606 }
24607 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24608 --- linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24609 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24610 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24611 i915_gem_clflush_object(obj);
24612
24613 if (obj->base.pending_write_domain)
24614 - cd->flips |= atomic_read(&obj->pending_flip);
24615 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24616
24617 /* The actual obj->write_domain will be updated with
24618 * pending_write_domain after we emit the accumulated flush for all
24619 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c
24620 --- linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24621 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24622 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24623 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24624 struct drm_i915_master_private *master_priv;
24625
24626 - atomic_inc(&dev_priv->irq_received);
24627 + atomic_inc_unchecked(&dev_priv->irq_received);
24628
24629 /* disable master interrupt before clearing iir */
24630 de_ier = I915_READ(DEIER);
24631 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24632 struct drm_i915_master_private *master_priv;
24633 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24634
24635 - atomic_inc(&dev_priv->irq_received);
24636 + atomic_inc_unchecked(&dev_priv->irq_received);
24637
24638 if (IS_GEN6(dev))
24639 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24640 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24641 int ret = IRQ_NONE, pipe;
24642 bool blc_event = false;
24643
24644 - atomic_inc(&dev_priv->irq_received);
24645 + atomic_inc_unchecked(&dev_priv->irq_received);
24646
24647 iir = I915_READ(IIR);
24648
24649 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24650 {
24651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24652
24653 - atomic_set(&dev_priv->irq_received, 0);
24654 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24655
24656 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24657 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24658 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24659 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24660 int pipe;
24661
24662 - atomic_set(&dev_priv->irq_received, 0);
24663 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24664
24665 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24666 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24667 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/intel_display.c linux-3.0.3/drivers/gpu/drm/i915/intel_display.c
24668 --- linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24669 +++ linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24670 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24671
24672 wait_event(dev_priv->pending_flip_queue,
24673 atomic_read(&dev_priv->mm.wedged) ||
24674 - atomic_read(&obj->pending_flip) == 0);
24675 + atomic_read_unchecked(&obj->pending_flip) == 0);
24676
24677 /* Big Hammer, we also need to ensure that any pending
24678 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24679 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24680 obj = to_intel_framebuffer(crtc->fb)->obj;
24681 dev_priv = crtc->dev->dev_private;
24682 wait_event(dev_priv->pending_flip_queue,
24683 - atomic_read(&obj->pending_flip) == 0);
24684 + atomic_read_unchecked(&obj->pending_flip) == 0);
24685 }
24686
24687 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24688 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24689
24690 atomic_clear_mask(1 << intel_crtc->plane,
24691 &obj->pending_flip.counter);
24692 - if (atomic_read(&obj->pending_flip) == 0)
24693 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24694 wake_up(&dev_priv->pending_flip_queue);
24695
24696 schedule_work(&work->work);
24697 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24698 /* Block clients from rendering to the new back buffer until
24699 * the flip occurs and the object is no longer visible.
24700 */
24701 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24702 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24703
24704 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24705 if (ret)
24706 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24707 return 0;
24708
24709 cleanup_pending:
24710 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24711 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24712 cleanup_objs:
24713 drm_gem_object_unreference(&work->old_fb_obj->base);
24714 drm_gem_object_unreference(&obj->base);
24715 diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h
24716 --- linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24717 +++ linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24718 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24719 u32 clear_cmd;
24720 u32 maccess;
24721
24722 - atomic_t vbl_received; /**< Number of vblanks received. */
24723 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24724 wait_queue_head_t fence_queue;
24725 - atomic_t last_fence_retired;
24726 + atomic_unchecked_t last_fence_retired;
24727 u32 next_fence_to_post;
24728
24729 unsigned int fb_cpp;
24730 diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c
24731 --- linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24732 +++ linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24733 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24734 if (crtc != 0)
24735 return 0;
24736
24737 - return atomic_read(&dev_priv->vbl_received);
24738 + return atomic_read_unchecked(&dev_priv->vbl_received);
24739 }
24740
24741
24742 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24743 /* VBLANK interrupt */
24744 if (status & MGA_VLINEPEN) {
24745 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24746 - atomic_inc(&dev_priv->vbl_received);
24747 + atomic_inc_unchecked(&dev_priv->vbl_received);
24748 drm_handle_vblank(dev, 0);
24749 handled = 1;
24750 }
24751 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24752 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24753 MGA_WRITE(MGA_PRIMEND, prim_end);
24754
24755 - atomic_inc(&dev_priv->last_fence_retired);
24756 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24757 DRM_WAKEUP(&dev_priv->fence_queue);
24758 handled = 1;
24759 }
24760 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24761 * using fences.
24762 */
24763 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24764 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24765 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24766 - *sequence) <= (1 << 23)));
24767
24768 *sequence = cur_fence;
24769 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c
24770 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24771 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
24772 @@ -200,7 +200,7 @@ struct methods {
24773 const char desc[8];
24774 void (*loadbios)(struct drm_device *, uint8_t *);
24775 const bool rw;
24776 -};
24777 +} __do_const;
24778
24779 static struct methods shadow_methods[] = {
24780 { "PRAMIN", load_vbios_pramin, true },
24781 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24782 struct bit_table {
24783 const char id;
24784 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24785 -};
24786 +} __no_const;
24787
24788 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24789
24790 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h
24791 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24792 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24793 @@ -227,7 +227,7 @@ struct nouveau_channel {
24794 struct list_head pending;
24795 uint32_t sequence;
24796 uint32_t sequence_ack;
24797 - atomic_t last_sequence_irq;
24798 + atomic_unchecked_t last_sequence_irq;
24799 } fence;
24800
24801 /* DMA push buffer */
24802 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24803 u32 handle, u16 class);
24804 void (*set_tile_region)(struct drm_device *dev, int i);
24805 void (*tlb_flush)(struct drm_device *, int engine);
24806 -};
24807 +} __no_const;
24808
24809 struct nouveau_instmem_engine {
24810 void *priv;
24811 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24812 struct nouveau_mc_engine {
24813 int (*init)(struct drm_device *dev);
24814 void (*takedown)(struct drm_device *dev);
24815 -};
24816 +} __no_const;
24817
24818 struct nouveau_timer_engine {
24819 int (*init)(struct drm_device *dev);
24820 void (*takedown)(struct drm_device *dev);
24821 uint64_t (*read)(struct drm_device *dev);
24822 -};
24823 +} __no_const;
24824
24825 struct nouveau_fb_engine {
24826 int num_tiles;
24827 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24828 void (*put)(struct drm_device *, struct nouveau_mem **);
24829
24830 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24831 -};
24832 +} __no_const;
24833
24834 struct nouveau_engine {
24835 struct nouveau_instmem_engine instmem;
24836 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
24837 struct drm_global_reference mem_global_ref;
24838 struct ttm_bo_global_ref bo_global_ref;
24839 struct ttm_bo_device bdev;
24840 - atomic_t validate_sequence;
24841 + atomic_unchecked_t validate_sequence;
24842 } ttm;
24843
24844 struct {
24845 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c
24846 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24847 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24848 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24849 if (USE_REFCNT(dev))
24850 sequence = nvchan_rd32(chan, 0x48);
24851 else
24852 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24853 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24854
24855 if (chan->fence.sequence_ack == sequence)
24856 goto out;
24857 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24858
24859 INIT_LIST_HEAD(&chan->fence.pending);
24860 spin_lock_init(&chan->fence.lock);
24861 - atomic_set(&chan->fence.last_sequence_irq, 0);
24862 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24863 return 0;
24864 }
24865
24866 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c
24867 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24868 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24869 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24870 int trycnt = 0;
24871 int ret, i;
24872
24873 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24874 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24875 retry:
24876 if (++trycnt > 100000) {
24877 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24878 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c
24879 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24880 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24881 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24882 bool can_switch;
24883
24884 spin_lock(&dev->count_lock);
24885 - can_switch = (dev->open_count == 0);
24886 + can_switch = (local_read(&dev->open_count) == 0);
24887 spin_unlock(&dev->count_lock);
24888 return can_switch;
24889 }
24890 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c
24891 --- linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24892 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24893 @@ -560,7 +560,7 @@ static int
24894 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24895 u32 class, u32 mthd, u32 data)
24896 {
24897 - atomic_set(&chan->fence.last_sequence_irq, data);
24898 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24899 return 0;
24900 }
24901
24902 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c
24903 --- linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24904 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24905 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24906
24907 /* GH: Simple idle check.
24908 */
24909 - atomic_set(&dev_priv->idle_count, 0);
24910 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24911
24912 /* We don't support anything other than bus-mastering ring mode,
24913 * but the ring can be in either AGP or PCI space for the ring
24914 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h
24915 --- linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24916 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24917 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24918 int is_pci;
24919 unsigned long cce_buffers_offset;
24920
24921 - atomic_t idle_count;
24922 + atomic_unchecked_t idle_count;
24923
24924 int page_flipping;
24925 int current_page;
24926 u32 crtc_offset;
24927 u32 crtc_offset_cntl;
24928
24929 - atomic_t vbl_received;
24930 + atomic_unchecked_t vbl_received;
24931
24932 u32 color_fmt;
24933 unsigned int front_offset;
24934 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c
24935 --- linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24936 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24937 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24938 if (crtc != 0)
24939 return 0;
24940
24941 - return atomic_read(&dev_priv->vbl_received);
24942 + return atomic_read_unchecked(&dev_priv->vbl_received);
24943 }
24944
24945 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24946 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24947 /* VBLANK interrupt */
24948 if (status & R128_CRTC_VBLANK_INT) {
24949 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24950 - atomic_inc(&dev_priv->vbl_received);
24951 + atomic_inc_unchecked(&dev_priv->vbl_received);
24952 drm_handle_vblank(dev, 0);
24953 return IRQ_HANDLED;
24954 }
24955 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_state.c linux-3.0.3/drivers/gpu/drm/r128/r128_state.c
24956 --- linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24957 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24958 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24959
24960 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24961 {
24962 - if (atomic_read(&dev_priv->idle_count) == 0)
24963 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24964 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24965 else
24966 - atomic_set(&dev_priv->idle_count, 0);
24967 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24968 }
24969
24970 #endif
24971 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/atom.c linux-3.0.3/drivers/gpu/drm/radeon/atom.c
24972 --- linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24973 +++ linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24974 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24975 char name[512];
24976 int i;
24977
24978 + pax_track_stack();
24979 +
24980 ctx->card = card;
24981 ctx->bios = bios;
24982
24983 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c
24984 --- linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24985 +++ linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24986 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24987 regex_t mask_rex;
24988 regmatch_t match[4];
24989 char buf[1024];
24990 - size_t end;
24991 + long end;
24992 int len;
24993 int done = 0;
24994 int r;
24995 unsigned o;
24996 struct offset *offset;
24997 char last_reg_s[10];
24998 - int last_reg;
24999 + unsigned long last_reg;
25000
25001 if (regcomp
25002 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25003 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c
25004 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
25005 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
25006 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25007 struct radeon_gpio_rec gpio;
25008 struct radeon_hpd hpd;
25009
25010 + pax_track_stack();
25011 +
25012 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25013 return false;
25014
25015 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c
25016 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
25017 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
25018 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
25019 bool can_switch;
25020
25021 spin_lock(&dev->count_lock);
25022 - can_switch = (dev->open_count == 0);
25023 + can_switch = (local_read(&dev->open_count) == 0);
25024 spin_unlock(&dev->count_lock);
25025 return can_switch;
25026 }
25027 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c
25028 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
25029 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25030 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25031 uint32_t post_div;
25032 u32 pll_out_min, pll_out_max;
25033
25034 + pax_track_stack();
25035 +
25036 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25037 freq = freq * 1000;
25038
25039 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h
25040 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25041 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25042 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25043
25044 /* SW interrupt */
25045 wait_queue_head_t swi_queue;
25046 - atomic_t swi_emitted;
25047 + atomic_unchecked_t swi_emitted;
25048 int vblank_crtc;
25049 uint32_t irq_enable_reg;
25050 uint32_t r500_disp_irq_reg;
25051 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c
25052 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25053 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25054 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25055 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25056 return 0;
25057 }
25058 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25059 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25060 if (!rdev->cp.ready)
25061 /* FIXME: cp is not running assume everythings is done right
25062 * away
25063 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25064 return r;
25065 }
25066 radeon_fence_write(rdev, 0);
25067 - atomic_set(&rdev->fence_drv.seq, 0);
25068 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25069 INIT_LIST_HEAD(&rdev->fence_drv.created);
25070 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25071 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25072 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon.h linux-3.0.3/drivers/gpu/drm/radeon/radeon.h
25073 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25074 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25075 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25076 */
25077 struct radeon_fence_driver {
25078 uint32_t scratch_reg;
25079 - atomic_t seq;
25080 + atomic_unchecked_t seq;
25081 uint32_t last_seq;
25082 unsigned long last_jiffies;
25083 unsigned long last_timeout;
25084 @@ -960,7 +960,7 @@ struct radeon_asic {
25085 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25086 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25087 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25088 -};
25089 +} __no_const;
25090
25091 /*
25092 * Asic structures
25093 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c
25094 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25095 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25096 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25097 request = compat_alloc_user_space(sizeof(*request));
25098 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25099 || __put_user(req32.param, &request->param)
25100 - || __put_user((void __user *)(unsigned long)req32.value,
25101 + || __put_user((unsigned long)req32.value,
25102 &request->value))
25103 return -EFAULT;
25104
25105 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c
25106 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25107 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25108 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25109 unsigned int ret;
25110 RING_LOCALS;
25111
25112 - atomic_inc(&dev_priv->swi_emitted);
25113 - ret = atomic_read(&dev_priv->swi_emitted);
25114 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25115 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25116
25117 BEGIN_RING(4);
25118 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25119 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25120 drm_radeon_private_t *dev_priv =
25121 (drm_radeon_private_t *) dev->dev_private;
25122
25123 - atomic_set(&dev_priv->swi_emitted, 0);
25124 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25125 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25126
25127 dev->max_vblank_count = 0x001fffff;
25128 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c
25129 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25130 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25131 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25132 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25133 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25134
25135 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25136 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25137 sarea_priv->nbox * sizeof(depth_boxes[0])))
25138 return -EFAULT;
25139
25140 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25141 {
25142 drm_radeon_private_t *dev_priv = dev->dev_private;
25143 drm_radeon_getparam_t *param = data;
25144 - int value;
25145 + int value = 0;
25146
25147 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25148
25149 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c
25150 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25151 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25152 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25153 }
25154 if (unlikely(ttm_vm_ops == NULL)) {
25155 ttm_vm_ops = vma->vm_ops;
25156 - radeon_ttm_vm_ops = *ttm_vm_ops;
25157 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25158 + pax_open_kernel();
25159 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25160 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25161 + pax_close_kernel();
25162 }
25163 vma->vm_ops = &radeon_ttm_vm_ops;
25164 return 0;
25165 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/rs690.c linux-3.0.3/drivers/gpu/drm/radeon/rs690.c
25166 --- linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25167 +++ linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25168 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25169 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25170 rdev->pm.sideport_bandwidth.full)
25171 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25172 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25173 + read_delay_latency.full = dfixed_const(800 * 1000);
25174 read_delay_latency.full = dfixed_div(read_delay_latency,
25175 rdev->pm.igp_sideport_mclk);
25176 + a.full = dfixed_const(370);
25177 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25178 } else {
25179 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25180 rdev->pm.k8_bandwidth.full)
25181 diff -urNp linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c
25182 --- linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25183 +++ linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25184 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25185 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25186 struct shrink_control *sc)
25187 {
25188 - static atomic_t start_pool = ATOMIC_INIT(0);
25189 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25190 unsigned i;
25191 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25192 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25193 struct ttm_page_pool *pool;
25194 int shrink_pages = sc->nr_to_scan;
25195
25196 diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_drv.h linux-3.0.3/drivers/gpu/drm/via/via_drv.h
25197 --- linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25198 +++ linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25199 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25200 typedef uint32_t maskarray_t[5];
25201
25202 typedef struct drm_via_irq {
25203 - atomic_t irq_received;
25204 + atomic_unchecked_t irq_received;
25205 uint32_t pending_mask;
25206 uint32_t enable_mask;
25207 wait_queue_head_t irq_queue;
25208 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25209 struct timeval last_vblank;
25210 int last_vblank_valid;
25211 unsigned usec_per_vblank;
25212 - atomic_t vbl_received;
25213 + atomic_unchecked_t vbl_received;
25214 drm_via_state_t hc_state;
25215 char pci_buf[VIA_PCI_BUF_SIZE];
25216 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25217 diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_irq.c linux-3.0.3/drivers/gpu/drm/via/via_irq.c
25218 --- linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25219 +++ linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25220 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25221 if (crtc != 0)
25222 return 0;
25223
25224 - return atomic_read(&dev_priv->vbl_received);
25225 + return atomic_read_unchecked(&dev_priv->vbl_received);
25226 }
25227
25228 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25229 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25230
25231 status = VIA_READ(VIA_REG_INTERRUPT);
25232 if (status & VIA_IRQ_VBLANK_PENDING) {
25233 - atomic_inc(&dev_priv->vbl_received);
25234 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25235 + atomic_inc_unchecked(&dev_priv->vbl_received);
25236 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25237 do_gettimeofday(&cur_vblank);
25238 if (dev_priv->last_vblank_valid) {
25239 dev_priv->usec_per_vblank =
25240 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25241 dev_priv->last_vblank = cur_vblank;
25242 dev_priv->last_vblank_valid = 1;
25243 }
25244 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25245 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25246 DRM_DEBUG("US per vblank is: %u\n",
25247 dev_priv->usec_per_vblank);
25248 }
25249 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25250
25251 for (i = 0; i < dev_priv->num_irqs; ++i) {
25252 if (status & cur_irq->pending_mask) {
25253 - atomic_inc(&cur_irq->irq_received);
25254 + atomic_inc_unchecked(&cur_irq->irq_received);
25255 DRM_WAKEUP(&cur_irq->irq_queue);
25256 handled = 1;
25257 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25258 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25259 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25260 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25261 masks[irq][4]));
25262 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25263 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25264 } else {
25265 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25266 (((cur_irq_sequence =
25267 - atomic_read(&cur_irq->irq_received)) -
25268 + atomic_read_unchecked(&cur_irq->irq_received)) -
25269 *sequence) <= (1 << 23)));
25270 }
25271 *sequence = cur_irq_sequence;
25272 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25273 }
25274
25275 for (i = 0; i < dev_priv->num_irqs; ++i) {
25276 - atomic_set(&cur_irq->irq_received, 0);
25277 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25278 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25279 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25280 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25281 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25282 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25283 case VIA_IRQ_RELATIVE:
25284 irqwait->request.sequence +=
25285 - atomic_read(&cur_irq->irq_received);
25286 + atomic_read_unchecked(&cur_irq->irq_received);
25287 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25288 case VIA_IRQ_ABSOLUTE:
25289 break;
25290 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25291 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25292 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25293 @@ -240,7 +240,7 @@ struct vmw_private {
25294 * Fencing and IRQs.
25295 */
25296
25297 - atomic_t fence_seq;
25298 + atomic_unchecked_t fence_seq;
25299 wait_queue_head_t fence_queue;
25300 wait_queue_head_t fifo_queue;
25301 atomic_t fence_queue_waiters;
25302 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25303 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25304 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25305 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25306 while (!vmw_lag_lt(queue, us)) {
25307 spin_lock(&queue->lock);
25308 if (list_empty(&queue->head))
25309 - sequence = atomic_read(&dev_priv->fence_seq);
25310 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25311 else {
25312 fence = list_first_entry(&queue->head,
25313 struct vmw_fence, head);
25314 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25315 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25316 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25317 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25318 (unsigned int) min,
25319 (unsigned int) fifo->capabilities);
25320
25321 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25322 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25323 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25324 vmw_fence_queue_init(&fifo->fence_queue);
25325 return vmw_fifo_send_fence(dev_priv, &dummy);
25326 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25327
25328 fm = vmw_fifo_reserve(dev_priv, bytes);
25329 if (unlikely(fm == NULL)) {
25330 - *sequence = atomic_read(&dev_priv->fence_seq);
25331 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25332 ret = -ENOMEM;
25333 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25334 false, 3*HZ);
25335 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25336 }
25337
25338 do {
25339 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25340 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25341 } while (*sequence == 0);
25342
25343 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25344 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25345 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25346 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25347 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25348 * emitted. Then the fence is stale and signaled.
25349 */
25350
25351 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25352 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25353 > VMW_FENCE_WRAP);
25354
25355 return ret;
25356 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25357
25358 if (fifo_idle)
25359 down_read(&fifo_state->rwsem);
25360 - signal_seq = atomic_read(&dev_priv->fence_seq);
25361 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25362 ret = 0;
25363
25364 for (;;) {
25365 diff -urNp linux-3.0.3/drivers/hid/hid-core.c linux-3.0.3/drivers/hid/hid-core.c
25366 --- linux-3.0.3/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25367 +++ linux-3.0.3/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25368 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25369
25370 int hid_add_device(struct hid_device *hdev)
25371 {
25372 - static atomic_t id = ATOMIC_INIT(0);
25373 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25374 int ret;
25375
25376 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25377 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25378 /* XXX hack, any other cleaner solution after the driver core
25379 * is converted to allow more than 20 bytes as the device name? */
25380 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25381 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25382 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25383
25384 hid_debug_register(hdev, dev_name(&hdev->dev));
25385 ret = device_add(&hdev->dev);
25386 diff -urNp linux-3.0.3/drivers/hid/usbhid/hiddev.c linux-3.0.3/drivers/hid/usbhid/hiddev.c
25387 --- linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25388 +++ linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25389 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25390 break;
25391
25392 case HIDIOCAPPLICATION:
25393 - if (arg < 0 || arg >= hid->maxapplication)
25394 + if (arg >= hid->maxapplication)
25395 break;
25396
25397 for (i = 0; i < hid->maxcollection; i++)
25398 diff -urNp linux-3.0.3/drivers/hwmon/acpi_power_meter.c linux-3.0.3/drivers/hwmon/acpi_power_meter.c
25399 --- linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25400 +++ linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25401 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25402 return res;
25403
25404 temp /= 1000;
25405 - if (temp < 0)
25406 - return -EINVAL;
25407
25408 mutex_lock(&resource->lock);
25409 resource->trip[attr->index - 7] = temp;
25410 diff -urNp linux-3.0.3/drivers/hwmon/sht15.c linux-3.0.3/drivers/hwmon/sht15.c
25411 --- linux-3.0.3/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25412 +++ linux-3.0.3/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25413 @@ -166,7 +166,7 @@ struct sht15_data {
25414 int supply_uV;
25415 bool supply_uV_valid;
25416 struct work_struct update_supply_work;
25417 - atomic_t interrupt_handled;
25418 + atomic_unchecked_t interrupt_handled;
25419 };
25420
25421 /**
25422 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25423 return ret;
25424
25425 gpio_direction_input(data->pdata->gpio_data);
25426 - atomic_set(&data->interrupt_handled, 0);
25427 + atomic_set_unchecked(&data->interrupt_handled, 0);
25428
25429 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25430 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25431 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25432 /* Only relevant if the interrupt hasn't occurred. */
25433 - if (!atomic_read(&data->interrupt_handled))
25434 + if (!atomic_read_unchecked(&data->interrupt_handled))
25435 schedule_work(&data->read_work);
25436 }
25437 ret = wait_event_timeout(data->wait_queue,
25438 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25439
25440 /* First disable the interrupt */
25441 disable_irq_nosync(irq);
25442 - atomic_inc(&data->interrupt_handled);
25443 + atomic_inc_unchecked(&data->interrupt_handled);
25444 /* Then schedule a reading work struct */
25445 if (data->state != SHT15_READING_NOTHING)
25446 schedule_work(&data->read_work);
25447 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25448 * If not, then start the interrupt again - care here as could
25449 * have gone low in meantime so verify it hasn't!
25450 */
25451 - atomic_set(&data->interrupt_handled, 0);
25452 + atomic_set_unchecked(&data->interrupt_handled, 0);
25453 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25454 /* If still not occurred or another handler has been scheduled */
25455 if (gpio_get_value(data->pdata->gpio_data)
25456 - || atomic_read(&data->interrupt_handled))
25457 + || atomic_read_unchecked(&data->interrupt_handled))
25458 return;
25459 }
25460
25461 diff -urNp linux-3.0.3/drivers/hwmon/w83791d.c linux-3.0.3/drivers/hwmon/w83791d.c
25462 --- linux-3.0.3/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25463 +++ linux-3.0.3/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25464 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25465 struct i2c_board_info *info);
25466 static int w83791d_remove(struct i2c_client *client);
25467
25468 -static int w83791d_read(struct i2c_client *client, u8 register);
25469 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25470 +static int w83791d_read(struct i2c_client *client, u8 reg);
25471 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25472 static struct w83791d_data *w83791d_update_device(struct device *dev);
25473
25474 #ifdef DEBUG
25475 diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c
25476 --- linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25477 +++ linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25478 @@ -43,7 +43,7 @@
25479 extern struct i2c_adapter amd756_smbus;
25480
25481 static struct i2c_adapter *s4882_adapter;
25482 -static struct i2c_algorithm *s4882_algo;
25483 +static i2c_algorithm_no_const *s4882_algo;
25484
25485 /* Wrapper access functions for multiplexed SMBus */
25486 static DEFINE_MUTEX(amd756_lock);
25487 diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c
25488 --- linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25489 +++ linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25490 @@ -41,7 +41,7 @@
25491 extern struct i2c_adapter *nforce2_smbus;
25492
25493 static struct i2c_adapter *s4985_adapter;
25494 -static struct i2c_algorithm *s4985_algo;
25495 +static i2c_algorithm_no_const *s4985_algo;
25496
25497 /* Wrapper access functions for multiplexed SMBus */
25498 static DEFINE_MUTEX(nforce2_lock);
25499 diff -urNp linux-3.0.3/drivers/i2c/i2c-mux.c linux-3.0.3/drivers/i2c/i2c-mux.c
25500 --- linux-3.0.3/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25501 +++ linux-3.0.3/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25502 @@ -28,7 +28,7 @@
25503 /* multiplexer per channel data */
25504 struct i2c_mux_priv {
25505 struct i2c_adapter adap;
25506 - struct i2c_algorithm algo;
25507 + i2c_algorithm_no_const algo;
25508
25509 struct i2c_adapter *parent;
25510 void *mux_dev; /* the mux chip/device */
25511 diff -urNp linux-3.0.3/drivers/ide/ide-cd.c linux-3.0.3/drivers/ide/ide-cd.c
25512 --- linux-3.0.3/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25513 +++ linux-3.0.3/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25514 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25515 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25516 if ((unsigned long)buf & alignment
25517 || blk_rq_bytes(rq) & q->dma_pad_mask
25518 - || object_is_on_stack(buf))
25519 + || object_starts_on_stack(buf))
25520 drive->dma = 0;
25521 }
25522 }
25523 diff -urNp linux-3.0.3/drivers/ide/ide-floppy.c linux-3.0.3/drivers/ide/ide-floppy.c
25524 --- linux-3.0.3/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25525 +++ linux-3.0.3/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25526 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25527 u8 pc_buf[256], header_len, desc_cnt;
25528 int i, rc = 1, blocks, length;
25529
25530 + pax_track_stack();
25531 +
25532 ide_debug_log(IDE_DBG_FUNC, "enter");
25533
25534 drive->bios_cyl = 0;
25535 diff -urNp linux-3.0.3/drivers/ide/setup-pci.c linux-3.0.3/drivers/ide/setup-pci.c
25536 --- linux-3.0.3/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25537 +++ linux-3.0.3/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25538 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25539 int ret, i, n_ports = dev2 ? 4 : 2;
25540 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25541
25542 + pax_track_stack();
25543 +
25544 for (i = 0; i < n_ports / 2; i++) {
25545 ret = ide_setup_pci_controller(pdev[i], d, !i);
25546 if (ret < 0)
25547 diff -urNp linux-3.0.3/drivers/infiniband/core/cm.c linux-3.0.3/drivers/infiniband/core/cm.c
25548 --- linux-3.0.3/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25549 +++ linux-3.0.3/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25550 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25551
25552 struct cm_counter_group {
25553 struct kobject obj;
25554 - atomic_long_t counter[CM_ATTR_COUNT];
25555 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25556 };
25557
25558 struct cm_counter_attribute {
25559 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25560 struct ib_mad_send_buf *msg = NULL;
25561 int ret;
25562
25563 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25564 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25565 counter[CM_REQ_COUNTER]);
25566
25567 /* Quick state check to discard duplicate REQs. */
25568 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25569 if (!cm_id_priv)
25570 return;
25571
25572 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25573 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25574 counter[CM_REP_COUNTER]);
25575 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25576 if (ret)
25577 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25578 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25579 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25580 spin_unlock_irq(&cm_id_priv->lock);
25581 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25582 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25583 counter[CM_RTU_COUNTER]);
25584 goto out;
25585 }
25586 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25587 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25588 dreq_msg->local_comm_id);
25589 if (!cm_id_priv) {
25590 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25591 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25592 counter[CM_DREQ_COUNTER]);
25593 cm_issue_drep(work->port, work->mad_recv_wc);
25594 return -EINVAL;
25595 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25596 case IB_CM_MRA_REP_RCVD:
25597 break;
25598 case IB_CM_TIMEWAIT:
25599 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25600 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25601 counter[CM_DREQ_COUNTER]);
25602 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25603 goto unlock;
25604 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25605 cm_free_msg(msg);
25606 goto deref;
25607 case IB_CM_DREQ_RCVD:
25608 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25609 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25610 counter[CM_DREQ_COUNTER]);
25611 goto unlock;
25612 default:
25613 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25614 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25615 cm_id_priv->msg, timeout)) {
25616 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25617 - atomic_long_inc(&work->port->
25618 + atomic_long_inc_unchecked(&work->port->
25619 counter_group[CM_RECV_DUPLICATES].
25620 counter[CM_MRA_COUNTER]);
25621 goto out;
25622 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25623 break;
25624 case IB_CM_MRA_REQ_RCVD:
25625 case IB_CM_MRA_REP_RCVD:
25626 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25627 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25628 counter[CM_MRA_COUNTER]);
25629 /* fall through */
25630 default:
25631 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25632 case IB_CM_LAP_IDLE:
25633 break;
25634 case IB_CM_MRA_LAP_SENT:
25635 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25636 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25637 counter[CM_LAP_COUNTER]);
25638 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25639 goto unlock;
25640 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25641 cm_free_msg(msg);
25642 goto deref;
25643 case IB_CM_LAP_RCVD:
25644 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25645 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25646 counter[CM_LAP_COUNTER]);
25647 goto unlock;
25648 default:
25649 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25650 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25651 if (cur_cm_id_priv) {
25652 spin_unlock_irq(&cm.lock);
25653 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25654 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25655 counter[CM_SIDR_REQ_COUNTER]);
25656 goto out; /* Duplicate message. */
25657 }
25658 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25659 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25660 msg->retries = 1;
25661
25662 - atomic_long_add(1 + msg->retries,
25663 + atomic_long_add_unchecked(1 + msg->retries,
25664 &port->counter_group[CM_XMIT].counter[attr_index]);
25665 if (msg->retries)
25666 - atomic_long_add(msg->retries,
25667 + atomic_long_add_unchecked(msg->retries,
25668 &port->counter_group[CM_XMIT_RETRIES].
25669 counter[attr_index]);
25670
25671 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25672 }
25673
25674 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25675 - atomic_long_inc(&port->counter_group[CM_RECV].
25676 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25677 counter[attr_id - CM_ATTR_ID_OFFSET]);
25678
25679 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25680 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25681 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25682
25683 return sprintf(buf, "%ld\n",
25684 - atomic_long_read(&group->counter[cm_attr->index]));
25685 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25686 }
25687
25688 static const struct sysfs_ops cm_counter_ops = {
25689 diff -urNp linux-3.0.3/drivers/infiniband/core/fmr_pool.c linux-3.0.3/drivers/infiniband/core/fmr_pool.c
25690 --- linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25691 +++ linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25692 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25693
25694 struct task_struct *thread;
25695
25696 - atomic_t req_ser;
25697 - atomic_t flush_ser;
25698 + atomic_unchecked_t req_ser;
25699 + atomic_unchecked_t flush_ser;
25700
25701 wait_queue_head_t force_wait;
25702 };
25703 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25704 struct ib_fmr_pool *pool = pool_ptr;
25705
25706 do {
25707 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25708 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25709 ib_fmr_batch_release(pool);
25710
25711 - atomic_inc(&pool->flush_ser);
25712 + atomic_inc_unchecked(&pool->flush_ser);
25713 wake_up_interruptible(&pool->force_wait);
25714
25715 if (pool->flush_function)
25716 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25717 }
25718
25719 set_current_state(TASK_INTERRUPTIBLE);
25720 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25721 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25722 !kthread_should_stop())
25723 schedule();
25724 __set_current_state(TASK_RUNNING);
25725 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25726 pool->dirty_watermark = params->dirty_watermark;
25727 pool->dirty_len = 0;
25728 spin_lock_init(&pool->pool_lock);
25729 - atomic_set(&pool->req_ser, 0);
25730 - atomic_set(&pool->flush_ser, 0);
25731 + atomic_set_unchecked(&pool->req_ser, 0);
25732 + atomic_set_unchecked(&pool->flush_ser, 0);
25733 init_waitqueue_head(&pool->force_wait);
25734
25735 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25736 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25737 }
25738 spin_unlock_irq(&pool->pool_lock);
25739
25740 - serial = atomic_inc_return(&pool->req_ser);
25741 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25742 wake_up_process(pool->thread);
25743
25744 if (wait_event_interruptible(pool->force_wait,
25745 - atomic_read(&pool->flush_ser) - serial >= 0))
25746 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25747 return -EINTR;
25748
25749 return 0;
25750 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25751 } else {
25752 list_add_tail(&fmr->list, &pool->dirty_list);
25753 if (++pool->dirty_len >= pool->dirty_watermark) {
25754 - atomic_inc(&pool->req_ser);
25755 + atomic_inc_unchecked(&pool->req_ser);
25756 wake_up_process(pool->thread);
25757 }
25758 }
25759 diff -urNp linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c
25760 --- linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25761 +++ linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25762 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25763 int err;
25764 struct fw_ri_tpte tpt;
25765 u32 stag_idx;
25766 - static atomic_t key;
25767 + static atomic_unchecked_t key;
25768
25769 if (c4iw_fatal_error(rdev))
25770 return -EIO;
25771 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25772 &rdev->resource.tpt_fifo_lock);
25773 if (!stag_idx)
25774 return -ENOMEM;
25775 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25776 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25777 }
25778 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25779 __func__, stag_state, type, pdid, stag_idx);
25780 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c
25781 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25782 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25783 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25784 struct infinipath_counters counters;
25785 struct ipath_devdata *dd;
25786
25787 + pax_track_stack();
25788 +
25789 dd = file->f_path.dentry->d_inode->i_private;
25790 dd->ipath_f_read_counters(dd, &counters);
25791
25792 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c
25793 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25794 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25795 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25796 struct ib_atomic_eth *ateth;
25797 struct ipath_ack_entry *e;
25798 u64 vaddr;
25799 - atomic64_t *maddr;
25800 + atomic64_unchecked_t *maddr;
25801 u64 sdata;
25802 u32 rkey;
25803 u8 next;
25804 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25805 IB_ACCESS_REMOTE_ATOMIC)))
25806 goto nack_acc_unlck;
25807 /* Perform atomic OP and save result. */
25808 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25809 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25810 sdata = be64_to_cpu(ateth->swap_data);
25811 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25812 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25813 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25814 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25815 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25816 be64_to_cpu(ateth->compare_data),
25817 sdata);
25818 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c
25819 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25820 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25821 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25822 unsigned long flags;
25823 struct ib_wc wc;
25824 u64 sdata;
25825 - atomic64_t *maddr;
25826 + atomic64_unchecked_t *maddr;
25827 enum ib_wc_status send_status;
25828
25829 /*
25830 @@ -382,11 +382,11 @@ again:
25831 IB_ACCESS_REMOTE_ATOMIC)))
25832 goto acc_err;
25833 /* Perform atomic OP and save result. */
25834 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25835 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25836 sdata = wqe->wr.wr.atomic.compare_add;
25837 *(u64 *) sqp->s_sge.sge.vaddr =
25838 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25839 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25840 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25841 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25842 sdata, wqe->wr.wr.atomic.swap);
25843 goto send_comp;
25844 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.c linux-3.0.3/drivers/infiniband/hw/nes/nes.c
25845 --- linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25846 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25847 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25848 LIST_HEAD(nes_adapter_list);
25849 static LIST_HEAD(nes_dev_list);
25850
25851 -atomic_t qps_destroyed;
25852 +atomic_unchecked_t qps_destroyed;
25853
25854 static unsigned int ee_flsh_adapter;
25855 static unsigned int sysfs_nonidx_addr;
25856 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25857 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25858 struct nes_adapter *nesadapter = nesdev->nesadapter;
25859
25860 - atomic_inc(&qps_destroyed);
25861 + atomic_inc_unchecked(&qps_destroyed);
25862
25863 /* Free the control structures */
25864
25865 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c
25866 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25867 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25868 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25869 u32 cm_packets_retrans;
25870 u32 cm_packets_created;
25871 u32 cm_packets_received;
25872 -atomic_t cm_listens_created;
25873 -atomic_t cm_listens_destroyed;
25874 +atomic_unchecked_t cm_listens_created;
25875 +atomic_unchecked_t cm_listens_destroyed;
25876 u32 cm_backlog_drops;
25877 -atomic_t cm_loopbacks;
25878 -atomic_t cm_nodes_created;
25879 -atomic_t cm_nodes_destroyed;
25880 -atomic_t cm_accel_dropped_pkts;
25881 -atomic_t cm_resets_recvd;
25882 +atomic_unchecked_t cm_loopbacks;
25883 +atomic_unchecked_t cm_nodes_created;
25884 +atomic_unchecked_t cm_nodes_destroyed;
25885 +atomic_unchecked_t cm_accel_dropped_pkts;
25886 +atomic_unchecked_t cm_resets_recvd;
25887
25888 static inline int mini_cm_accelerated(struct nes_cm_core *,
25889 struct nes_cm_node *);
25890 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25891
25892 static struct nes_cm_core *g_cm_core;
25893
25894 -atomic_t cm_connects;
25895 -atomic_t cm_accepts;
25896 -atomic_t cm_disconnects;
25897 -atomic_t cm_closes;
25898 -atomic_t cm_connecteds;
25899 -atomic_t cm_connect_reqs;
25900 -atomic_t cm_rejects;
25901 +atomic_unchecked_t cm_connects;
25902 +atomic_unchecked_t cm_accepts;
25903 +atomic_unchecked_t cm_disconnects;
25904 +atomic_unchecked_t cm_closes;
25905 +atomic_unchecked_t cm_connecteds;
25906 +atomic_unchecked_t cm_connect_reqs;
25907 +atomic_unchecked_t cm_rejects;
25908
25909
25910 /**
25911 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25912 kfree(listener);
25913 listener = NULL;
25914 ret = 0;
25915 - atomic_inc(&cm_listens_destroyed);
25916 + atomic_inc_unchecked(&cm_listens_destroyed);
25917 } else {
25918 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25919 }
25920 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25921 cm_node->rem_mac);
25922
25923 add_hte_node(cm_core, cm_node);
25924 - atomic_inc(&cm_nodes_created);
25925 + atomic_inc_unchecked(&cm_nodes_created);
25926
25927 return cm_node;
25928 }
25929 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25930 }
25931
25932 atomic_dec(&cm_core->node_cnt);
25933 - atomic_inc(&cm_nodes_destroyed);
25934 + atomic_inc_unchecked(&cm_nodes_destroyed);
25935 nesqp = cm_node->nesqp;
25936 if (nesqp) {
25937 nesqp->cm_node = NULL;
25938 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25939
25940 static void drop_packet(struct sk_buff *skb)
25941 {
25942 - atomic_inc(&cm_accel_dropped_pkts);
25943 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25944 dev_kfree_skb_any(skb);
25945 }
25946
25947 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25948 {
25949
25950 int reset = 0; /* whether to send reset in case of err.. */
25951 - atomic_inc(&cm_resets_recvd);
25952 + atomic_inc_unchecked(&cm_resets_recvd);
25953 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25954 " refcnt=%d\n", cm_node, cm_node->state,
25955 atomic_read(&cm_node->ref_count));
25956 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25957 rem_ref_cm_node(cm_node->cm_core, cm_node);
25958 return NULL;
25959 }
25960 - atomic_inc(&cm_loopbacks);
25961 + atomic_inc_unchecked(&cm_loopbacks);
25962 loopbackremotenode->loopbackpartner = cm_node;
25963 loopbackremotenode->tcp_cntxt.rcv_wscale =
25964 NES_CM_DEFAULT_RCV_WND_SCALE;
25965 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25966 add_ref_cm_node(cm_node);
25967 } else if (cm_node->state == NES_CM_STATE_TSA) {
25968 rem_ref_cm_node(cm_core, cm_node);
25969 - atomic_inc(&cm_accel_dropped_pkts);
25970 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25971 dev_kfree_skb_any(skb);
25972 break;
25973 }
25974 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25975
25976 if ((cm_id) && (cm_id->event_handler)) {
25977 if (issue_disconn) {
25978 - atomic_inc(&cm_disconnects);
25979 + atomic_inc_unchecked(&cm_disconnects);
25980 cm_event.event = IW_CM_EVENT_DISCONNECT;
25981 cm_event.status = disconn_status;
25982 cm_event.local_addr = cm_id->local_addr;
25983 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25984 }
25985
25986 if (issue_close) {
25987 - atomic_inc(&cm_closes);
25988 + atomic_inc_unchecked(&cm_closes);
25989 nes_disconnect(nesqp, 1);
25990
25991 cm_id->provider_data = nesqp;
25992 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25993
25994 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25995 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25996 - atomic_inc(&cm_accepts);
25997 + atomic_inc_unchecked(&cm_accepts);
25998
25999 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26000 netdev_refcnt_read(nesvnic->netdev));
26001 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26002
26003 struct nes_cm_core *cm_core;
26004
26005 - atomic_inc(&cm_rejects);
26006 + atomic_inc_unchecked(&cm_rejects);
26007 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26008 loopback = cm_node->loopbackpartner;
26009 cm_core = cm_node->cm_core;
26010 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26011 ntohl(cm_id->local_addr.sin_addr.s_addr),
26012 ntohs(cm_id->local_addr.sin_port));
26013
26014 - atomic_inc(&cm_connects);
26015 + atomic_inc_unchecked(&cm_connects);
26016 nesqp->active_conn = 1;
26017
26018 /* cache the cm_id in the qp */
26019 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26020 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26021 return err;
26022 }
26023 - atomic_inc(&cm_listens_created);
26024 + atomic_inc_unchecked(&cm_listens_created);
26025 }
26026
26027 cm_id->add_ref(cm_id);
26028 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26029 if (nesqp->destroyed) {
26030 return;
26031 }
26032 - atomic_inc(&cm_connecteds);
26033 + atomic_inc_unchecked(&cm_connecteds);
26034 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26035 " local port 0x%04X. jiffies = %lu.\n",
26036 nesqp->hwqp.qp_id,
26037 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26038
26039 cm_id->add_ref(cm_id);
26040 ret = cm_id->event_handler(cm_id, &cm_event);
26041 - atomic_inc(&cm_closes);
26042 + atomic_inc_unchecked(&cm_closes);
26043 cm_event.event = IW_CM_EVENT_CLOSE;
26044 cm_event.status = 0;
26045 cm_event.provider_data = cm_id->provider_data;
26046 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26047 return;
26048 cm_id = cm_node->cm_id;
26049
26050 - atomic_inc(&cm_connect_reqs);
26051 + atomic_inc_unchecked(&cm_connect_reqs);
26052 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26053 cm_node, cm_id, jiffies);
26054
26055 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26056 return;
26057 cm_id = cm_node->cm_id;
26058
26059 - atomic_inc(&cm_connect_reqs);
26060 + atomic_inc_unchecked(&cm_connect_reqs);
26061 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26062 cm_node, cm_id, jiffies);
26063
26064 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.h linux-3.0.3/drivers/infiniband/hw/nes/nes.h
26065 --- linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26066 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26067 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26068 extern unsigned int wqm_quanta;
26069 extern struct list_head nes_adapter_list;
26070
26071 -extern atomic_t cm_connects;
26072 -extern atomic_t cm_accepts;
26073 -extern atomic_t cm_disconnects;
26074 -extern atomic_t cm_closes;
26075 -extern atomic_t cm_connecteds;
26076 -extern atomic_t cm_connect_reqs;
26077 -extern atomic_t cm_rejects;
26078 -extern atomic_t mod_qp_timouts;
26079 -extern atomic_t qps_created;
26080 -extern atomic_t qps_destroyed;
26081 -extern atomic_t sw_qps_destroyed;
26082 +extern atomic_unchecked_t cm_connects;
26083 +extern atomic_unchecked_t cm_accepts;
26084 +extern atomic_unchecked_t cm_disconnects;
26085 +extern atomic_unchecked_t cm_closes;
26086 +extern atomic_unchecked_t cm_connecteds;
26087 +extern atomic_unchecked_t cm_connect_reqs;
26088 +extern atomic_unchecked_t cm_rejects;
26089 +extern atomic_unchecked_t mod_qp_timouts;
26090 +extern atomic_unchecked_t qps_created;
26091 +extern atomic_unchecked_t qps_destroyed;
26092 +extern atomic_unchecked_t sw_qps_destroyed;
26093 extern u32 mh_detected;
26094 extern u32 mh_pauses_sent;
26095 extern u32 cm_packets_sent;
26096 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26097 extern u32 cm_packets_received;
26098 extern u32 cm_packets_dropped;
26099 extern u32 cm_packets_retrans;
26100 -extern atomic_t cm_listens_created;
26101 -extern atomic_t cm_listens_destroyed;
26102 +extern atomic_unchecked_t cm_listens_created;
26103 +extern atomic_unchecked_t cm_listens_destroyed;
26104 extern u32 cm_backlog_drops;
26105 -extern atomic_t cm_loopbacks;
26106 -extern atomic_t cm_nodes_created;
26107 -extern atomic_t cm_nodes_destroyed;
26108 -extern atomic_t cm_accel_dropped_pkts;
26109 -extern atomic_t cm_resets_recvd;
26110 +extern atomic_unchecked_t cm_loopbacks;
26111 +extern atomic_unchecked_t cm_nodes_created;
26112 +extern atomic_unchecked_t cm_nodes_destroyed;
26113 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26114 +extern atomic_unchecked_t cm_resets_recvd;
26115
26116 extern u32 int_mod_timer_init;
26117 extern u32 int_mod_cq_depth_256;
26118 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c
26119 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26120 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26121 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26122 target_stat_values[++index] = mh_detected;
26123 target_stat_values[++index] = mh_pauses_sent;
26124 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26125 - target_stat_values[++index] = atomic_read(&cm_connects);
26126 - target_stat_values[++index] = atomic_read(&cm_accepts);
26127 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26128 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26129 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26130 - target_stat_values[++index] = atomic_read(&cm_rejects);
26131 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26132 - target_stat_values[++index] = atomic_read(&qps_created);
26133 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26134 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26135 - target_stat_values[++index] = atomic_read(&cm_closes);
26136 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26137 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26138 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26139 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26140 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26141 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26142 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26143 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26144 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26145 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26146 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26147 target_stat_values[++index] = cm_packets_sent;
26148 target_stat_values[++index] = cm_packets_bounced;
26149 target_stat_values[++index] = cm_packets_created;
26150 target_stat_values[++index] = cm_packets_received;
26151 target_stat_values[++index] = cm_packets_dropped;
26152 target_stat_values[++index] = cm_packets_retrans;
26153 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26154 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26155 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26156 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26157 target_stat_values[++index] = cm_backlog_drops;
26158 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26159 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26160 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26161 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26162 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26163 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26164 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26165 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26166 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26167 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26168 target_stat_values[++index] = nesadapter->free_4kpbl;
26169 target_stat_values[++index] = nesadapter->free_256pbl;
26170 target_stat_values[++index] = int_mod_timer_init;
26171 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c
26172 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26173 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26174 @@ -46,9 +46,9 @@
26175
26176 #include <rdma/ib_umem.h>
26177
26178 -atomic_t mod_qp_timouts;
26179 -atomic_t qps_created;
26180 -atomic_t sw_qps_destroyed;
26181 +atomic_unchecked_t mod_qp_timouts;
26182 +atomic_unchecked_t qps_created;
26183 +atomic_unchecked_t sw_qps_destroyed;
26184
26185 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26186
26187 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26188 if (init_attr->create_flags)
26189 return ERR_PTR(-EINVAL);
26190
26191 - atomic_inc(&qps_created);
26192 + atomic_inc_unchecked(&qps_created);
26193 switch (init_attr->qp_type) {
26194 case IB_QPT_RC:
26195 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26196 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26197 struct iw_cm_event cm_event;
26198 int ret;
26199
26200 - atomic_inc(&sw_qps_destroyed);
26201 + atomic_inc_unchecked(&sw_qps_destroyed);
26202 nesqp->destroyed = 1;
26203
26204 /* Blow away the connection if it exists. */
26205 diff -urNp linux-3.0.3/drivers/infiniband/hw/qib/qib.h linux-3.0.3/drivers/infiniband/hw/qib/qib.h
26206 --- linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26207 +++ linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26208 @@ -51,6 +51,7 @@
26209 #include <linux/completion.h>
26210 #include <linux/kref.h>
26211 #include <linux/sched.h>
26212 +#include <linux/slab.h>
26213
26214 #include "qib_common.h"
26215 #include "qib_verbs.h"
26216 diff -urNp linux-3.0.3/drivers/input/gameport/gameport.c linux-3.0.3/drivers/input/gameport/gameport.c
26217 --- linux-3.0.3/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26218 +++ linux-3.0.3/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26219 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26220 */
26221 static void gameport_init_port(struct gameport *gameport)
26222 {
26223 - static atomic_t gameport_no = ATOMIC_INIT(0);
26224 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26225
26226 __module_get(THIS_MODULE);
26227
26228 mutex_init(&gameport->drv_mutex);
26229 device_initialize(&gameport->dev);
26230 dev_set_name(&gameport->dev, "gameport%lu",
26231 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26232 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26233 gameport->dev.bus = &gameport_bus;
26234 gameport->dev.release = gameport_release_port;
26235 if (gameport->parent)
26236 diff -urNp linux-3.0.3/drivers/input/input.c linux-3.0.3/drivers/input/input.c
26237 --- linux-3.0.3/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26238 +++ linux-3.0.3/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26239 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26240 */
26241 int input_register_device(struct input_dev *dev)
26242 {
26243 - static atomic_t input_no = ATOMIC_INIT(0);
26244 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26245 struct input_handler *handler;
26246 const char *path;
26247 int error;
26248 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26249 dev->setkeycode = input_default_setkeycode;
26250
26251 dev_set_name(&dev->dev, "input%ld",
26252 - (unsigned long) atomic_inc_return(&input_no) - 1);
26253 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26254
26255 error = device_add(&dev->dev);
26256 if (error)
26257 diff -urNp linux-3.0.3/drivers/input/joystick/sidewinder.c linux-3.0.3/drivers/input/joystick/sidewinder.c
26258 --- linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26259 +++ linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26260 @@ -30,6 +30,7 @@
26261 #include <linux/kernel.h>
26262 #include <linux/module.h>
26263 #include <linux/slab.h>
26264 +#include <linux/sched.h>
26265 #include <linux/init.h>
26266 #include <linux/input.h>
26267 #include <linux/gameport.h>
26268 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26269 unsigned char buf[SW_LENGTH];
26270 int i;
26271
26272 + pax_track_stack();
26273 +
26274 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26275
26276 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26277 diff -urNp linux-3.0.3/drivers/input/joystick/xpad.c linux-3.0.3/drivers/input/joystick/xpad.c
26278 --- linux-3.0.3/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26279 +++ linux-3.0.3/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26280 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26281
26282 static int xpad_led_probe(struct usb_xpad *xpad)
26283 {
26284 - static atomic_t led_seq = ATOMIC_INIT(0);
26285 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26286 long led_no;
26287 struct xpad_led *led;
26288 struct led_classdev *led_cdev;
26289 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26290 if (!led)
26291 return -ENOMEM;
26292
26293 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26294 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26295
26296 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26297 led->xpad = xpad;
26298 diff -urNp linux-3.0.3/drivers/input/mousedev.c linux-3.0.3/drivers/input/mousedev.c
26299 --- linux-3.0.3/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26300 +++ linux-3.0.3/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26301 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26302
26303 spin_unlock_irq(&client->packet_lock);
26304
26305 - if (copy_to_user(buffer, data, count))
26306 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26307 return -EFAULT;
26308
26309 return count;
26310 diff -urNp linux-3.0.3/drivers/input/serio/serio.c linux-3.0.3/drivers/input/serio/serio.c
26311 --- linux-3.0.3/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26312 +++ linux-3.0.3/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26313 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26314 */
26315 static void serio_init_port(struct serio *serio)
26316 {
26317 - static atomic_t serio_no = ATOMIC_INIT(0);
26318 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26319
26320 __module_get(THIS_MODULE);
26321
26322 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26323 mutex_init(&serio->drv_mutex);
26324 device_initialize(&serio->dev);
26325 dev_set_name(&serio->dev, "serio%ld",
26326 - (long)atomic_inc_return(&serio_no) - 1);
26327 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26328 serio->dev.bus = &serio_bus;
26329 serio->dev.release = serio_release_port;
26330 serio->dev.groups = serio_device_attr_groups;
26331 diff -urNp linux-3.0.3/drivers/isdn/capi/capi.c linux-3.0.3/drivers/isdn/capi/capi.c
26332 --- linux-3.0.3/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26333 +++ linux-3.0.3/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26334 @@ -83,8 +83,8 @@ struct capiminor {
26335
26336 struct capi20_appl *ap;
26337 u32 ncci;
26338 - atomic_t datahandle;
26339 - atomic_t msgid;
26340 + atomic_unchecked_t datahandle;
26341 + atomic_unchecked_t msgid;
26342
26343 struct tty_port port;
26344 int ttyinstop;
26345 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26346 capimsg_setu16(s, 2, mp->ap->applid);
26347 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26348 capimsg_setu8 (s, 5, CAPI_RESP);
26349 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26350 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26351 capimsg_setu32(s, 8, mp->ncci);
26352 capimsg_setu16(s, 12, datahandle);
26353 }
26354 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26355 mp->outbytes -= len;
26356 spin_unlock_bh(&mp->outlock);
26357
26358 - datahandle = atomic_inc_return(&mp->datahandle);
26359 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26360 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26361 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26362 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26363 capimsg_setu16(skb->data, 2, mp->ap->applid);
26364 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26365 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26366 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26367 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26368 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26369 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26370 capimsg_setu16(skb->data, 16, len); /* Data length */
26371 diff -urNp linux-3.0.3/drivers/isdn/gigaset/common.c linux-3.0.3/drivers/isdn/gigaset/common.c
26372 --- linux-3.0.3/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26373 +++ linux-3.0.3/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26374 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26375 cs->commands_pending = 0;
26376 cs->cur_at_seq = 0;
26377 cs->gotfwver = -1;
26378 - cs->open_count = 0;
26379 + local_set(&cs->open_count, 0);
26380 cs->dev = NULL;
26381 cs->tty = NULL;
26382 cs->tty_dev = NULL;
26383 diff -urNp linux-3.0.3/drivers/isdn/gigaset/gigaset.h linux-3.0.3/drivers/isdn/gigaset/gigaset.h
26384 --- linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26385 +++ linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26386 @@ -35,6 +35,7 @@
26387 #include <linux/tty_driver.h>
26388 #include <linux/list.h>
26389 #include <asm/atomic.h>
26390 +#include <asm/local.h>
26391
26392 #define GIG_VERSION {0, 5, 0, 0}
26393 #define GIG_COMPAT {0, 4, 0, 0}
26394 @@ -433,7 +434,7 @@ struct cardstate {
26395 spinlock_t cmdlock;
26396 unsigned curlen, cmdbytes;
26397
26398 - unsigned open_count;
26399 + local_t open_count;
26400 struct tty_struct *tty;
26401 struct tasklet_struct if_wake_tasklet;
26402 unsigned control_state;
26403 diff -urNp linux-3.0.3/drivers/isdn/gigaset/interface.c linux-3.0.3/drivers/isdn/gigaset/interface.c
26404 --- linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26405 +++ linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26406 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26407 }
26408 tty->driver_data = cs;
26409
26410 - ++cs->open_count;
26411 -
26412 - if (cs->open_count == 1) {
26413 + if (local_inc_return(&cs->open_count) == 1) {
26414 spin_lock_irqsave(&cs->lock, flags);
26415 cs->tty = tty;
26416 spin_unlock_irqrestore(&cs->lock, flags);
26417 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26418
26419 if (!cs->connected)
26420 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26421 - else if (!cs->open_count)
26422 + else if (!local_read(&cs->open_count))
26423 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26424 else {
26425 - if (!--cs->open_count) {
26426 + if (!local_dec_return(&cs->open_count)) {
26427 spin_lock_irqsave(&cs->lock, flags);
26428 cs->tty = NULL;
26429 spin_unlock_irqrestore(&cs->lock, flags);
26430 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26431 if (!cs->connected) {
26432 gig_dbg(DEBUG_IF, "not connected");
26433 retval = -ENODEV;
26434 - } else if (!cs->open_count)
26435 + } else if (!local_read(&cs->open_count))
26436 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26437 else {
26438 retval = 0;
26439 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26440 retval = -ENODEV;
26441 goto done;
26442 }
26443 - if (!cs->open_count) {
26444 + if (!local_read(&cs->open_count)) {
26445 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26446 retval = -ENODEV;
26447 goto done;
26448 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26449 if (!cs->connected) {
26450 gig_dbg(DEBUG_IF, "not connected");
26451 retval = -ENODEV;
26452 - } else if (!cs->open_count)
26453 + } else if (!local_read(&cs->open_count))
26454 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26455 else if (cs->mstate != MS_LOCKED) {
26456 dev_warn(cs->dev, "can't write to unlocked device\n");
26457 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26458
26459 if (!cs->connected)
26460 gig_dbg(DEBUG_IF, "not connected");
26461 - else if (!cs->open_count)
26462 + else if (!local_read(&cs->open_count))
26463 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26464 else if (cs->mstate != MS_LOCKED)
26465 dev_warn(cs->dev, "can't write to unlocked device\n");
26466 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26467
26468 if (!cs->connected)
26469 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26470 - else if (!cs->open_count)
26471 + else if (!local_read(&cs->open_count))
26472 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26473 else
26474 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26475 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26476
26477 if (!cs->connected)
26478 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26479 - else if (!cs->open_count)
26480 + else if (!local_read(&cs->open_count))
26481 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26482 else
26483 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26484 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26485 goto out;
26486 }
26487
26488 - if (!cs->open_count) {
26489 + if (!local_read(&cs->open_count)) {
26490 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26491 goto out;
26492 }
26493 diff -urNp linux-3.0.3/drivers/isdn/hardware/avm/b1.c linux-3.0.3/drivers/isdn/hardware/avm/b1.c
26494 --- linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26495 +++ linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26496 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26497 }
26498 if (left) {
26499 if (t4file->user) {
26500 - if (copy_from_user(buf, dp, left))
26501 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26502 return -EFAULT;
26503 } else {
26504 memcpy(buf, dp, left);
26505 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26506 }
26507 if (left) {
26508 if (config->user) {
26509 - if (copy_from_user(buf, dp, left))
26510 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26511 return -EFAULT;
26512 } else {
26513 memcpy(buf, dp, left);
26514 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c
26515 --- linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26516 +++ linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26517 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26518 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26519 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26520
26521 + pax_track_stack();
26522
26523 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26524 {
26525 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c
26526 --- linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26527 +++ linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26528 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26529 IDI_SYNC_REQ req;
26530 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26531
26532 + pax_track_stack();
26533 +
26534 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26535
26536 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26537 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c
26538 --- linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26539 +++ linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26540 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26541 IDI_SYNC_REQ req;
26542 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26543
26544 + pax_track_stack();
26545 +
26546 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26547
26548 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26549 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c
26550 --- linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26551 +++ linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26552 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26553 IDI_SYNC_REQ req;
26554 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26555
26556 + pax_track_stack();
26557 +
26558 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26559
26560 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26561 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h
26562 --- linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26563 +++ linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26564 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26565 } diva_didd_add_adapter_t;
26566 typedef struct _diva_didd_remove_adapter {
26567 IDI_CALL p_request;
26568 -} diva_didd_remove_adapter_t;
26569 +} __no_const diva_didd_remove_adapter_t;
26570 typedef struct _diva_didd_read_adapter_array {
26571 void * buffer;
26572 dword length;
26573 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c
26574 --- linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26575 +++ linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26576 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26577 IDI_SYNC_REQ req;
26578 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26579
26580 + pax_track_stack();
26581 +
26582 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26583
26584 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26585 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/message.c linux-3.0.3/drivers/isdn/hardware/eicon/message.c
26586 --- linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26587 +++ linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26588 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26589 dword d;
26590 word w;
26591
26592 + pax_track_stack();
26593 +
26594 a = plci->adapter;
26595 Id = ((word)plci->Id<<8)|a->Id;
26596 PUT_WORD(&SS_Ind[4],0x0000);
26597 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26598 word j, n, w;
26599 dword d;
26600
26601 + pax_track_stack();
26602 +
26603
26604 for(i=0;i<8;i++) bp_parms[i].length = 0;
26605 for(i=0;i<2;i++) global_config[i].length = 0;
26606 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26607 const byte llc3[] = {4,3,2,2,6,6,0};
26608 const byte header[] = {0,2,3,3,0,0,0};
26609
26610 + pax_track_stack();
26611 +
26612 for(i=0;i<8;i++) bp_parms[i].length = 0;
26613 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26614 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26615 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26616 word appl_number_group_type[MAX_APPL];
26617 PLCI *auxplci;
26618
26619 + pax_track_stack();
26620 +
26621 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26622
26623 if(!a->group_optimization_enabled)
26624 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c
26625 --- linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26626 +++ linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26627 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26628 IDI_SYNC_REQ req;
26629 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26630
26631 + pax_track_stack();
26632 +
26633 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26634
26635 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26636 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h
26637 --- linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26638 +++ linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26639 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26640 typedef struct _diva_os_idi_adapter_interface {
26641 diva_init_card_proc_t cleanup_adapter_proc;
26642 diva_cmd_card_proc_t cmd_proc;
26643 -} diva_os_idi_adapter_interface_t;
26644 +} __no_const diva_os_idi_adapter_interface_t;
26645
26646 typedef struct _diva_os_xdi_adapter {
26647 struct list_head link;
26648 diff -urNp linux-3.0.3/drivers/isdn/i4l/isdn_common.c linux-3.0.3/drivers/isdn/i4l/isdn_common.c
26649 --- linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26650 +++ linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26651 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26652 } iocpar;
26653 void __user *argp = (void __user *)arg;
26654
26655 + pax_track_stack();
26656 +
26657 #define name iocpar.name
26658 #define bname iocpar.bname
26659 #define iocts iocpar.iocts
26660 diff -urNp linux-3.0.3/drivers/isdn/icn/icn.c linux-3.0.3/drivers/isdn/icn/icn.c
26661 --- linux-3.0.3/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26662 +++ linux-3.0.3/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26663 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26664 if (count > len)
26665 count = len;
26666 if (user) {
26667 - if (copy_from_user(msg, buf, count))
26668 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26669 return -EFAULT;
26670 } else
26671 memcpy(msg, buf, count);
26672 diff -urNp linux-3.0.3/drivers/lguest/core.c linux-3.0.3/drivers/lguest/core.c
26673 --- linux-3.0.3/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26674 +++ linux-3.0.3/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26675 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26676 * it's worked so far. The end address needs +1 because __get_vm_area
26677 * allocates an extra guard page, so we need space for that.
26678 */
26679 +
26680 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26681 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26682 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26683 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26684 +#else
26685 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26686 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26687 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26688 +#endif
26689 +
26690 if (!switcher_vma) {
26691 err = -ENOMEM;
26692 printk("lguest: could not map switcher pages high\n");
26693 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26694 * Now the Switcher is mapped at the right address, we can't fail!
26695 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26696 */
26697 - memcpy(switcher_vma->addr, start_switcher_text,
26698 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26699 end_switcher_text - start_switcher_text);
26700
26701 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26702 diff -urNp linux-3.0.3/drivers/lguest/x86/core.c linux-3.0.3/drivers/lguest/x86/core.c
26703 --- linux-3.0.3/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26704 +++ linux-3.0.3/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26705 @@ -59,7 +59,7 @@ static struct {
26706 /* Offset from where switcher.S was compiled to where we've copied it */
26707 static unsigned long switcher_offset(void)
26708 {
26709 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26710 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26711 }
26712
26713 /* This cpu's struct lguest_pages. */
26714 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26715 * These copies are pretty cheap, so we do them unconditionally: */
26716 /* Save the current Host top-level page directory.
26717 */
26718 +
26719 +#ifdef CONFIG_PAX_PER_CPU_PGD
26720 + pages->state.host_cr3 = read_cr3();
26721 +#else
26722 pages->state.host_cr3 = __pa(current->mm->pgd);
26723 +#endif
26724 +
26725 /*
26726 * Set up the Guest's page tables to see this CPU's pages (and no
26727 * other CPU's pages).
26728 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26729 * compiled-in switcher code and the high-mapped copy we just made.
26730 */
26731 for (i = 0; i < IDT_ENTRIES; i++)
26732 - default_idt_entries[i] += switcher_offset();
26733 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26734
26735 /*
26736 * Set up the Switcher's per-cpu areas.
26737 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26738 * it will be undisturbed when we switch. To change %cs and jump we
26739 * need this structure to feed to Intel's "lcall" instruction.
26740 */
26741 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26742 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26743 lguest_entry.segment = LGUEST_CS;
26744
26745 /*
26746 diff -urNp linux-3.0.3/drivers/lguest/x86/switcher_32.S linux-3.0.3/drivers/lguest/x86/switcher_32.S
26747 --- linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26748 +++ linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26749 @@ -87,6 +87,7 @@
26750 #include <asm/page.h>
26751 #include <asm/segment.h>
26752 #include <asm/lguest.h>
26753 +#include <asm/processor-flags.h>
26754
26755 // We mark the start of the code to copy
26756 // It's placed in .text tho it's never run here
26757 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26758 // Changes type when we load it: damn Intel!
26759 // For after we switch over our page tables
26760 // That entry will be read-only: we'd crash.
26761 +
26762 +#ifdef CONFIG_PAX_KERNEXEC
26763 + mov %cr0, %edx
26764 + xor $X86_CR0_WP, %edx
26765 + mov %edx, %cr0
26766 +#endif
26767 +
26768 movl $(GDT_ENTRY_TSS*8), %edx
26769 ltr %dx
26770
26771 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26772 // Let's clear it again for our return.
26773 // The GDT descriptor of the Host
26774 // Points to the table after two "size" bytes
26775 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26776 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26777 // Clear "used" from type field (byte 5, bit 2)
26778 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26779 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26780 +
26781 +#ifdef CONFIG_PAX_KERNEXEC
26782 + mov %cr0, %eax
26783 + xor $X86_CR0_WP, %eax
26784 + mov %eax, %cr0
26785 +#endif
26786
26787 // Once our page table's switched, the Guest is live!
26788 // The Host fades as we run this final step.
26789 @@ -295,13 +309,12 @@ deliver_to_host:
26790 // I consulted gcc, and it gave
26791 // These instructions, which I gladly credit:
26792 leal (%edx,%ebx,8), %eax
26793 - movzwl (%eax),%edx
26794 - movl 4(%eax), %eax
26795 - xorw %ax, %ax
26796 - orl %eax, %edx
26797 + movl 4(%eax), %edx
26798 + movw (%eax), %dx
26799 // Now the address of the handler's in %edx
26800 // We call it now: its "iret" drops us home.
26801 - jmp *%edx
26802 + ljmp $__KERNEL_CS, $1f
26803 +1: jmp *%edx
26804
26805 // Every interrupt can come to us here
26806 // But we must truly tell each apart.
26807 diff -urNp linux-3.0.3/drivers/md/dm.c linux-3.0.3/drivers/md/dm.c
26808 --- linux-3.0.3/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26809 +++ linux-3.0.3/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26810 @@ -164,9 +164,9 @@ struct mapped_device {
26811 /*
26812 * Event handling.
26813 */
26814 - atomic_t event_nr;
26815 + atomic_unchecked_t event_nr;
26816 wait_queue_head_t eventq;
26817 - atomic_t uevent_seq;
26818 + atomic_unchecked_t uevent_seq;
26819 struct list_head uevent_list;
26820 spinlock_t uevent_lock; /* Protect access to uevent_list */
26821
26822 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26823 rwlock_init(&md->map_lock);
26824 atomic_set(&md->holders, 1);
26825 atomic_set(&md->open_count, 0);
26826 - atomic_set(&md->event_nr, 0);
26827 - atomic_set(&md->uevent_seq, 0);
26828 + atomic_set_unchecked(&md->event_nr, 0);
26829 + atomic_set_unchecked(&md->uevent_seq, 0);
26830 INIT_LIST_HEAD(&md->uevent_list);
26831 spin_lock_init(&md->uevent_lock);
26832
26833 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
26834
26835 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26836
26837 - atomic_inc(&md->event_nr);
26838 + atomic_inc_unchecked(&md->event_nr);
26839 wake_up(&md->eventq);
26840 }
26841
26842 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26843
26844 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26845 {
26846 - return atomic_add_return(1, &md->uevent_seq);
26847 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26848 }
26849
26850 uint32_t dm_get_event_nr(struct mapped_device *md)
26851 {
26852 - return atomic_read(&md->event_nr);
26853 + return atomic_read_unchecked(&md->event_nr);
26854 }
26855
26856 int dm_wait_event(struct mapped_device *md, int event_nr)
26857 {
26858 return wait_event_interruptible(md->eventq,
26859 - (event_nr != atomic_read(&md->event_nr)));
26860 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26861 }
26862
26863 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26864 diff -urNp linux-3.0.3/drivers/md/dm-ioctl.c linux-3.0.3/drivers/md/dm-ioctl.c
26865 --- linux-3.0.3/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26866 +++ linux-3.0.3/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26867 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26868 cmd == DM_LIST_VERSIONS_CMD)
26869 return 0;
26870
26871 - if ((cmd == DM_DEV_CREATE_CMD)) {
26872 + if (cmd == DM_DEV_CREATE_CMD) {
26873 if (!*param->name) {
26874 DMWARN("name not supplied when creating device");
26875 return -EINVAL;
26876 diff -urNp linux-3.0.3/drivers/md/dm-raid1.c linux-3.0.3/drivers/md/dm-raid1.c
26877 --- linux-3.0.3/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26878 +++ linux-3.0.3/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26879 @@ -40,7 +40,7 @@ enum dm_raid1_error {
26880
26881 struct mirror {
26882 struct mirror_set *ms;
26883 - atomic_t error_count;
26884 + atomic_unchecked_t error_count;
26885 unsigned long error_type;
26886 struct dm_dev *dev;
26887 sector_t offset;
26888 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26889 struct mirror *m;
26890
26891 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26892 - if (!atomic_read(&m->error_count))
26893 + if (!atomic_read_unchecked(&m->error_count))
26894 return m;
26895
26896 return NULL;
26897 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26898 * simple way to tell if a device has encountered
26899 * errors.
26900 */
26901 - atomic_inc(&m->error_count);
26902 + atomic_inc_unchecked(&m->error_count);
26903
26904 if (test_and_set_bit(error_type, &m->error_type))
26905 return;
26906 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26907 struct mirror *m = get_default_mirror(ms);
26908
26909 do {
26910 - if (likely(!atomic_read(&m->error_count)))
26911 + if (likely(!atomic_read_unchecked(&m->error_count)))
26912 return m;
26913
26914 if (m-- == ms->mirror)
26915 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26916 {
26917 struct mirror *default_mirror = get_default_mirror(m->ms);
26918
26919 - return !atomic_read(&default_mirror->error_count);
26920 + return !atomic_read_unchecked(&default_mirror->error_count);
26921 }
26922
26923 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26924 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26925 */
26926 if (likely(region_in_sync(ms, region, 1)))
26927 m = choose_mirror(ms, bio->bi_sector);
26928 - else if (m && atomic_read(&m->error_count))
26929 + else if (m && atomic_read_unchecked(&m->error_count))
26930 m = NULL;
26931
26932 if (likely(m))
26933 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26934 }
26935
26936 ms->mirror[mirror].ms = ms;
26937 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26938 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26939 ms->mirror[mirror].error_type = 0;
26940 ms->mirror[mirror].offset = offset;
26941
26942 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26943 */
26944 static char device_status_char(struct mirror *m)
26945 {
26946 - if (!atomic_read(&(m->error_count)))
26947 + if (!atomic_read_unchecked(&(m->error_count)))
26948 return 'A';
26949
26950 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26951 diff -urNp linux-3.0.3/drivers/md/dm-stripe.c linux-3.0.3/drivers/md/dm-stripe.c
26952 --- linux-3.0.3/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26953 +++ linux-3.0.3/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26954 @@ -20,7 +20,7 @@ struct stripe {
26955 struct dm_dev *dev;
26956 sector_t physical_start;
26957
26958 - atomic_t error_count;
26959 + atomic_unchecked_t error_count;
26960 };
26961
26962 struct stripe_c {
26963 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26964 kfree(sc);
26965 return r;
26966 }
26967 - atomic_set(&(sc->stripe[i].error_count), 0);
26968 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26969 }
26970
26971 ti->private = sc;
26972 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26973 DMEMIT("%d ", sc->stripes);
26974 for (i = 0; i < sc->stripes; i++) {
26975 DMEMIT("%s ", sc->stripe[i].dev->name);
26976 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26977 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26978 'D' : 'A';
26979 }
26980 buffer[i] = '\0';
26981 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26982 */
26983 for (i = 0; i < sc->stripes; i++)
26984 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26985 - atomic_inc(&(sc->stripe[i].error_count));
26986 - if (atomic_read(&(sc->stripe[i].error_count)) <
26987 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
26988 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26989 DM_IO_ERROR_THRESHOLD)
26990 schedule_work(&sc->trigger_event);
26991 }
26992 diff -urNp linux-3.0.3/drivers/md/dm-table.c linux-3.0.3/drivers/md/dm-table.c
26993 --- linux-3.0.3/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26994 +++ linux-3.0.3/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26995 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26996 if (!dev_size)
26997 return 0;
26998
26999 - if ((start >= dev_size) || (start + len > dev_size)) {
27000 + if ((start >= dev_size) || (len > dev_size - start)) {
27001 DMWARN("%s: %s too small for target: "
27002 "start=%llu, len=%llu, dev_size=%llu",
27003 dm_device_name(ti->table->md), bdevname(bdev, b),
27004 diff -urNp linux-3.0.3/drivers/md/md.c linux-3.0.3/drivers/md/md.c
27005 --- linux-3.0.3/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
27006 +++ linux-3.0.3/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
27007 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27008 * start build, activate spare
27009 */
27010 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27011 -static atomic_t md_event_count;
27012 +static atomic_unchecked_t md_event_count;
27013 void md_new_event(mddev_t *mddev)
27014 {
27015 - atomic_inc(&md_event_count);
27016 + atomic_inc_unchecked(&md_event_count);
27017 wake_up(&md_event_waiters);
27018 }
27019 EXPORT_SYMBOL_GPL(md_new_event);
27020 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27021 */
27022 static void md_new_event_inintr(mddev_t *mddev)
27023 {
27024 - atomic_inc(&md_event_count);
27025 + atomic_inc_unchecked(&md_event_count);
27026 wake_up(&md_event_waiters);
27027 }
27028
27029 @@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27030
27031 rdev->preferred_minor = 0xffff;
27032 rdev->data_offset = le64_to_cpu(sb->data_offset);
27033 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27034 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27035
27036 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27037 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27038 @@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27039 else
27040 sb->resync_offset = cpu_to_le64(0);
27041
27042 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27043 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27044
27045 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27046 sb->size = cpu_to_le64(mddev->dev_sectors);
27047 @@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27048 static ssize_t
27049 errors_show(mdk_rdev_t *rdev, char *page)
27050 {
27051 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27052 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27053 }
27054
27055 static ssize_t
27056 @@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27057 char *e;
27058 unsigned long n = simple_strtoul(buf, &e, 10);
27059 if (*buf && (*e == 0 || *e == '\n')) {
27060 - atomic_set(&rdev->corrected_errors, n);
27061 + atomic_set_unchecked(&rdev->corrected_errors, n);
27062 return len;
27063 }
27064 return -EINVAL;
27065 @@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27066 rdev->last_read_error.tv_sec = 0;
27067 rdev->last_read_error.tv_nsec = 0;
27068 atomic_set(&rdev->nr_pending, 0);
27069 - atomic_set(&rdev->read_errors, 0);
27070 - atomic_set(&rdev->corrected_errors, 0);
27071 + atomic_set_unchecked(&rdev->read_errors, 0);
27072 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27073
27074 INIT_LIST_HEAD(&rdev->same_set);
27075 init_waitqueue_head(&rdev->blocked_wait);
27076 @@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27077
27078 spin_unlock(&pers_lock);
27079 seq_printf(seq, "\n");
27080 - mi->event = atomic_read(&md_event_count);
27081 + mi->event = atomic_read_unchecked(&md_event_count);
27082 return 0;
27083 }
27084 if (v == (void*)2) {
27085 @@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27086 chunk_kb ? "KB" : "B");
27087 if (bitmap->file) {
27088 seq_printf(seq, ", file: ");
27089 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27090 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27091 }
27092
27093 seq_printf(seq, "\n");
27094 @@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27095 else {
27096 struct seq_file *p = file->private_data;
27097 p->private = mi;
27098 - mi->event = atomic_read(&md_event_count);
27099 + mi->event = atomic_read_unchecked(&md_event_count);
27100 }
27101 return error;
27102 }
27103 @@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27104 /* always allow read */
27105 mask = POLLIN | POLLRDNORM;
27106
27107 - if (mi->event != atomic_read(&md_event_count))
27108 + if (mi->event != atomic_read_unchecked(&md_event_count))
27109 mask |= POLLERR | POLLPRI;
27110 return mask;
27111 }
27112 @@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27113 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27114 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27115 (int)part_stat_read(&disk->part0, sectors[1]) -
27116 - atomic_read(&disk->sync_io);
27117 + atomic_read_unchecked(&disk->sync_io);
27118 /* sync IO will cause sync_io to increase before the disk_stats
27119 * as sync_io is counted when a request starts, and
27120 * disk_stats is counted when it completes.
27121 diff -urNp linux-3.0.3/drivers/md/md.h linux-3.0.3/drivers/md/md.h
27122 --- linux-3.0.3/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27123 +++ linux-3.0.3/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27124 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27125 * only maintained for arrays that
27126 * support hot removal
27127 */
27128 - atomic_t read_errors; /* number of consecutive read errors that
27129 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27130 * we have tried to ignore.
27131 */
27132 struct timespec last_read_error; /* monotonic time since our
27133 * last read error
27134 */
27135 - atomic_t corrected_errors; /* number of corrected read errors,
27136 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27137 * for reporting to userspace and storing
27138 * in superblock.
27139 */
27140 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27141
27142 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27143 {
27144 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27145 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27146 }
27147
27148 struct mdk_personality
27149 diff -urNp linux-3.0.3/drivers/md/raid10.c linux-3.0.3/drivers/md/raid10.c
27150 --- linux-3.0.3/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27151 +++ linux-3.0.3/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27152 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27153 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27154 set_bit(R10BIO_Uptodate, &r10_bio->state);
27155 else {
27156 - atomic_add(r10_bio->sectors,
27157 + atomic_add_unchecked(r10_bio->sectors,
27158 &conf->mirrors[d].rdev->corrected_errors);
27159 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27160 md_error(r10_bio->mddev,
27161 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27162 {
27163 struct timespec cur_time_mon;
27164 unsigned long hours_since_last;
27165 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27166 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27167
27168 ktime_get_ts(&cur_time_mon);
27169
27170 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27171 * overflowing the shift of read_errors by hours_since_last.
27172 */
27173 if (hours_since_last >= 8 * sizeof(read_errors))
27174 - atomic_set(&rdev->read_errors, 0);
27175 + atomic_set_unchecked(&rdev->read_errors, 0);
27176 else
27177 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27178 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27179 }
27180
27181 /*
27182 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27183 return;
27184
27185 check_decay_read_errors(mddev, rdev);
27186 - atomic_inc(&rdev->read_errors);
27187 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
27188 + atomic_inc_unchecked(&rdev->read_errors);
27189 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27190 char b[BDEVNAME_SIZE];
27191 bdevname(rdev->bdev, b);
27192
27193 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27194 "md/raid10:%s: %s: Raid device exceeded "
27195 "read_error threshold [cur %d:max %d]\n",
27196 mdname(mddev), b,
27197 - atomic_read(&rdev->read_errors), max_read_errors);
27198 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27199 printk(KERN_NOTICE
27200 "md/raid10:%s: %s: Failing raid device\n",
27201 mdname(mddev), b);
27202 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27203 test_bit(In_sync, &rdev->flags)) {
27204 atomic_inc(&rdev->nr_pending);
27205 rcu_read_unlock();
27206 - atomic_add(s, &rdev->corrected_errors);
27207 + atomic_add_unchecked(s, &rdev->corrected_errors);
27208 if (sync_page_io(rdev,
27209 r10_bio->devs[sl].addr +
27210 sect,
27211 diff -urNp linux-3.0.3/drivers/md/raid1.c linux-3.0.3/drivers/md/raid1.c
27212 --- linux-3.0.3/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27213 +++ linux-3.0.3/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27214 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27215 rdev_dec_pending(rdev, mddev);
27216 md_error(mddev, rdev);
27217 } else
27218 - atomic_add(s, &rdev->corrected_errors);
27219 + atomic_add_unchecked(s, &rdev->corrected_errors);
27220 }
27221 d = start;
27222 while (d != r1_bio->read_disk) {
27223 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27224 /* Well, this device is dead */
27225 md_error(mddev, rdev);
27226 else {
27227 - atomic_add(s, &rdev->corrected_errors);
27228 + atomic_add_unchecked(s, &rdev->corrected_errors);
27229 printk(KERN_INFO
27230 "md/raid1:%s: read error corrected "
27231 "(%d sectors at %llu on %s)\n",
27232 diff -urNp linux-3.0.3/drivers/md/raid5.c linux-3.0.3/drivers/md/raid5.c
27233 --- linux-3.0.3/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27234 +++ linux-3.0.3/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27235 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27236 bi->bi_next = NULL;
27237 if ((rw & WRITE) &&
27238 test_bit(R5_ReWrite, &sh->dev[i].flags))
27239 - atomic_add(STRIPE_SECTORS,
27240 + atomic_add_unchecked(STRIPE_SECTORS,
27241 &rdev->corrected_errors);
27242 generic_make_request(bi);
27243 } else {
27244 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27245 clear_bit(R5_ReadError, &sh->dev[i].flags);
27246 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27247 }
27248 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27249 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27250 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27251 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27252 } else {
27253 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27254 int retry = 0;
27255 rdev = conf->disks[i].rdev;
27256
27257 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27258 - atomic_inc(&rdev->read_errors);
27259 + atomic_inc_unchecked(&rdev->read_errors);
27260 if (conf->mddev->degraded >= conf->max_degraded)
27261 printk_rl(KERN_WARNING
27262 "md/raid:%s: read error not correctable "
27263 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27264 (unsigned long long)(sh->sector
27265 + rdev->data_offset),
27266 bdn);
27267 - else if (atomic_read(&rdev->read_errors)
27268 + else if (atomic_read_unchecked(&rdev->read_errors)
27269 > conf->max_nr_stripes)
27270 printk(KERN_WARNING
27271 "md/raid:%s: Too many read errors, failing device %s.\n",
27272 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27273 sector_t r_sector;
27274 struct stripe_head sh2;
27275
27276 + pax_track_stack();
27277
27278 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27279 stripe = new_sector;
27280 diff -urNp linux-3.0.3/drivers/media/common/saa7146_hlp.c linux-3.0.3/drivers/media/common/saa7146_hlp.c
27281 --- linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27282 +++ linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27283 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27284
27285 int x[32], y[32], w[32], h[32];
27286
27287 + pax_track_stack();
27288 +
27289 /* clear out memory */
27290 memset(&line_list[0], 0x00, sizeof(u32)*32);
27291 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27292 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27293 --- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27294 +++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27295 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27296 u8 buf[HOST_LINK_BUF_SIZE];
27297 int i;
27298
27299 + pax_track_stack();
27300 +
27301 dprintk("%s\n", __func__);
27302
27303 /* check if we have space for a link buf in the rx_buffer */
27304 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27305 unsigned long timeout;
27306 int written;
27307
27308 + pax_track_stack();
27309 +
27310 dprintk("%s\n", __func__);
27311
27312 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27313 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h
27314 --- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27315 +++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27316 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
27317 union {
27318 struct dmx_ts_feed ts;
27319 struct dmx_section_feed sec;
27320 - } feed;
27321 + } __no_const feed;
27322
27323 union {
27324 dmx_ts_cb ts;
27325 dmx_section_cb sec;
27326 - } cb;
27327 + } __no_const cb;
27328
27329 struct dvb_demux *demux;
27330 void *priv;
27331 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c
27332 --- linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27333 +++ linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27334 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27335 const struct dvb_device *template, void *priv, int type)
27336 {
27337 struct dvb_device *dvbdev;
27338 - struct file_operations *dvbdevfops;
27339 + file_operations_no_const *dvbdevfops;
27340 struct device *clsdev;
27341 int minor;
27342 int id;
27343 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c
27344 --- linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27345 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27346 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27347 struct dib0700_adapter_state {
27348 int (*set_param_save) (struct dvb_frontend *,
27349 struct dvb_frontend_parameters *);
27350 -};
27351 +} __no_const;
27352
27353 static int dib7070_set_param_override(struct dvb_frontend *fe,
27354 struct dvb_frontend_parameters *fep)
27355 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c
27356 --- linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27357 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27358 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27359 if (!buf)
27360 return -ENOMEM;
27361
27362 + pax_track_stack();
27363 +
27364 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27365 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27366 hx.addr, hx.len, hx.chk);
27367 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h
27368 --- linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27369 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27370 @@ -97,7 +97,7 @@
27371 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27372
27373 struct dibusb_state {
27374 - struct dib_fe_xfer_ops ops;
27375 + dib_fe_xfer_ops_no_const ops;
27376 int mt2060_present;
27377 u8 tuner_addr;
27378 };
27379 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c
27380 --- linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27381 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27382 @@ -95,7 +95,7 @@ struct su3000_state {
27383
27384 struct s6x0_state {
27385 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27386 -};
27387 +} __no_const;
27388
27389 /* debug */
27390 static int dvb_usb_dw2102_debug;
27391 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c
27392 --- linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27393 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27394 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27395 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27396 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27397
27398 + pax_track_stack();
27399
27400 data[0] = 0x8a;
27401 len_in = 1;
27402 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27403 int ret = 0, len_in;
27404 u8 data[512] = {0};
27405
27406 + pax_track_stack();
27407 +
27408 data[0] = 0x0a;
27409 len_in = 1;
27410 info("FRM Firmware Cold Reset");
27411 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000.h linux-3.0.3/drivers/media/dvb/frontends/dib3000.h
27412 --- linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27413 +++ linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27414 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27415 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27416 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27417 };
27418 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27419
27420 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27421 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27422 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27423 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27424 #else
27425 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27426 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27427 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c
27428 --- linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27429 +++ linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27430 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27431 static struct dvb_frontend_ops dib3000mb_ops;
27432
27433 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27434 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27435 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27436 {
27437 struct dib3000_state* state = NULL;
27438
27439 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c
27440 --- linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27441 +++ linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27442 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27443 int ret = -1;
27444 int sync;
27445
27446 + pax_track_stack();
27447 +
27448 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27449
27450 fcp = 3000;
27451 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/or51211.c linux-3.0.3/drivers/media/dvb/frontends/or51211.c
27452 --- linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27453 +++ linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27454 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27455 u8 tudata[585];
27456 int i;
27457
27458 + pax_track_stack();
27459 +
27460 dprintk("Firmware is %zd bytes\n",fw->size);
27461
27462 /* Get eprom data */
27463 diff -urNp linux-3.0.3/drivers/media/video/cx18/cx18-driver.c linux-3.0.3/drivers/media/video/cx18/cx18-driver.c
27464 --- linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27465 +++ linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27466 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27467 struct i2c_client c;
27468 u8 eedata[256];
27469
27470 + pax_track_stack();
27471 +
27472 memset(&c, 0, sizeof(c));
27473 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27474 c.adapter = &cx->i2c_adap[0];
27475 diff -urNp linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c
27476 --- linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27477 +++ linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27478 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27479 bool handle = false;
27480 struct ir_raw_event ir_core_event[64];
27481
27482 + pax_track_stack();
27483 +
27484 do {
27485 num = 0;
27486 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27487 diff -urNp linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27488 --- linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27489 +++ linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27490 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27491 u8 *eeprom;
27492 struct tveeprom tvdata;
27493
27494 + pax_track_stack();
27495 +
27496 memset(&tvdata,0,sizeof(tvdata));
27497
27498 eeprom = pvr2_eeprom_fetch(hdw);
27499 diff -urNp linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c
27500 --- linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27501 +++ linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27502 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27503 unsigned char localPAT[256];
27504 unsigned char localPMT[256];
27505
27506 + pax_track_stack();
27507 +
27508 /* Set video format - must be done first as it resets other settings */
27509 set_reg8(client, 0x41, h->video_format);
27510
27511 diff -urNp linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c
27512 --- linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27513 +++ linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27514 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27515 u8 tmp[512];
27516 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27517
27518 + pax_track_stack();
27519 +
27520 /* While any outstand message on the bus exists... */
27521 do {
27522
27523 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27524 u8 tmp[512];
27525 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27526
27527 + pax_track_stack();
27528 +
27529 while (loop) {
27530
27531 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27532 diff -urNp linux-3.0.3/drivers/media/video/timblogiw.c linux-3.0.3/drivers/media/video/timblogiw.c
27533 --- linux-3.0.3/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27534 +++ linux-3.0.3/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27535 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27536
27537 /* Platform device functions */
27538
27539 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27540 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27541 .vidioc_querycap = timblogiw_querycap,
27542 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27543 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27544 diff -urNp linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c
27545 --- linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27546 +++ linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27547 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27548 unsigned char rv, gv, bv;
27549 static unsigned char *Y, *U, *V;
27550
27551 + pax_track_stack();
27552 +
27553 frame = usbvision->cur_frame;
27554 image_size = frame->frmwidth * frame->frmheight;
27555 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27556 diff -urNp linux-3.0.3/drivers/media/video/videobuf-dma-sg.c linux-3.0.3/drivers/media/video/videobuf-dma-sg.c
27557 --- linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27558 +++ linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27559 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27560 {
27561 struct videobuf_queue q;
27562
27563 + pax_track_stack();
27564 +
27565 /* Required to make generic handler to call __videobuf_alloc */
27566 q.int_ops = &sg_ops;
27567
27568 diff -urNp linux-3.0.3/drivers/message/fusion/mptbase.c linux-3.0.3/drivers/message/fusion/mptbase.c
27569 --- linux-3.0.3/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27570 +++ linux-3.0.3/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27571 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27572 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27573 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27574
27575 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27576 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27577 +#else
27578 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27579 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27580 +#endif
27581 +
27582 /*
27583 * Rounding UP to nearest 4-kB boundary here...
27584 */
27585 diff -urNp linux-3.0.3/drivers/message/fusion/mptsas.c linux-3.0.3/drivers/message/fusion/mptsas.c
27586 --- linux-3.0.3/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27587 +++ linux-3.0.3/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27588 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27589 return 0;
27590 }
27591
27592 +static inline void
27593 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27594 +{
27595 + if (phy_info->port_details) {
27596 + phy_info->port_details->rphy = rphy;
27597 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27598 + ioc->name, rphy));
27599 + }
27600 +
27601 + if (rphy) {
27602 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27603 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27604 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27605 + ioc->name, rphy, rphy->dev.release));
27606 + }
27607 +}
27608 +
27609 /* no mutex */
27610 static void
27611 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27612 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27613 return NULL;
27614 }
27615
27616 -static inline void
27617 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27618 -{
27619 - if (phy_info->port_details) {
27620 - phy_info->port_details->rphy = rphy;
27621 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27622 - ioc->name, rphy));
27623 - }
27624 -
27625 - if (rphy) {
27626 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27627 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27628 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27629 - ioc->name, rphy, rphy->dev.release));
27630 - }
27631 -}
27632 -
27633 static inline struct sas_port *
27634 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27635 {
27636 diff -urNp linux-3.0.3/drivers/message/fusion/mptscsih.c linux-3.0.3/drivers/message/fusion/mptscsih.c
27637 --- linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27638 +++ linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27639 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27640
27641 h = shost_priv(SChost);
27642
27643 - if (h) {
27644 - if (h->info_kbuf == NULL)
27645 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27646 - return h->info_kbuf;
27647 - h->info_kbuf[0] = '\0';
27648 + if (!h)
27649 + return NULL;
27650
27651 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27652 - h->info_kbuf[size-1] = '\0';
27653 - }
27654 + if (h->info_kbuf == NULL)
27655 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27656 + return h->info_kbuf;
27657 + h->info_kbuf[0] = '\0';
27658 +
27659 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27660 + h->info_kbuf[size-1] = '\0';
27661
27662 return h->info_kbuf;
27663 }
27664 diff -urNp linux-3.0.3/drivers/message/i2o/i2o_config.c linux-3.0.3/drivers/message/i2o/i2o_config.c
27665 --- linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27666 +++ linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27667 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27668 struct i2o_message *msg;
27669 unsigned int iop;
27670
27671 + pax_track_stack();
27672 +
27673 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27674 return -EFAULT;
27675
27676 diff -urNp linux-3.0.3/drivers/message/i2o/i2o_proc.c linux-3.0.3/drivers/message/i2o/i2o_proc.c
27677 --- linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27678 +++ linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27679 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27680 "Array Controller Device"
27681 };
27682
27683 -static char *chtostr(u8 * chars, int n)
27684 -{
27685 - char tmp[256];
27686 - tmp[0] = 0;
27687 - return strncat(tmp, (char *)chars, n);
27688 -}
27689 -
27690 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27691 char *group)
27692 {
27693 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27694
27695 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27696 seq_printf(seq, "%-#8x", ddm_table.module_id);
27697 - seq_printf(seq, "%-29s",
27698 - chtostr(ddm_table.module_name_version, 28));
27699 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27700 seq_printf(seq, "%9d ", ddm_table.data_size);
27701 seq_printf(seq, "%8d", ddm_table.code_size);
27702
27703 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27704
27705 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27706 seq_printf(seq, "%-#8x", dst->module_id);
27707 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27708 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27709 + seq_printf(seq, "%-.28s", dst->module_name_version);
27710 + seq_printf(seq, "%-.8s", dst->date);
27711 seq_printf(seq, "%8d ", dst->module_size);
27712 seq_printf(seq, "%8d ", dst->mpb_size);
27713 seq_printf(seq, "0x%04x", dst->module_flags);
27714 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27715 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27716 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27717 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27718 - seq_printf(seq, "Vendor info : %s\n",
27719 - chtostr((u8 *) (work32 + 2), 16));
27720 - seq_printf(seq, "Product info : %s\n",
27721 - chtostr((u8 *) (work32 + 6), 16));
27722 - seq_printf(seq, "Description : %s\n",
27723 - chtostr((u8 *) (work32 + 10), 16));
27724 - seq_printf(seq, "Product rev. : %s\n",
27725 - chtostr((u8 *) (work32 + 14), 8));
27726 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27727 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27728 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27729 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27730
27731 seq_printf(seq, "Serial number : ");
27732 print_serial_number(seq, (u8 *) (work32 + 16),
27733 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27734 }
27735
27736 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27737 - seq_printf(seq, "Module name : %s\n",
27738 - chtostr(result.module_name, 24));
27739 - seq_printf(seq, "Module revision : %s\n",
27740 - chtostr(result.module_rev, 8));
27741 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27742 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27743
27744 seq_printf(seq, "Serial number : ");
27745 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27746 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27747 return 0;
27748 }
27749
27750 - seq_printf(seq, "Device name : %s\n",
27751 - chtostr(result.device_name, 64));
27752 - seq_printf(seq, "Service name : %s\n",
27753 - chtostr(result.service_name, 64));
27754 - seq_printf(seq, "Physical name : %s\n",
27755 - chtostr(result.physical_location, 64));
27756 - seq_printf(seq, "Instance number : %s\n",
27757 - chtostr(result.instance_number, 4));
27758 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27759 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27760 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27761 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27762
27763 return 0;
27764 }
27765 diff -urNp linux-3.0.3/drivers/message/i2o/iop.c linux-3.0.3/drivers/message/i2o/iop.c
27766 --- linux-3.0.3/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27767 +++ linux-3.0.3/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27768 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27769
27770 spin_lock_irqsave(&c->context_list_lock, flags);
27771
27772 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27773 - atomic_inc(&c->context_list_counter);
27774 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27775 + atomic_inc_unchecked(&c->context_list_counter);
27776
27777 - entry->context = atomic_read(&c->context_list_counter);
27778 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27779
27780 list_add(&entry->list, &c->context_list);
27781
27782 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27783
27784 #if BITS_PER_LONG == 64
27785 spin_lock_init(&c->context_list_lock);
27786 - atomic_set(&c->context_list_counter, 0);
27787 + atomic_set_unchecked(&c->context_list_counter, 0);
27788 INIT_LIST_HEAD(&c->context_list);
27789 #endif
27790
27791 diff -urNp linux-3.0.3/drivers/mfd/abx500-core.c linux-3.0.3/drivers/mfd/abx500-core.c
27792 --- linux-3.0.3/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27793 +++ linux-3.0.3/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27794 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27795
27796 struct abx500_device_entry {
27797 struct list_head list;
27798 - struct abx500_ops ops;
27799 + abx500_ops_no_const ops;
27800 struct device *dev;
27801 };
27802
27803 diff -urNp linux-3.0.3/drivers/mfd/janz-cmodio.c linux-3.0.3/drivers/mfd/janz-cmodio.c
27804 --- linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27805 +++ linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27806 @@ -13,6 +13,7 @@
27807
27808 #include <linux/kernel.h>
27809 #include <linux/module.h>
27810 +#include <linux/slab.h>
27811 #include <linux/init.h>
27812 #include <linux/pci.h>
27813 #include <linux/interrupt.h>
27814 diff -urNp linux-3.0.3/drivers/mfd/wm8350-i2c.c linux-3.0.3/drivers/mfd/wm8350-i2c.c
27815 --- linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27816 +++ linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27817 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27818 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27819 int ret;
27820
27821 + pax_track_stack();
27822 +
27823 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27824 return -EINVAL;
27825
27826 diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c
27827 --- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27828 +++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27829 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27830 * the lid is closed. This leads to interrupts as soon as a little move
27831 * is done.
27832 */
27833 - atomic_inc(&lis3_dev.count);
27834 + atomic_inc_unchecked(&lis3_dev.count);
27835
27836 wake_up_interruptible(&lis3_dev.misc_wait);
27837 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27838 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27839 if (lis3_dev.pm_dev)
27840 pm_runtime_get_sync(lis3_dev.pm_dev);
27841
27842 - atomic_set(&lis3_dev.count, 0);
27843 + atomic_set_unchecked(&lis3_dev.count, 0);
27844 return 0;
27845 }
27846
27847 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27848 add_wait_queue(&lis3_dev.misc_wait, &wait);
27849 while (true) {
27850 set_current_state(TASK_INTERRUPTIBLE);
27851 - data = atomic_xchg(&lis3_dev.count, 0);
27852 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27853 if (data)
27854 break;
27855
27856 @@ -583,7 +583,7 @@ out:
27857 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27858 {
27859 poll_wait(file, &lis3_dev.misc_wait, wait);
27860 - if (atomic_read(&lis3_dev.count))
27861 + if (atomic_read_unchecked(&lis3_dev.count))
27862 return POLLIN | POLLRDNORM;
27863 return 0;
27864 }
27865 diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h
27866 --- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27867 +++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27868 @@ -265,7 +265,7 @@ struct lis3lv02d {
27869 struct input_polled_dev *idev; /* input device */
27870 struct platform_device *pdev; /* platform device */
27871 struct regulator_bulk_data regulators[2];
27872 - atomic_t count; /* interrupt count after last read */
27873 + atomic_unchecked_t count; /* interrupt count after last read */
27874 union axis_conversion ac; /* hw -> logical axis */
27875 int mapped_btns[3];
27876
27877 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c
27878 --- linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27879 +++ linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27880 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27881 unsigned long nsec;
27882
27883 nsec = CLKS2NSEC(clks);
27884 - atomic_long_inc(&mcs_op_statistics[op].count);
27885 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
27886 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27887 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27888 if (mcs_op_statistics[op].max < nsec)
27889 mcs_op_statistics[op].max = nsec;
27890 }
27891 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c
27892 --- linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27893 +++ linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27894 @@ -32,9 +32,9 @@
27895
27896 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27897
27898 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27899 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27900 {
27901 - unsigned long val = atomic_long_read(v);
27902 + unsigned long val = atomic_long_read_unchecked(v);
27903
27904 seq_printf(s, "%16lu %s\n", val, id);
27905 }
27906 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27907
27908 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27909 for (op = 0; op < mcsop_last; op++) {
27910 - count = atomic_long_read(&mcs_op_statistics[op].count);
27911 - total = atomic_long_read(&mcs_op_statistics[op].total);
27912 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27913 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27914 max = mcs_op_statistics[op].max;
27915 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27916 count ? total / count : 0, max);
27917 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/grutables.h linux-3.0.3/drivers/misc/sgi-gru/grutables.h
27918 --- linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27919 +++ linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27920 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27921 * GRU statistics.
27922 */
27923 struct gru_stats_s {
27924 - atomic_long_t vdata_alloc;
27925 - atomic_long_t vdata_free;
27926 - atomic_long_t gts_alloc;
27927 - atomic_long_t gts_free;
27928 - atomic_long_t gms_alloc;
27929 - atomic_long_t gms_free;
27930 - atomic_long_t gts_double_allocate;
27931 - atomic_long_t assign_context;
27932 - atomic_long_t assign_context_failed;
27933 - atomic_long_t free_context;
27934 - atomic_long_t load_user_context;
27935 - atomic_long_t load_kernel_context;
27936 - atomic_long_t lock_kernel_context;
27937 - atomic_long_t unlock_kernel_context;
27938 - atomic_long_t steal_user_context;
27939 - atomic_long_t steal_kernel_context;
27940 - atomic_long_t steal_context_failed;
27941 - atomic_long_t nopfn;
27942 - atomic_long_t asid_new;
27943 - atomic_long_t asid_next;
27944 - atomic_long_t asid_wrap;
27945 - atomic_long_t asid_reuse;
27946 - atomic_long_t intr;
27947 - atomic_long_t intr_cbr;
27948 - atomic_long_t intr_tfh;
27949 - atomic_long_t intr_spurious;
27950 - atomic_long_t intr_mm_lock_failed;
27951 - atomic_long_t call_os;
27952 - atomic_long_t call_os_wait_queue;
27953 - atomic_long_t user_flush_tlb;
27954 - atomic_long_t user_unload_context;
27955 - atomic_long_t user_exception;
27956 - atomic_long_t set_context_option;
27957 - atomic_long_t check_context_retarget_intr;
27958 - atomic_long_t check_context_unload;
27959 - atomic_long_t tlb_dropin;
27960 - atomic_long_t tlb_preload_page;
27961 - atomic_long_t tlb_dropin_fail_no_asid;
27962 - atomic_long_t tlb_dropin_fail_upm;
27963 - atomic_long_t tlb_dropin_fail_invalid;
27964 - atomic_long_t tlb_dropin_fail_range_active;
27965 - atomic_long_t tlb_dropin_fail_idle;
27966 - atomic_long_t tlb_dropin_fail_fmm;
27967 - atomic_long_t tlb_dropin_fail_no_exception;
27968 - atomic_long_t tfh_stale_on_fault;
27969 - atomic_long_t mmu_invalidate_range;
27970 - atomic_long_t mmu_invalidate_page;
27971 - atomic_long_t flush_tlb;
27972 - atomic_long_t flush_tlb_gru;
27973 - atomic_long_t flush_tlb_gru_tgh;
27974 - atomic_long_t flush_tlb_gru_zero_asid;
27975 -
27976 - atomic_long_t copy_gpa;
27977 - atomic_long_t read_gpa;
27978 -
27979 - atomic_long_t mesq_receive;
27980 - atomic_long_t mesq_receive_none;
27981 - atomic_long_t mesq_send;
27982 - atomic_long_t mesq_send_failed;
27983 - atomic_long_t mesq_noop;
27984 - atomic_long_t mesq_send_unexpected_error;
27985 - atomic_long_t mesq_send_lb_overflow;
27986 - atomic_long_t mesq_send_qlimit_reached;
27987 - atomic_long_t mesq_send_amo_nacked;
27988 - atomic_long_t mesq_send_put_nacked;
27989 - atomic_long_t mesq_page_overflow;
27990 - atomic_long_t mesq_qf_locked;
27991 - atomic_long_t mesq_qf_noop_not_full;
27992 - atomic_long_t mesq_qf_switch_head_failed;
27993 - atomic_long_t mesq_qf_unexpected_error;
27994 - atomic_long_t mesq_noop_unexpected_error;
27995 - atomic_long_t mesq_noop_lb_overflow;
27996 - atomic_long_t mesq_noop_qlimit_reached;
27997 - atomic_long_t mesq_noop_amo_nacked;
27998 - atomic_long_t mesq_noop_put_nacked;
27999 - atomic_long_t mesq_noop_page_overflow;
28000 + atomic_long_unchecked_t vdata_alloc;
28001 + atomic_long_unchecked_t vdata_free;
28002 + atomic_long_unchecked_t gts_alloc;
28003 + atomic_long_unchecked_t gts_free;
28004 + atomic_long_unchecked_t gms_alloc;
28005 + atomic_long_unchecked_t gms_free;
28006 + atomic_long_unchecked_t gts_double_allocate;
28007 + atomic_long_unchecked_t assign_context;
28008 + atomic_long_unchecked_t assign_context_failed;
28009 + atomic_long_unchecked_t free_context;
28010 + atomic_long_unchecked_t load_user_context;
28011 + atomic_long_unchecked_t load_kernel_context;
28012 + atomic_long_unchecked_t lock_kernel_context;
28013 + atomic_long_unchecked_t unlock_kernel_context;
28014 + atomic_long_unchecked_t steal_user_context;
28015 + atomic_long_unchecked_t steal_kernel_context;
28016 + atomic_long_unchecked_t steal_context_failed;
28017 + atomic_long_unchecked_t nopfn;
28018 + atomic_long_unchecked_t asid_new;
28019 + atomic_long_unchecked_t asid_next;
28020 + atomic_long_unchecked_t asid_wrap;
28021 + atomic_long_unchecked_t asid_reuse;
28022 + atomic_long_unchecked_t intr;
28023 + atomic_long_unchecked_t intr_cbr;
28024 + atomic_long_unchecked_t intr_tfh;
28025 + atomic_long_unchecked_t intr_spurious;
28026 + atomic_long_unchecked_t intr_mm_lock_failed;
28027 + atomic_long_unchecked_t call_os;
28028 + atomic_long_unchecked_t call_os_wait_queue;
28029 + atomic_long_unchecked_t user_flush_tlb;
28030 + atomic_long_unchecked_t user_unload_context;
28031 + atomic_long_unchecked_t user_exception;
28032 + atomic_long_unchecked_t set_context_option;
28033 + atomic_long_unchecked_t check_context_retarget_intr;
28034 + atomic_long_unchecked_t check_context_unload;
28035 + atomic_long_unchecked_t tlb_dropin;
28036 + atomic_long_unchecked_t tlb_preload_page;
28037 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28038 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28039 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28040 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28041 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28042 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28043 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28044 + atomic_long_unchecked_t tfh_stale_on_fault;
28045 + atomic_long_unchecked_t mmu_invalidate_range;
28046 + atomic_long_unchecked_t mmu_invalidate_page;
28047 + atomic_long_unchecked_t flush_tlb;
28048 + atomic_long_unchecked_t flush_tlb_gru;
28049 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28050 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28051 +
28052 + atomic_long_unchecked_t copy_gpa;
28053 + atomic_long_unchecked_t read_gpa;
28054 +
28055 + atomic_long_unchecked_t mesq_receive;
28056 + atomic_long_unchecked_t mesq_receive_none;
28057 + atomic_long_unchecked_t mesq_send;
28058 + atomic_long_unchecked_t mesq_send_failed;
28059 + atomic_long_unchecked_t mesq_noop;
28060 + atomic_long_unchecked_t mesq_send_unexpected_error;
28061 + atomic_long_unchecked_t mesq_send_lb_overflow;
28062 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28063 + atomic_long_unchecked_t mesq_send_amo_nacked;
28064 + atomic_long_unchecked_t mesq_send_put_nacked;
28065 + atomic_long_unchecked_t mesq_page_overflow;
28066 + atomic_long_unchecked_t mesq_qf_locked;
28067 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28068 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28069 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28070 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28071 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28072 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28073 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28074 + atomic_long_unchecked_t mesq_noop_put_nacked;
28075 + atomic_long_unchecked_t mesq_noop_page_overflow;
28076
28077 };
28078
28079 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28080 tghop_invalidate, mcsop_last};
28081
28082 struct mcs_op_statistic {
28083 - atomic_long_t count;
28084 - atomic_long_t total;
28085 + atomic_long_unchecked_t count;
28086 + atomic_long_unchecked_t total;
28087 unsigned long max;
28088 };
28089
28090 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28091
28092 #define STAT(id) do { \
28093 if (gru_options & OPT_STATS) \
28094 - atomic_long_inc(&gru_stats.id); \
28095 + atomic_long_inc_unchecked(&gru_stats.id); \
28096 } while (0)
28097
28098 #ifdef CONFIG_SGI_GRU_DEBUG
28099 diff -urNp linux-3.0.3/drivers/misc/sgi-xp/xp.h linux-3.0.3/drivers/misc/sgi-xp/xp.h
28100 --- linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28101 +++ linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28102 @@ -289,7 +289,7 @@ struct xpc_interface {
28103 xpc_notify_func, void *);
28104 void (*received) (short, int, void *);
28105 enum xp_retval (*partid_to_nasids) (short, void *);
28106 -};
28107 +} __no_const;
28108
28109 extern struct xpc_interface xpc_interface;
28110
28111 diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c
28112 --- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28113 +++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28114 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28115 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28116 unsigned long timeo = jiffies + HZ;
28117
28118 + pax_track_stack();
28119 +
28120 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28121 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28122 goto sleep;
28123 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28124 unsigned long initial_adr;
28125 int initial_len = len;
28126
28127 + pax_track_stack();
28128 +
28129 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28130 adr += chip->start;
28131 initial_adr = adr;
28132 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28133 int retries = 3;
28134 int ret;
28135
28136 + pax_track_stack();
28137 +
28138 adr += chip->start;
28139
28140 retry:
28141 diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c
28142 --- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28143 +++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28144 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28145 unsigned long cmd_addr;
28146 struct cfi_private *cfi = map->fldrv_priv;
28147
28148 + pax_track_stack();
28149 +
28150 adr += chip->start;
28151
28152 /* Ensure cmd read/writes are aligned. */
28153 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28154 DECLARE_WAITQUEUE(wait, current);
28155 int wbufsize, z;
28156
28157 + pax_track_stack();
28158 +
28159 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28160 if (adr & (map_bankwidth(map)-1))
28161 return -EINVAL;
28162 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28163 DECLARE_WAITQUEUE(wait, current);
28164 int ret = 0;
28165
28166 + pax_track_stack();
28167 +
28168 adr += chip->start;
28169
28170 /* Let's determine this according to the interleave only once */
28171 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28172 unsigned long timeo = jiffies + HZ;
28173 DECLARE_WAITQUEUE(wait, current);
28174
28175 + pax_track_stack();
28176 +
28177 adr += chip->start;
28178
28179 /* Let's determine this according to the interleave only once */
28180 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28181 unsigned long timeo = jiffies + HZ;
28182 DECLARE_WAITQUEUE(wait, current);
28183
28184 + pax_track_stack();
28185 +
28186 adr += chip->start;
28187
28188 /* Let's determine this according to the interleave only once */
28189 diff -urNp linux-3.0.3/drivers/mtd/devices/doc2000.c linux-3.0.3/drivers/mtd/devices/doc2000.c
28190 --- linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28191 +++ linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28192 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28193
28194 /* The ECC will not be calculated correctly if less than 512 is written */
28195 /* DBB-
28196 - if (len != 0x200 && eccbuf)
28197 + if (len != 0x200)
28198 printk(KERN_WARNING
28199 "ECC needs a full sector write (adr: %lx size %lx)\n",
28200 (long) to, (long) len);
28201 diff -urNp linux-3.0.3/drivers/mtd/devices/doc2001.c linux-3.0.3/drivers/mtd/devices/doc2001.c
28202 --- linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28203 +++ linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28204 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28205 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28206
28207 /* Don't allow read past end of device */
28208 - if (from >= this->totlen)
28209 + if (from >= this->totlen || !len)
28210 return -EINVAL;
28211
28212 /* Don't allow a single read to cross a 512-byte block boundary */
28213 diff -urNp linux-3.0.3/drivers/mtd/ftl.c linux-3.0.3/drivers/mtd/ftl.c
28214 --- linux-3.0.3/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28215 +++ linux-3.0.3/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28216 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28217 loff_t offset;
28218 uint16_t srcunitswap = cpu_to_le16(srcunit);
28219
28220 + pax_track_stack();
28221 +
28222 eun = &part->EUNInfo[srcunit];
28223 xfer = &part->XferInfo[xferunit];
28224 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28225 diff -urNp linux-3.0.3/drivers/mtd/inftlcore.c linux-3.0.3/drivers/mtd/inftlcore.c
28226 --- linux-3.0.3/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28227 +++ linux-3.0.3/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28228 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28229 struct inftl_oob oob;
28230 size_t retlen;
28231
28232 + pax_track_stack();
28233 +
28234 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28235 "pending=%d)\n", inftl, thisVUC, pendingblock);
28236
28237 diff -urNp linux-3.0.3/drivers/mtd/inftlmount.c linux-3.0.3/drivers/mtd/inftlmount.c
28238 --- linux-3.0.3/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28239 +++ linux-3.0.3/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28240 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28241 struct INFTLPartition *ip;
28242 size_t retlen;
28243
28244 + pax_track_stack();
28245 +
28246 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28247
28248 /*
28249 diff -urNp linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c
28250 --- linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28251 +++ linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28252 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28253 {
28254 map_word pfow_val[4];
28255
28256 + pax_track_stack();
28257 +
28258 /* Check identification string */
28259 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28260 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28261 diff -urNp linux-3.0.3/drivers/mtd/mtdchar.c linux-3.0.3/drivers/mtd/mtdchar.c
28262 --- linux-3.0.3/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28263 +++ linux-3.0.3/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28264 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28265 u_long size;
28266 struct mtd_info_user info;
28267
28268 + pax_track_stack();
28269 +
28270 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28271
28272 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28273 diff -urNp linux-3.0.3/drivers/mtd/nand/denali.c linux-3.0.3/drivers/mtd/nand/denali.c
28274 --- linux-3.0.3/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28275 +++ linux-3.0.3/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28276 @@ -26,6 +26,7 @@
28277 #include <linux/pci.h>
28278 #include <linux/mtd/mtd.h>
28279 #include <linux/module.h>
28280 +#include <linux/slab.h>
28281
28282 #include "denali.h"
28283
28284 diff -urNp linux-3.0.3/drivers/mtd/nftlcore.c linux-3.0.3/drivers/mtd/nftlcore.c
28285 --- linux-3.0.3/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28286 +++ linux-3.0.3/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28287 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28288 int inplace = 1;
28289 size_t retlen;
28290
28291 + pax_track_stack();
28292 +
28293 memset(BlockMap, 0xff, sizeof(BlockMap));
28294 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28295
28296 diff -urNp linux-3.0.3/drivers/mtd/nftlmount.c linux-3.0.3/drivers/mtd/nftlmount.c
28297 --- linux-3.0.3/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28298 +++ linux-3.0.3/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28299 @@ -24,6 +24,7 @@
28300 #include <asm/errno.h>
28301 #include <linux/delay.h>
28302 #include <linux/slab.h>
28303 +#include <linux/sched.h>
28304 #include <linux/mtd/mtd.h>
28305 #include <linux/mtd/nand.h>
28306 #include <linux/mtd/nftl.h>
28307 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28308 struct mtd_info *mtd = nftl->mbd.mtd;
28309 unsigned int i;
28310
28311 + pax_track_stack();
28312 +
28313 /* Assume logical EraseSize == physical erasesize for starting the scan.
28314 We'll sort it out later if we find a MediaHeader which says otherwise */
28315 /* Actually, we won't. The new DiskOnChip driver has already scanned
28316 diff -urNp linux-3.0.3/drivers/mtd/ubi/build.c linux-3.0.3/drivers/mtd/ubi/build.c
28317 --- linux-3.0.3/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28318 +++ linux-3.0.3/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28319 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28320 static int __init bytes_str_to_int(const char *str)
28321 {
28322 char *endp;
28323 - unsigned long result;
28324 + unsigned long result, scale = 1;
28325
28326 result = simple_strtoul(str, &endp, 0);
28327 if (str == endp || result >= INT_MAX) {
28328 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28329
28330 switch (*endp) {
28331 case 'G':
28332 - result *= 1024;
28333 + scale *= 1024;
28334 case 'M':
28335 - result *= 1024;
28336 + scale *= 1024;
28337 case 'K':
28338 - result *= 1024;
28339 + scale *= 1024;
28340 if (endp[1] == 'i' && endp[2] == 'B')
28341 endp += 2;
28342 case '\0':
28343 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28344 return -EINVAL;
28345 }
28346
28347 - return result;
28348 + if ((intoverflow_t)result*scale >= INT_MAX) {
28349 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28350 + str);
28351 + return -EINVAL;
28352 + }
28353 +
28354 + return result*scale;
28355 }
28356
28357 /**
28358 diff -urNp linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c
28359 --- linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28360 +++ linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28361 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28362 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28363 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28364
28365 -static struct bfa_ioc_hwif nw_hwif_ct;
28366 +static struct bfa_ioc_hwif nw_hwif_ct = {
28367 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28368 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28369 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28370 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28371 + .ioc_map_port = bfa_ioc_ct_map_port,
28372 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28373 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28374 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28375 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28376 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28377 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28378 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28379 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28380 +};
28381
28382 /**
28383 * Called from bfa_ioc_attach() to map asic specific calls.
28384 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28385 void
28386 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28387 {
28388 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28389 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28390 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28391 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28392 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28393 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28394 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28395 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28396 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28397 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28398 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28399 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28400 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28401 -
28402 ioc->ioc_hwif = &nw_hwif_ct;
28403 }
28404
28405 diff -urNp linux-3.0.3/drivers/net/bna/bnad.c linux-3.0.3/drivers/net/bna/bnad.c
28406 --- linux-3.0.3/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28407 +++ linux-3.0.3/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28408 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28409 struct bna_intr_info *intr_info =
28410 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28411 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28412 - struct bna_tx_event_cbfn tx_cbfn;
28413 + static struct bna_tx_event_cbfn tx_cbfn = {
28414 + /* Initialize the tx event handlers */
28415 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28416 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28417 + .tx_stall_cbfn = bnad_cb_tx_stall,
28418 + .tx_resume_cbfn = bnad_cb_tx_resume,
28419 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28420 + };
28421 struct bna_tx *tx;
28422 unsigned long flags;
28423
28424 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28425 tx_config->txq_depth = bnad->txq_depth;
28426 tx_config->tx_type = BNA_TX_T_REGULAR;
28427
28428 - /* Initialize the tx event handlers */
28429 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28430 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28431 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28432 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28433 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28434 -
28435 /* Get BNA's resource requirement for one tx object */
28436 spin_lock_irqsave(&bnad->bna_lock, flags);
28437 bna_tx_res_req(bnad->num_txq_per_tx,
28438 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28439 struct bna_intr_info *intr_info =
28440 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28441 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28442 - struct bna_rx_event_cbfn rx_cbfn;
28443 + static struct bna_rx_event_cbfn rx_cbfn = {
28444 + /* Initialize the Rx event handlers */
28445 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28446 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28447 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28448 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28449 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28450 + .rx_post_cbfn = bnad_cb_rx_post
28451 + };
28452 struct bna_rx *rx;
28453 unsigned long flags;
28454
28455 /* Initialize the Rx object configuration */
28456 bnad_init_rx_config(bnad, rx_config);
28457
28458 - /* Initialize the Rx event handlers */
28459 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28460 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28461 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28462 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28463 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28464 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28465 -
28466 /* Get BNA's resource requirement for one Rx object */
28467 spin_lock_irqsave(&bnad->bna_lock, flags);
28468 bna_rx_res_req(rx_config, res_info);
28469 diff -urNp linux-3.0.3/drivers/net/bnx2.c linux-3.0.3/drivers/net/bnx2.c
28470 --- linux-3.0.3/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28471 +++ linux-3.0.3/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28472 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28473 int rc = 0;
28474 u32 magic, csum;
28475
28476 + pax_track_stack();
28477 +
28478 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28479 goto test_nvram_done;
28480
28481 diff -urNp linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c
28482 --- linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28483 +++ linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28484 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28485 int i, rc;
28486 u32 magic, crc;
28487
28488 + pax_track_stack();
28489 +
28490 if (BP_NOMCP(bp))
28491 return 0;
28492
28493 diff -urNp linux-3.0.3/drivers/net/cxgb3/l2t.h linux-3.0.3/drivers/net/cxgb3/l2t.h
28494 --- linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28495 +++ linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28496 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28497 */
28498 struct l2t_skb_cb {
28499 arp_failure_handler_func arp_failure_handler;
28500 -};
28501 +} __no_const;
28502
28503 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28504
28505 diff -urNp linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c
28506 --- linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28507 +++ linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28508 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28509 unsigned int nchan = adap->params.nports;
28510 struct msix_entry entries[MAX_INGQ + 1];
28511
28512 + pax_track_stack();
28513 +
28514 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28515 entries[i].entry = i;
28516
28517 diff -urNp linux-3.0.3/drivers/net/cxgb4/t4_hw.c linux-3.0.3/drivers/net/cxgb4/t4_hw.c
28518 --- linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28519 +++ linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28520 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28521 u8 vpd[VPD_LEN], csum;
28522 unsigned int vpdr_len, kw_offset, id_len;
28523
28524 + pax_track_stack();
28525 +
28526 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28527 if (ret < 0)
28528 return ret;
28529 diff -urNp linux-3.0.3/drivers/net/e1000e/82571.c linux-3.0.3/drivers/net/e1000e/82571.c
28530 --- linux-3.0.3/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28531 +++ linux-3.0.3/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28532 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28533 {
28534 struct e1000_hw *hw = &adapter->hw;
28535 struct e1000_mac_info *mac = &hw->mac;
28536 - struct e1000_mac_operations *func = &mac->ops;
28537 + e1000_mac_operations_no_const *func = &mac->ops;
28538 u32 swsm = 0;
28539 u32 swsm2 = 0;
28540 bool force_clear_smbi = false;
28541 diff -urNp linux-3.0.3/drivers/net/e1000e/es2lan.c linux-3.0.3/drivers/net/e1000e/es2lan.c
28542 --- linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28543 +++ linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28544 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28545 {
28546 struct e1000_hw *hw = &adapter->hw;
28547 struct e1000_mac_info *mac = &hw->mac;
28548 - struct e1000_mac_operations *func = &mac->ops;
28549 + e1000_mac_operations_no_const *func = &mac->ops;
28550
28551 /* Set media type */
28552 switch (adapter->pdev->device) {
28553 diff -urNp linux-3.0.3/drivers/net/e1000e/hw.h linux-3.0.3/drivers/net/e1000e/hw.h
28554 --- linux-3.0.3/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28555 +++ linux-3.0.3/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28556 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
28557 void (*write_vfta)(struct e1000_hw *, u32, u32);
28558 s32 (*read_mac_addr)(struct e1000_hw *);
28559 };
28560 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28561
28562 /* Function pointers for the PHY. */
28563 struct e1000_phy_operations {
28564 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
28565 void (*power_up)(struct e1000_hw *);
28566 void (*power_down)(struct e1000_hw *);
28567 };
28568 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28569
28570 /* Function pointers for the NVM. */
28571 struct e1000_nvm_operations {
28572 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28573 s32 (*validate)(struct e1000_hw *);
28574 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28575 };
28576 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28577
28578 struct e1000_mac_info {
28579 - struct e1000_mac_operations ops;
28580 + e1000_mac_operations_no_const ops;
28581 u8 addr[ETH_ALEN];
28582 u8 perm_addr[ETH_ALEN];
28583
28584 @@ -853,7 +856,7 @@ struct e1000_mac_info {
28585 };
28586
28587 struct e1000_phy_info {
28588 - struct e1000_phy_operations ops;
28589 + e1000_phy_operations_no_const ops;
28590
28591 enum e1000_phy_type type;
28592
28593 @@ -887,7 +890,7 @@ struct e1000_phy_info {
28594 };
28595
28596 struct e1000_nvm_info {
28597 - struct e1000_nvm_operations ops;
28598 + e1000_nvm_operations_no_const ops;
28599
28600 enum e1000_nvm_type type;
28601 enum e1000_nvm_override override;
28602 diff -urNp linux-3.0.3/drivers/net/hamradio/6pack.c linux-3.0.3/drivers/net/hamradio/6pack.c
28603 --- linux-3.0.3/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28604 +++ linux-3.0.3/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28605 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28606 unsigned char buf[512];
28607 int count1;
28608
28609 + pax_track_stack();
28610 +
28611 if (!count)
28612 return;
28613
28614 diff -urNp linux-3.0.3/drivers/net/igb/e1000_hw.h linux-3.0.3/drivers/net/igb/e1000_hw.h
28615 --- linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28616 +++ linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28617 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28618 s32 (*read_mac_addr)(struct e1000_hw *);
28619 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28620 };
28621 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28622
28623 struct e1000_phy_operations {
28624 s32 (*acquire)(struct e1000_hw *);
28625 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28626 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28627 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28628 };
28629 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28630
28631 struct e1000_nvm_operations {
28632 s32 (*acquire)(struct e1000_hw *);
28633 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28634 s32 (*update)(struct e1000_hw *);
28635 s32 (*validate)(struct e1000_hw *);
28636 };
28637 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28638
28639 struct e1000_info {
28640 s32 (*get_invariants)(struct e1000_hw *);
28641 @@ -350,7 +353,7 @@ struct e1000_info {
28642 extern const struct e1000_info e1000_82575_info;
28643
28644 struct e1000_mac_info {
28645 - struct e1000_mac_operations ops;
28646 + e1000_mac_operations_no_const ops;
28647
28648 u8 addr[6];
28649 u8 perm_addr[6];
28650 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28651 };
28652
28653 struct e1000_phy_info {
28654 - struct e1000_phy_operations ops;
28655 + e1000_phy_operations_no_const ops;
28656
28657 enum e1000_phy_type type;
28658
28659 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28660 };
28661
28662 struct e1000_nvm_info {
28663 - struct e1000_nvm_operations ops;
28664 + e1000_nvm_operations_no_const ops;
28665 enum e1000_nvm_type type;
28666 enum e1000_nvm_override override;
28667
28668 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28669 s32 (*check_for_ack)(struct e1000_hw *, u16);
28670 s32 (*check_for_rst)(struct e1000_hw *, u16);
28671 };
28672 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28673
28674 struct e1000_mbx_stats {
28675 u32 msgs_tx;
28676 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28677 };
28678
28679 struct e1000_mbx_info {
28680 - struct e1000_mbx_operations ops;
28681 + e1000_mbx_operations_no_const ops;
28682 struct e1000_mbx_stats stats;
28683 u32 timeout;
28684 u32 usec_delay;
28685 diff -urNp linux-3.0.3/drivers/net/igbvf/vf.h linux-3.0.3/drivers/net/igbvf/vf.h
28686 --- linux-3.0.3/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28687 +++ linux-3.0.3/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28688 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28689 s32 (*read_mac_addr)(struct e1000_hw *);
28690 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28691 };
28692 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28693
28694 struct e1000_mac_info {
28695 - struct e1000_mac_operations ops;
28696 + e1000_mac_operations_no_const ops;
28697 u8 addr[6];
28698 u8 perm_addr[6];
28699
28700 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28701 s32 (*check_for_ack)(struct e1000_hw *);
28702 s32 (*check_for_rst)(struct e1000_hw *);
28703 };
28704 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28705
28706 struct e1000_mbx_stats {
28707 u32 msgs_tx;
28708 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28709 };
28710
28711 struct e1000_mbx_info {
28712 - struct e1000_mbx_operations ops;
28713 + e1000_mbx_operations_no_const ops;
28714 struct e1000_mbx_stats stats;
28715 u32 timeout;
28716 u32 usec_delay;
28717 diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_main.c linux-3.0.3/drivers/net/ixgb/ixgb_main.c
28718 --- linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28719 +++ linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28720 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28721 u32 rctl;
28722 int i;
28723
28724 + pax_track_stack();
28725 +
28726 /* Check for Promiscuous and All Multicast modes */
28727
28728 rctl = IXGB_READ_REG(hw, RCTL);
28729 diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_param.c linux-3.0.3/drivers/net/ixgb/ixgb_param.c
28730 --- linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28731 +++ linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28732 @@ -261,6 +261,9 @@ void __devinit
28733 ixgb_check_options(struct ixgb_adapter *adapter)
28734 {
28735 int bd = adapter->bd_number;
28736 +
28737 + pax_track_stack();
28738 +
28739 if (bd >= IXGB_MAX_NIC) {
28740 pr_notice("Warning: no configuration for board #%i\n", bd);
28741 pr_notice("Using defaults for all values\n");
28742 diff -urNp linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h
28743 --- linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28744 +++ linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28745 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28746 s32 (*update_checksum)(struct ixgbe_hw *);
28747 u16 (*calc_checksum)(struct ixgbe_hw *);
28748 };
28749 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28750
28751 struct ixgbe_mac_operations {
28752 s32 (*init_hw)(struct ixgbe_hw *);
28753 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28754 /* Flow Control */
28755 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28756 };
28757 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28758
28759 struct ixgbe_phy_operations {
28760 s32 (*identify)(struct ixgbe_hw *);
28761 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28762 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28763 s32 (*check_overtemp)(struct ixgbe_hw *);
28764 };
28765 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28766
28767 struct ixgbe_eeprom_info {
28768 - struct ixgbe_eeprom_operations ops;
28769 + ixgbe_eeprom_operations_no_const ops;
28770 enum ixgbe_eeprom_type type;
28771 u32 semaphore_delay;
28772 u16 word_size;
28773 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28774
28775 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28776 struct ixgbe_mac_info {
28777 - struct ixgbe_mac_operations ops;
28778 + ixgbe_mac_operations_no_const ops;
28779 enum ixgbe_mac_type type;
28780 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28781 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28782 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28783 };
28784
28785 struct ixgbe_phy_info {
28786 - struct ixgbe_phy_operations ops;
28787 + ixgbe_phy_operations_no_const ops;
28788 struct mdio_if_info mdio;
28789 enum ixgbe_phy_type type;
28790 u32 id;
28791 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28792 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28793 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28794 };
28795 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28796
28797 struct ixgbe_mbx_stats {
28798 u32 msgs_tx;
28799 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28800 };
28801
28802 struct ixgbe_mbx_info {
28803 - struct ixgbe_mbx_operations ops;
28804 + ixgbe_mbx_operations_no_const ops;
28805 struct ixgbe_mbx_stats stats;
28806 u32 timeout;
28807 u32 usec_delay;
28808 diff -urNp linux-3.0.3/drivers/net/ixgbevf/vf.h linux-3.0.3/drivers/net/ixgbevf/vf.h
28809 --- linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28810 +++ linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28811 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28812 s32 (*clear_vfta)(struct ixgbe_hw *);
28813 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28814 };
28815 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28816
28817 enum ixgbe_mac_type {
28818 ixgbe_mac_unknown = 0,
28819 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28820 };
28821
28822 struct ixgbe_mac_info {
28823 - struct ixgbe_mac_operations ops;
28824 + ixgbe_mac_operations_no_const ops;
28825 u8 addr[6];
28826 u8 perm_addr[6];
28827
28828 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28829 s32 (*check_for_ack)(struct ixgbe_hw *);
28830 s32 (*check_for_rst)(struct ixgbe_hw *);
28831 };
28832 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28833
28834 struct ixgbe_mbx_stats {
28835 u32 msgs_tx;
28836 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28837 };
28838
28839 struct ixgbe_mbx_info {
28840 - struct ixgbe_mbx_operations ops;
28841 + ixgbe_mbx_operations_no_const ops;
28842 struct ixgbe_mbx_stats stats;
28843 u32 timeout;
28844 u32 udelay;
28845 diff -urNp linux-3.0.3/drivers/net/ksz884x.c linux-3.0.3/drivers/net/ksz884x.c
28846 --- linux-3.0.3/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28847 +++ linux-3.0.3/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28848 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28849 int rc;
28850 u64 counter[TOTAL_PORT_COUNTER_NUM];
28851
28852 + pax_track_stack();
28853 +
28854 mutex_lock(&hw_priv->lock);
28855 n = SWITCH_PORT_NUM;
28856 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28857 diff -urNp linux-3.0.3/drivers/net/mlx4/main.c linux-3.0.3/drivers/net/mlx4/main.c
28858 --- linux-3.0.3/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28859 +++ linux-3.0.3/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28860 @@ -40,6 +40,7 @@
28861 #include <linux/dma-mapping.h>
28862 #include <linux/slab.h>
28863 #include <linux/io-mapping.h>
28864 +#include <linux/sched.h>
28865
28866 #include <linux/mlx4/device.h>
28867 #include <linux/mlx4/doorbell.h>
28868 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28869 u64 icm_size;
28870 int err;
28871
28872 + pax_track_stack();
28873 +
28874 err = mlx4_QUERY_FW(dev);
28875 if (err) {
28876 if (err == -EACCES)
28877 diff -urNp linux-3.0.3/drivers/net/niu.c linux-3.0.3/drivers/net/niu.c
28878 --- linux-3.0.3/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28879 +++ linux-3.0.3/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28880 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28881 int i, num_irqs, err;
28882 u8 first_ldg;
28883
28884 + pax_track_stack();
28885 +
28886 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28887 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28888 ldg_num_map[i] = first_ldg + i;
28889 diff -urNp linux-3.0.3/drivers/net/pcnet32.c linux-3.0.3/drivers/net/pcnet32.c
28890 --- linux-3.0.3/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28891 +++ linux-3.0.3/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28892 @@ -82,7 +82,7 @@ static int cards_found;
28893 /*
28894 * VLB I/O addresses
28895 */
28896 -static unsigned int pcnet32_portlist[] __initdata =
28897 +static unsigned int pcnet32_portlist[] __devinitdata =
28898 { 0x300, 0x320, 0x340, 0x360, 0 };
28899
28900 static int pcnet32_debug;
28901 @@ -270,7 +270,7 @@ struct pcnet32_private {
28902 struct sk_buff **rx_skbuff;
28903 dma_addr_t *tx_dma_addr;
28904 dma_addr_t *rx_dma_addr;
28905 - struct pcnet32_access a;
28906 + struct pcnet32_access *a;
28907 spinlock_t lock; /* Guard lock */
28908 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28909 unsigned int rx_ring_size; /* current rx ring size */
28910 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28911 u16 val;
28912
28913 netif_wake_queue(dev);
28914 - val = lp->a.read_csr(ioaddr, CSR3);
28915 + val = lp->a->read_csr(ioaddr, CSR3);
28916 val &= 0x00ff;
28917 - lp->a.write_csr(ioaddr, CSR3, val);
28918 + lp->a->write_csr(ioaddr, CSR3, val);
28919 napi_enable(&lp->napi);
28920 }
28921
28922 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28923 r = mii_link_ok(&lp->mii_if);
28924 } else if (lp->chip_version >= PCNET32_79C970A) {
28925 ulong ioaddr = dev->base_addr; /* card base I/O address */
28926 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28927 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28928 } else { /* can not detect link on really old chips */
28929 r = 1;
28930 }
28931 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28932 pcnet32_netif_stop(dev);
28933
28934 spin_lock_irqsave(&lp->lock, flags);
28935 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28936 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28937
28938 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28939
28940 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28941 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28942 {
28943 struct pcnet32_private *lp = netdev_priv(dev);
28944 - struct pcnet32_access *a = &lp->a; /* access to registers */
28945 + struct pcnet32_access *a = lp->a; /* access to registers */
28946 ulong ioaddr = dev->base_addr; /* card base I/O address */
28947 struct sk_buff *skb; /* sk buff */
28948 int x, i; /* counters */
28949 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28950 pcnet32_netif_stop(dev);
28951
28952 spin_lock_irqsave(&lp->lock, flags);
28953 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28954 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28955
28956 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28957
28958 /* Reset the PCNET32 */
28959 - lp->a.reset(ioaddr);
28960 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28961 + lp->a->reset(ioaddr);
28962 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28963
28964 /* switch pcnet32 to 32bit mode */
28965 - lp->a.write_bcr(ioaddr, 20, 2);
28966 + lp->a->write_bcr(ioaddr, 20, 2);
28967
28968 /* purge & init rings but don't actually restart */
28969 pcnet32_restart(dev, 0x0000);
28970
28971 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28972 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28973
28974 /* Initialize Transmit buffers. */
28975 size = data_len + 15;
28976 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28977
28978 /* set int loopback in CSR15 */
28979 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28980 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28981 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28982
28983 teststatus = cpu_to_le16(0x8000);
28984 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28985 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28986
28987 /* Check status of descriptors */
28988 for (x = 0; x < numbuffs; x++) {
28989 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28990 }
28991 }
28992
28993 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28994 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28995 wmb();
28996 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28997 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28998 @@ -1015,7 +1015,7 @@ clean_up:
28999 pcnet32_restart(dev, CSR0_NORMAL);
29000 } else {
29001 pcnet32_purge_rx_ring(dev);
29002 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29003 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29004 }
29005 spin_unlock_irqrestore(&lp->lock, flags);
29006
29007 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
29008 enum ethtool_phys_id_state state)
29009 {
29010 struct pcnet32_private *lp = netdev_priv(dev);
29011 - struct pcnet32_access *a = &lp->a;
29012 + struct pcnet32_access *a = lp->a;
29013 ulong ioaddr = dev->base_addr;
29014 unsigned long flags;
29015 int i;
29016 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
29017 {
29018 int csr5;
29019 struct pcnet32_private *lp = netdev_priv(dev);
29020 - struct pcnet32_access *a = &lp->a;
29021 + struct pcnet32_access *a = lp->a;
29022 ulong ioaddr = dev->base_addr;
29023 int ticks;
29024
29025 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29026 spin_lock_irqsave(&lp->lock, flags);
29027 if (pcnet32_tx(dev)) {
29028 /* reset the chip to clear the error condition, then restart */
29029 - lp->a.reset(ioaddr);
29030 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29031 + lp->a->reset(ioaddr);
29032 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29033 pcnet32_restart(dev, CSR0_START);
29034 netif_wake_queue(dev);
29035 }
29036 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29037 __napi_complete(napi);
29038
29039 /* clear interrupt masks */
29040 - val = lp->a.read_csr(ioaddr, CSR3);
29041 + val = lp->a->read_csr(ioaddr, CSR3);
29042 val &= 0x00ff;
29043 - lp->a.write_csr(ioaddr, CSR3, val);
29044 + lp->a->write_csr(ioaddr, CSR3, val);
29045
29046 /* Set interrupt enable. */
29047 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29048 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29049
29050 spin_unlock_irqrestore(&lp->lock, flags);
29051 }
29052 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29053 int i, csr0;
29054 u16 *buff = ptr;
29055 struct pcnet32_private *lp = netdev_priv(dev);
29056 - struct pcnet32_access *a = &lp->a;
29057 + struct pcnet32_access *a = lp->a;
29058 ulong ioaddr = dev->base_addr;
29059 unsigned long flags;
29060
29061 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29062 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29063 if (lp->phymask & (1 << j)) {
29064 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29065 - lp->a.write_bcr(ioaddr, 33,
29066 + lp->a->write_bcr(ioaddr, 33,
29067 (j << 5) | i);
29068 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29069 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29070 }
29071 }
29072 }
29073 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29074 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29075 lp->options |= PCNET32_PORT_FD;
29076
29077 - lp->a = *a;
29078 + lp->a = a;
29079
29080 /* prior to register_netdev, dev->name is not yet correct */
29081 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29082 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29083 if (lp->mii) {
29084 /* lp->phycount and lp->phymask are set to 0 by memset above */
29085
29086 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29087 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29088 /* scan for PHYs */
29089 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29090 unsigned short id1, id2;
29091 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29092 pr_info("Found PHY %04x:%04x at address %d\n",
29093 id1, id2, i);
29094 }
29095 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29096 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29097 if (lp->phycount > 1)
29098 lp->options |= PCNET32_PORT_MII;
29099 }
29100 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29101 }
29102
29103 /* Reset the PCNET32 */
29104 - lp->a.reset(ioaddr);
29105 + lp->a->reset(ioaddr);
29106
29107 /* switch pcnet32 to 32bit mode */
29108 - lp->a.write_bcr(ioaddr, 20, 2);
29109 + lp->a->write_bcr(ioaddr, 20, 2);
29110
29111 netif_printk(lp, ifup, KERN_DEBUG, dev,
29112 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29113 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29114 (u32) (lp->init_dma_addr));
29115
29116 /* set/reset autoselect bit */
29117 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29118 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29119 if (lp->options & PCNET32_PORT_ASEL)
29120 val |= 2;
29121 - lp->a.write_bcr(ioaddr, 2, val);
29122 + lp->a->write_bcr(ioaddr, 2, val);
29123
29124 /* handle full duplex setting */
29125 if (lp->mii_if.full_duplex) {
29126 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29127 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29128 if (lp->options & PCNET32_PORT_FD) {
29129 val |= 1;
29130 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29131 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29132 if (lp->chip_version == 0x2627)
29133 val |= 3;
29134 }
29135 - lp->a.write_bcr(ioaddr, 9, val);
29136 + lp->a->write_bcr(ioaddr, 9, val);
29137 }
29138
29139 /* set/reset GPSI bit in test register */
29140 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29141 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29142 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29143 val |= 0x10;
29144 - lp->a.write_csr(ioaddr, 124, val);
29145 + lp->a->write_csr(ioaddr, 124, val);
29146
29147 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29148 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29149 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29150 * duplex, and/or enable auto negotiation, and clear DANAS
29151 */
29152 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29153 - lp->a.write_bcr(ioaddr, 32,
29154 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29155 + lp->a->write_bcr(ioaddr, 32,
29156 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29157 /* disable Auto Negotiation, set 10Mpbs, HD */
29158 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29159 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29160 if (lp->options & PCNET32_PORT_FD)
29161 val |= 0x10;
29162 if (lp->options & PCNET32_PORT_100)
29163 val |= 0x08;
29164 - lp->a.write_bcr(ioaddr, 32, val);
29165 + lp->a->write_bcr(ioaddr, 32, val);
29166 } else {
29167 if (lp->options & PCNET32_PORT_ASEL) {
29168 - lp->a.write_bcr(ioaddr, 32,
29169 - lp->a.read_bcr(ioaddr,
29170 + lp->a->write_bcr(ioaddr, 32,
29171 + lp->a->read_bcr(ioaddr,
29172 32) | 0x0080);
29173 /* enable auto negotiate, setup, disable fd */
29174 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29175 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29176 val |= 0x20;
29177 - lp->a.write_bcr(ioaddr, 32, val);
29178 + lp->a->write_bcr(ioaddr, 32, val);
29179 }
29180 }
29181 } else {
29182 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29183 * There is really no good other way to handle multiple PHYs
29184 * other than turning off all automatics
29185 */
29186 - val = lp->a.read_bcr(ioaddr, 2);
29187 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29188 - val = lp->a.read_bcr(ioaddr, 32);
29189 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29190 + val = lp->a->read_bcr(ioaddr, 2);
29191 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29192 + val = lp->a->read_bcr(ioaddr, 32);
29193 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29194
29195 if (!(lp->options & PCNET32_PORT_ASEL)) {
29196 /* setup ecmd */
29197 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29198 ethtool_cmd_speed_set(&ecmd,
29199 (lp->options & PCNET32_PORT_100) ?
29200 SPEED_100 : SPEED_10);
29201 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29202 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29203
29204 if (lp->options & PCNET32_PORT_FD) {
29205 ecmd.duplex = DUPLEX_FULL;
29206 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29207 ecmd.duplex = DUPLEX_HALF;
29208 bcr9 |= ~(1 << 0);
29209 }
29210 - lp->a.write_bcr(ioaddr, 9, bcr9);
29211 + lp->a->write_bcr(ioaddr, 9, bcr9);
29212 }
29213
29214 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29215 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29216
29217 #ifdef DO_DXSUFLO
29218 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29219 - val = lp->a.read_csr(ioaddr, CSR3);
29220 + val = lp->a->read_csr(ioaddr, CSR3);
29221 val |= 0x40;
29222 - lp->a.write_csr(ioaddr, CSR3, val);
29223 + lp->a->write_csr(ioaddr, CSR3, val);
29224 }
29225 #endif
29226
29227 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29228 napi_enable(&lp->napi);
29229
29230 /* Re-initialize the PCNET32, and start it when done. */
29231 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29232 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29233 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29234 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29235
29236 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29237 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29238 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29239 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29240
29241 netif_start_queue(dev);
29242
29243 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29244
29245 i = 0;
29246 while (i++ < 100)
29247 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29248 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29249 break;
29250 /*
29251 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29252 * reports that doing so triggers a bug in the '974.
29253 */
29254 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29255 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29256
29257 netif_printk(lp, ifup, KERN_DEBUG, dev,
29258 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29259 i,
29260 (u32) (lp->init_dma_addr),
29261 - lp->a.read_csr(ioaddr, CSR0));
29262 + lp->a->read_csr(ioaddr, CSR0));
29263
29264 spin_unlock_irqrestore(&lp->lock, flags);
29265
29266 @@ -2218,7 +2218,7 @@ err_free_ring:
29267 * Switch back to 16bit mode to avoid problems with dumb
29268 * DOS packet driver after a warm reboot
29269 */
29270 - lp->a.write_bcr(ioaddr, 20, 4);
29271 + lp->a->write_bcr(ioaddr, 20, 4);
29272
29273 err_free_irq:
29274 spin_unlock_irqrestore(&lp->lock, flags);
29275 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29276
29277 /* wait for stop */
29278 for (i = 0; i < 100; i++)
29279 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29280 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29281 break;
29282
29283 if (i >= 100)
29284 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29285 return;
29286
29287 /* ReInit Ring */
29288 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29289 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29290 i = 0;
29291 while (i++ < 1000)
29292 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29293 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29294 break;
29295
29296 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29297 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29298 }
29299
29300 static void pcnet32_tx_timeout(struct net_device *dev)
29301 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29302 /* Transmitter timeout, serious problems. */
29303 if (pcnet32_debug & NETIF_MSG_DRV)
29304 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29305 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29306 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29307 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29308 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29309 dev->stats.tx_errors++;
29310 if (netif_msg_tx_err(lp)) {
29311 int i;
29312 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29313
29314 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29315 "%s() called, csr0 %4.4x\n",
29316 - __func__, lp->a.read_csr(ioaddr, CSR0));
29317 + __func__, lp->a->read_csr(ioaddr, CSR0));
29318
29319 /* Default status -- will not enable Successful-TxDone
29320 * interrupt when that option is available to us.
29321 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29322 dev->stats.tx_bytes += skb->len;
29323
29324 /* Trigger an immediate send poll. */
29325 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29326 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29327
29328 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29329 lp->tx_full = 1;
29330 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29331
29332 spin_lock(&lp->lock);
29333
29334 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29335 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29336 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29337 if (csr0 == 0xffff)
29338 break; /* PCMCIA remove happened */
29339 /* Acknowledge all of the current interrupt sources ASAP. */
29340 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29341 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29342
29343 netif_printk(lp, intr, KERN_DEBUG, dev,
29344 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29345 - csr0, lp->a.read_csr(ioaddr, CSR0));
29346 + csr0, lp->a->read_csr(ioaddr, CSR0));
29347
29348 /* Log misc errors. */
29349 if (csr0 & 0x4000)
29350 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29351 if (napi_schedule_prep(&lp->napi)) {
29352 u16 val;
29353 /* set interrupt masks */
29354 - val = lp->a.read_csr(ioaddr, CSR3);
29355 + val = lp->a->read_csr(ioaddr, CSR3);
29356 val |= 0x5f00;
29357 - lp->a.write_csr(ioaddr, CSR3, val);
29358 + lp->a->write_csr(ioaddr, CSR3, val);
29359
29360 __napi_schedule(&lp->napi);
29361 break;
29362 }
29363 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29364 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29365 }
29366
29367 netif_printk(lp, intr, KERN_DEBUG, dev,
29368 "exiting interrupt, csr0=%#4.4x\n",
29369 - lp->a.read_csr(ioaddr, CSR0));
29370 + lp->a->read_csr(ioaddr, CSR0));
29371
29372 spin_unlock(&lp->lock);
29373
29374 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29375
29376 spin_lock_irqsave(&lp->lock, flags);
29377
29378 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29379 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29380
29381 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29382 "Shutting down ethercard, status was %2.2x\n",
29383 - lp->a.read_csr(ioaddr, CSR0));
29384 + lp->a->read_csr(ioaddr, CSR0));
29385
29386 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29387 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29388 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29389
29390 /*
29391 * Switch back to 16bit mode to avoid problems with dumb
29392 * DOS packet driver after a warm reboot
29393 */
29394 - lp->a.write_bcr(ioaddr, 20, 4);
29395 + lp->a->write_bcr(ioaddr, 20, 4);
29396
29397 spin_unlock_irqrestore(&lp->lock, flags);
29398
29399 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29400 unsigned long flags;
29401
29402 spin_lock_irqsave(&lp->lock, flags);
29403 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29404 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29405 spin_unlock_irqrestore(&lp->lock, flags);
29406
29407 return &dev->stats;
29408 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29409 if (dev->flags & IFF_ALLMULTI) {
29410 ib->filter[0] = cpu_to_le32(~0U);
29411 ib->filter[1] = cpu_to_le32(~0U);
29412 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29413 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29414 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29415 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29416 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29417 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29418 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29419 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29420 return;
29421 }
29422 /* clear the multicast filter */
29423 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29424 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29425 }
29426 for (i = 0; i < 4; i++)
29427 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29428 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29429 le16_to_cpu(mcast_table[i]));
29430 }
29431
29432 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29433
29434 spin_lock_irqsave(&lp->lock, flags);
29435 suspended = pcnet32_suspend(dev, &flags, 0);
29436 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29437 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29438 if (dev->flags & IFF_PROMISC) {
29439 /* Log any net taps. */
29440 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29441 lp->init_block->mode =
29442 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29443 7);
29444 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29445 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29446 } else {
29447 lp->init_block->mode =
29448 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29449 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29450 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29451 pcnet32_load_multicast(dev);
29452 }
29453
29454 if (suspended) {
29455 int csr5;
29456 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29457 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29458 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29459 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29460 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29461 } else {
29462 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29463 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29464 pcnet32_restart(dev, CSR0_NORMAL);
29465 netif_wake_queue(dev);
29466 }
29467 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29468 if (!lp->mii)
29469 return 0;
29470
29471 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29472 - val_out = lp->a.read_bcr(ioaddr, 34);
29473 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29474 + val_out = lp->a->read_bcr(ioaddr, 34);
29475
29476 return val_out;
29477 }
29478 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29479 if (!lp->mii)
29480 return;
29481
29482 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29483 - lp->a.write_bcr(ioaddr, 34, val);
29484 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29485 + lp->a->write_bcr(ioaddr, 34, val);
29486 }
29487
29488 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29489 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29490 curr_link = mii_link_ok(&lp->mii_if);
29491 } else {
29492 ulong ioaddr = dev->base_addr; /* card base I/O address */
29493 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29494 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29495 }
29496 if (!curr_link) {
29497 if (prev_link || verbose) {
29498 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29499 (ecmd.duplex == DUPLEX_FULL)
29500 ? "full" : "half");
29501 }
29502 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29503 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29504 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29505 if (lp->mii_if.full_duplex)
29506 bcr9 |= (1 << 0);
29507 else
29508 bcr9 &= ~(1 << 0);
29509 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29510 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29511 }
29512 } else {
29513 netif_info(lp, link, dev, "link up\n");
29514 diff -urNp linux-3.0.3/drivers/net/ppp_generic.c linux-3.0.3/drivers/net/ppp_generic.c
29515 --- linux-3.0.3/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29516 +++ linux-3.0.3/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29517 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29518 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29519 struct ppp_stats stats;
29520 struct ppp_comp_stats cstats;
29521 - char *vers;
29522
29523 switch (cmd) {
29524 case SIOCGPPPSTATS:
29525 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29526 break;
29527
29528 case SIOCGPPPVER:
29529 - vers = PPP_VERSION;
29530 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29531 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29532 break;
29533 err = 0;
29534 break;
29535 diff -urNp linux-3.0.3/drivers/net/r8169.c linux-3.0.3/drivers/net/r8169.c
29536 --- linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29537 +++ linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29538 @@ -645,12 +645,12 @@ struct rtl8169_private {
29539 struct mdio_ops {
29540 void (*write)(void __iomem *, int, int);
29541 int (*read)(void __iomem *, int);
29542 - } mdio_ops;
29543 + } __no_const mdio_ops;
29544
29545 struct pll_power_ops {
29546 void (*down)(struct rtl8169_private *);
29547 void (*up)(struct rtl8169_private *);
29548 - } pll_power_ops;
29549 + } __no_const pll_power_ops;
29550
29551 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29552 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29553 diff -urNp linux-3.0.3/drivers/net/tg3.h linux-3.0.3/drivers/net/tg3.h
29554 --- linux-3.0.3/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29555 +++ linux-3.0.3/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29556 @@ -134,6 +134,7 @@
29557 #define CHIPREV_ID_5750_A0 0x4000
29558 #define CHIPREV_ID_5750_A1 0x4001
29559 #define CHIPREV_ID_5750_A3 0x4003
29560 +#define CHIPREV_ID_5750_C1 0x4201
29561 #define CHIPREV_ID_5750_C2 0x4202
29562 #define CHIPREV_ID_5752_A0_HW 0x5000
29563 #define CHIPREV_ID_5752_A0 0x6000
29564 diff -urNp linux-3.0.3/drivers/net/tokenring/abyss.c linux-3.0.3/drivers/net/tokenring/abyss.c
29565 --- linux-3.0.3/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29566 +++ linux-3.0.3/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29567 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29568
29569 static int __init abyss_init (void)
29570 {
29571 - abyss_netdev_ops = tms380tr_netdev_ops;
29572 + pax_open_kernel();
29573 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29574
29575 - abyss_netdev_ops.ndo_open = abyss_open;
29576 - abyss_netdev_ops.ndo_stop = abyss_close;
29577 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29578 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29579 + pax_close_kernel();
29580
29581 return pci_register_driver(&abyss_driver);
29582 }
29583 diff -urNp linux-3.0.3/drivers/net/tokenring/madgemc.c linux-3.0.3/drivers/net/tokenring/madgemc.c
29584 --- linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29585 +++ linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29586 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29587
29588 static int __init madgemc_init (void)
29589 {
29590 - madgemc_netdev_ops = tms380tr_netdev_ops;
29591 - madgemc_netdev_ops.ndo_open = madgemc_open;
29592 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29593 + pax_open_kernel();
29594 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29595 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29596 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29597 + pax_close_kernel();
29598
29599 return mca_register_driver (&madgemc_driver);
29600 }
29601 diff -urNp linux-3.0.3/drivers/net/tokenring/proteon.c linux-3.0.3/drivers/net/tokenring/proteon.c
29602 --- linux-3.0.3/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29603 +++ linux-3.0.3/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29604 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29605 struct platform_device *pdev;
29606 int i, num = 0, err = 0;
29607
29608 - proteon_netdev_ops = tms380tr_netdev_ops;
29609 - proteon_netdev_ops.ndo_open = proteon_open;
29610 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29611 + pax_open_kernel();
29612 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29613 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29614 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29615 + pax_close_kernel();
29616
29617 err = platform_driver_register(&proteon_driver);
29618 if (err)
29619 diff -urNp linux-3.0.3/drivers/net/tokenring/skisa.c linux-3.0.3/drivers/net/tokenring/skisa.c
29620 --- linux-3.0.3/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29621 +++ linux-3.0.3/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29622 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29623 struct platform_device *pdev;
29624 int i, num = 0, err = 0;
29625
29626 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29627 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29628 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29629 + pax_open_kernel();
29630 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29631 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29632 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29633 + pax_close_kernel();
29634
29635 err = platform_driver_register(&sk_isa_driver);
29636 if (err)
29637 diff -urNp linux-3.0.3/drivers/net/tulip/de2104x.c linux-3.0.3/drivers/net/tulip/de2104x.c
29638 --- linux-3.0.3/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29639 +++ linux-3.0.3/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29640 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29641 struct de_srom_info_leaf *il;
29642 void *bufp;
29643
29644 + pax_track_stack();
29645 +
29646 /* download entire eeprom */
29647 for (i = 0; i < DE_EEPROM_WORDS; i++)
29648 ((__le16 *)ee_data)[i] =
29649 diff -urNp linux-3.0.3/drivers/net/tulip/de4x5.c linux-3.0.3/drivers/net/tulip/de4x5.c
29650 --- linux-3.0.3/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29651 +++ linux-3.0.3/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29652 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29653 for (i=0; i<ETH_ALEN; i++) {
29654 tmp.addr[i] = dev->dev_addr[i];
29655 }
29656 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29657 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29658 break;
29659
29660 case DE4X5_SET_HWADDR: /* Set the hardware address */
29661 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29662 spin_lock_irqsave(&lp->lock, flags);
29663 memcpy(&statbuf, &lp->pktStats, ioc->len);
29664 spin_unlock_irqrestore(&lp->lock, flags);
29665 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29666 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29667 return -EFAULT;
29668 break;
29669 }
29670 diff -urNp linux-3.0.3/drivers/net/usb/hso.c linux-3.0.3/drivers/net/usb/hso.c
29671 --- linux-3.0.3/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29672 +++ linux-3.0.3/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29673 @@ -71,7 +71,7 @@
29674 #include <asm/byteorder.h>
29675 #include <linux/serial_core.h>
29676 #include <linux/serial.h>
29677 -
29678 +#include <asm/local.h>
29679
29680 #define MOD_AUTHOR "Option Wireless"
29681 #define MOD_DESCRIPTION "USB High Speed Option driver"
29682 @@ -257,7 +257,7 @@ struct hso_serial {
29683
29684 /* from usb_serial_port */
29685 struct tty_struct *tty;
29686 - int open_count;
29687 + local_t open_count;
29688 spinlock_t serial_lock;
29689
29690 int (*write_data) (struct hso_serial *serial);
29691 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29692 struct urb *urb;
29693
29694 urb = serial->rx_urb[0];
29695 - if (serial->open_count > 0) {
29696 + if (local_read(&serial->open_count) > 0) {
29697 count = put_rxbuf_data(urb, serial);
29698 if (count == -1)
29699 return;
29700 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29701 DUMP1(urb->transfer_buffer, urb->actual_length);
29702
29703 /* Anyone listening? */
29704 - if (serial->open_count == 0)
29705 + if (local_read(&serial->open_count) == 0)
29706 return;
29707
29708 if (status == 0) {
29709 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29710 spin_unlock_irq(&serial->serial_lock);
29711
29712 /* check for port already opened, if not set the termios */
29713 - serial->open_count++;
29714 - if (serial->open_count == 1) {
29715 + if (local_inc_return(&serial->open_count) == 1) {
29716 serial->rx_state = RX_IDLE;
29717 /* Force default termio settings */
29718 _hso_serial_set_termios(tty, NULL);
29719 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29720 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29721 if (result) {
29722 hso_stop_serial_device(serial->parent);
29723 - serial->open_count--;
29724 + local_dec(&serial->open_count);
29725 kref_put(&serial->parent->ref, hso_serial_ref_free);
29726 }
29727 } else {
29728 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29729
29730 /* reset the rts and dtr */
29731 /* do the actual close */
29732 - serial->open_count--;
29733 + local_dec(&serial->open_count);
29734
29735 - if (serial->open_count <= 0) {
29736 - serial->open_count = 0;
29737 + if (local_read(&serial->open_count) <= 0) {
29738 + local_set(&serial->open_count, 0);
29739 spin_lock_irq(&serial->serial_lock);
29740 if (serial->tty == tty) {
29741 serial->tty->driver_data = NULL;
29742 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29743
29744 /* the actual setup */
29745 spin_lock_irqsave(&serial->serial_lock, flags);
29746 - if (serial->open_count)
29747 + if (local_read(&serial->open_count))
29748 _hso_serial_set_termios(tty, old);
29749 else
29750 tty->termios = old;
29751 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29752 D1("Pending read interrupt on port %d\n", i);
29753 spin_lock(&serial->serial_lock);
29754 if (serial->rx_state == RX_IDLE &&
29755 - serial->open_count > 0) {
29756 + local_read(&serial->open_count) > 0) {
29757 /* Setup and send a ctrl req read on
29758 * port i */
29759 if (!serial->rx_urb_filled[0]) {
29760 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29761 /* Start all serial ports */
29762 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29763 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29764 - if (dev2ser(serial_table[i])->open_count) {
29765 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29766 result =
29767 hso_start_serial_device(serial_table[i], GFP_NOIO);
29768 hso_kick_transmit(dev2ser(serial_table[i]));
29769 diff -urNp linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c
29770 --- linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29771 +++ linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29772 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29773 * Return with error code if any of the queue indices
29774 * is out of range
29775 */
29776 - if (p->ring_index[i] < 0 ||
29777 - p->ring_index[i] >= adapter->num_rx_queues)
29778 + if (p->ring_index[i] >= adapter->num_rx_queues)
29779 return -EINVAL;
29780 }
29781
29782 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-config.h linux-3.0.3/drivers/net/vxge/vxge-config.h
29783 --- linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29784 +++ linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29785 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29786 void (*link_down)(struct __vxge_hw_device *devh);
29787 void (*crit_err)(struct __vxge_hw_device *devh,
29788 enum vxge_hw_event type, u64 ext_data);
29789 -};
29790 +} __no_const;
29791
29792 /*
29793 * struct __vxge_hw_blockpool_entry - Block private data structure
29794 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-main.c linux-3.0.3/drivers/net/vxge/vxge-main.c
29795 --- linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29796 +++ linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29797 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29798 struct sk_buff *completed[NR_SKB_COMPLETED];
29799 int more;
29800
29801 + pax_track_stack();
29802 +
29803 do {
29804 more = 0;
29805 skb_ptr = completed;
29806 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29807 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29808 int index;
29809
29810 + pax_track_stack();
29811 +
29812 /*
29813 * Filling
29814 * - itable with bucket numbers
29815 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-traffic.h linux-3.0.3/drivers/net/vxge/vxge-traffic.h
29816 --- linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29817 +++ linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29818 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29819 struct vxge_hw_mempool_dma *dma_object,
29820 u32 index,
29821 u32 is_last);
29822 -};
29823 +} __no_const;
29824
29825 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29826 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29827 diff -urNp linux-3.0.3/drivers/net/wan/cycx_x25.c linux-3.0.3/drivers/net/wan/cycx_x25.c
29828 --- linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29829 +++ linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29830 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29831 unsigned char hex[1024],
29832 * phex = hex;
29833
29834 + pax_track_stack();
29835 +
29836 if (len >= (sizeof(hex) / 2))
29837 len = (sizeof(hex) / 2) - 1;
29838
29839 diff -urNp linux-3.0.3/drivers/net/wan/hdlc_x25.c linux-3.0.3/drivers/net/wan/hdlc_x25.c
29840 --- linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29841 +++ linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29842 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29843
29844 static int x25_open(struct net_device *dev)
29845 {
29846 - struct lapb_register_struct cb;
29847 + static struct lapb_register_struct cb = {
29848 + .connect_confirmation = x25_connected,
29849 + .connect_indication = x25_connected,
29850 + .disconnect_confirmation = x25_disconnected,
29851 + .disconnect_indication = x25_disconnected,
29852 + .data_indication = x25_data_indication,
29853 + .data_transmit = x25_data_transmit
29854 + };
29855 int result;
29856
29857 - cb.connect_confirmation = x25_connected;
29858 - cb.connect_indication = x25_connected;
29859 - cb.disconnect_confirmation = x25_disconnected;
29860 - cb.disconnect_indication = x25_disconnected;
29861 - cb.data_indication = x25_data_indication;
29862 - cb.data_transmit = x25_data_transmit;
29863 -
29864 result = lapb_register(dev, &cb);
29865 if (result != LAPB_OK)
29866 return result;
29867 diff -urNp linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c
29868 --- linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29869 +++ linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29870 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29871 int do_autopm = 1;
29872 DECLARE_COMPLETION_ONSTACK(notif_completion);
29873
29874 + pax_track_stack();
29875 +
29876 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29877 i2400m, ack, ack_size);
29878 BUG_ON(_ack == i2400m->bm_ack_buf);
29879 diff -urNp linux-3.0.3/drivers/net/wireless/airo.c linux-3.0.3/drivers/net/wireless/airo.c
29880 --- linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29881 +++ linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29882 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29883 BSSListElement * loop_net;
29884 BSSListElement * tmp_net;
29885
29886 + pax_track_stack();
29887 +
29888 /* Blow away current list of scan results */
29889 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29890 list_move_tail (&loop_net->list, &ai->network_free_list);
29891 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29892 WepKeyRid wkr;
29893 int rc;
29894
29895 + pax_track_stack();
29896 +
29897 memset( &mySsid, 0, sizeof( mySsid ) );
29898 kfree (ai->flash);
29899 ai->flash = NULL;
29900 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29901 __le32 *vals = stats.vals;
29902 int len;
29903
29904 + pax_track_stack();
29905 +
29906 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29907 return -ENOMEM;
29908 data = file->private_data;
29909 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29910 /* If doLoseSync is not 1, we won't do a Lose Sync */
29911 int doLoseSync = -1;
29912
29913 + pax_track_stack();
29914 +
29915 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29916 return -ENOMEM;
29917 data = file->private_data;
29918 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29919 int i;
29920 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29921
29922 + pax_track_stack();
29923 +
29924 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29925 if (!qual)
29926 return -ENOMEM;
29927 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29928 CapabilityRid cap_rid;
29929 __le32 *vals = stats_rid.vals;
29930
29931 + pax_track_stack();
29932 +
29933 /* Get stats out of the card */
29934 clear_bit(JOB_WSTATS, &local->jobs);
29935 if (local->power.event) {
29936 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c
29937 --- linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29938 +++ linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29939 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29940 unsigned int v;
29941 u64 tsf;
29942
29943 + pax_track_stack();
29944 +
29945 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29946 len += snprintf(buf+len, sizeof(buf)-len,
29947 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29948 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29949 unsigned int len = 0;
29950 unsigned int i;
29951
29952 + pax_track_stack();
29953 +
29954 len += snprintf(buf+len, sizeof(buf)-len,
29955 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29956
29957 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29958 unsigned int i;
29959 unsigned int v;
29960
29961 + pax_track_stack();
29962 +
29963 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29964 sc->ah->ah_ant_mode);
29965 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29966 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29967 unsigned int len = 0;
29968 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29969
29970 + pax_track_stack();
29971 +
29972 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29973 sc->bssidmask);
29974 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29975 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29976 unsigned int len = 0;
29977 int i;
29978
29979 + pax_track_stack();
29980 +
29981 len += snprintf(buf+len, sizeof(buf)-len,
29982 "RX\n---------------------\n");
29983 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29984 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29985 char buf[700];
29986 unsigned int len = 0;
29987
29988 + pax_track_stack();
29989 +
29990 len += snprintf(buf+len, sizeof(buf)-len,
29991 "HW has PHY error counters:\t%s\n",
29992 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29993 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29994 struct ath5k_buf *bf, *bf0;
29995 int i, n;
29996
29997 + pax_track_stack();
29998 +
29999 len += snprintf(buf+len, sizeof(buf)-len,
30000 "available txbuffers: %d\n", sc->txbuf_len);
30001
30002 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30003 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
30004 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
30005 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30006 int i, im, j;
30007 int nmeasurement;
30008
30009 + pax_track_stack();
30010 +
30011 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30012 if (ah->txchainmask & (1 << i))
30013 num_chains++;
30014 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30015 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
30016 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
30017 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30018 int theta_low_bin = 0;
30019 int i;
30020
30021 + pax_track_stack();
30022 +
30023 /* disregard any bin that contains <= 16 samples */
30024 thresh_accum_cnt = 16;
30025 scale_factor = 5;
30026 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c
30027 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30028 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30029 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30030 char buf[512];
30031 unsigned int len = 0;
30032
30033 + pax_track_stack();
30034 +
30035 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30036 len += snprintf(buf + len, sizeof(buf) - len,
30037 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30038 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30039 u8 addr[ETH_ALEN];
30040 u32 tmp;
30041
30042 + pax_track_stack();
30043 +
30044 len += snprintf(buf + len, sizeof(buf) - len,
30045 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30046 wiphy_name(sc->hw->wiphy),
30047 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30048 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30049 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30050 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30051 unsigned int len = 0;
30052 int ret = 0;
30053
30054 + pax_track_stack();
30055 +
30056 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30057
30058 ath9k_htc_ps_wakeup(priv);
30059 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30060 unsigned int len = 0;
30061 int ret = 0;
30062
30063 + pax_track_stack();
30064 +
30065 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30066
30067 ath9k_htc_ps_wakeup(priv);
30068 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30069 unsigned int len = 0;
30070 int ret = 0;
30071
30072 + pax_track_stack();
30073 +
30074 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30075
30076 ath9k_htc_ps_wakeup(priv);
30077 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30078 char buf[512];
30079 unsigned int len = 0;
30080
30081 + pax_track_stack();
30082 +
30083 len += snprintf(buf + len, sizeof(buf) - len,
30084 "%20s : %10u\n", "Buffers queued",
30085 priv->debug.tx_stats.buf_queued);
30086 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30087 char buf[512];
30088 unsigned int len = 0;
30089
30090 + pax_track_stack();
30091 +
30092 spin_lock_bh(&priv->tx.tx_lock);
30093
30094 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30095 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30096 char buf[512];
30097 unsigned int len = 0;
30098
30099 + pax_track_stack();
30100 +
30101 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30102 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30103
30104 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h
30105 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
30106 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30107 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30108
30109 /* ANI */
30110 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30111 -};
30112 +} __no_const;
30113
30114 /**
30115 * struct ath_hw_ops - callbacks used by hardware code and driver code
30116 @@ -637,7 +637,7 @@ struct ath_hw_ops {
30117 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30118 struct ath_hw_antcomb_conf *antconf);
30119
30120 -};
30121 +} __no_const;
30122
30123 struct ath_nf_limits {
30124 s16 max;
30125 @@ -650,7 +650,7 @@ struct ath_nf_limits {
30126 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30127
30128 struct ath_hw {
30129 - struct ath_ops reg_ops;
30130 + ath_ops_no_const reg_ops;
30131
30132 struct ieee80211_hw *hw;
30133 struct ath_common common;
30134 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath.h linux-3.0.3/drivers/net/wireless/ath/ath.h
30135 --- linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30136 +++ linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30137 @@ -121,6 +121,7 @@ struct ath_ops {
30138 void (*write_flush) (void *);
30139 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30140 };
30141 +typedef struct ath_ops __no_const ath_ops_no_const;
30142
30143 struct ath_common;
30144 struct ath_bus_ops;
30145 diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c
30146 --- linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30147 +++ linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30148 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30149 int err;
30150 DECLARE_SSID_BUF(ssid);
30151
30152 + pax_track_stack();
30153 +
30154 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30155
30156 if (ssid_len)
30157 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30158 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30159 int err;
30160
30161 + pax_track_stack();
30162 +
30163 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30164 idx, keylen, len);
30165
30166 diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c
30167 --- linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30168 +++ linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30169 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30170 unsigned long flags;
30171 DECLARE_SSID_BUF(ssid);
30172
30173 + pax_track_stack();
30174 +
30175 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30176 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30177 print_ssid(ssid, info_element->data, info_element->len),
30178 diff -urNp linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c
30179 --- linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30180 +++ linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30181 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30182 */
30183 if (iwl3945_mod_params.disable_hw_scan) {
30184 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30185 - iwl3945_hw_ops.hw_scan = NULL;
30186 + pax_open_kernel();
30187 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30188 + pax_close_kernel();
30189 }
30190
30191 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30192 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30193 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30194 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30195 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30196 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30197 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30198
30199 + pax_track_stack();
30200 +
30201 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30202
30203 /* Treat uninitialized rate scaling data same as non-existing. */
30204 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30205 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30206 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30207
30208 + pax_track_stack();
30209 +
30210 /* Override starting rate (index 0) if needed for debug purposes */
30211 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30212
30213 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30214 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30215 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30216 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30217 int pos = 0;
30218 const size_t bufsz = sizeof(buf);
30219
30220 + pax_track_stack();
30221 +
30222 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30223 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30224 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30225 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30226 char buf[256 * NUM_IWL_RXON_CTX];
30227 const size_t bufsz = sizeof(buf);
30228
30229 + pax_track_stack();
30230 +
30231 for_each_context(priv, ctx) {
30232 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30233 ctx->ctxid);
30234 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h
30235 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30236 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30237 @@ -68,8 +68,8 @@ do {
30238 } while (0)
30239
30240 #else
30241 -#define IWL_DEBUG(__priv, level, fmt, args...)
30242 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30243 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30244 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30245 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30246 const void *p, u32 len)
30247 {}
30248 diff -urNp linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c
30249 --- linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30250 +++ linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30251 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30252 int buf_len = 512;
30253 size_t len = 0;
30254
30255 + pax_track_stack();
30256 +
30257 if (*ppos != 0)
30258 return 0;
30259 if (count < sizeof(buf))
30260 diff -urNp linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c
30261 --- linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30262 +++ linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30263 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30264 return -EINVAL;
30265
30266 if (fake_hw_scan) {
30267 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30268 - mac80211_hwsim_ops.sw_scan_start = NULL;
30269 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30270 + pax_open_kernel();
30271 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30272 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30273 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30274 + pax_close_kernel();
30275 }
30276
30277 spin_lock_init(&hwsim_radio_lock);
30278 diff -urNp linux-3.0.3/drivers/net/wireless/rndis_wlan.c linux-3.0.3/drivers/net/wireless/rndis_wlan.c
30279 --- linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30280 +++ linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30281 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30282
30283 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30284
30285 - if (rts_threshold < 0 || rts_threshold > 2347)
30286 + if (rts_threshold > 2347)
30287 rts_threshold = 2347;
30288
30289 tmp = cpu_to_le32(rts_threshold);
30290 diff -urNp linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30291 --- linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30292 +++ linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30293 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30294 u8 rfpath;
30295 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30296
30297 + pax_track_stack();
30298 +
30299 precommoncmdcnt = 0;
30300 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30301 MAX_PRECMD_CNT,
30302 diff -urNp linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h
30303 --- linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30304 +++ linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30305 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
30306 void (*reset)(struct wl1251 *wl);
30307 void (*enable_irq)(struct wl1251 *wl);
30308 void (*disable_irq)(struct wl1251 *wl);
30309 -};
30310 +} __no_const;
30311
30312 struct wl1251 {
30313 struct ieee80211_hw *hw;
30314 diff -urNp linux-3.0.3/drivers/net/wireless/wl12xx/spi.c linux-3.0.3/drivers/net/wireless/wl12xx/spi.c
30315 --- linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30316 +++ linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30317 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30318 u32 chunk_len;
30319 int i;
30320
30321 + pax_track_stack();
30322 +
30323 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30324
30325 spi_message_init(&m);
30326 diff -urNp linux-3.0.3/drivers/oprofile/buffer_sync.c linux-3.0.3/drivers/oprofile/buffer_sync.c
30327 --- linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30328 +++ linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30329 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30330 if (cookie == NO_COOKIE)
30331 offset = pc;
30332 if (cookie == INVALID_COOKIE) {
30333 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30334 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30335 offset = pc;
30336 }
30337 if (cookie != last_cookie) {
30338 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30339 /* add userspace sample */
30340
30341 if (!mm) {
30342 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30343 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30344 return 0;
30345 }
30346
30347 cookie = lookup_dcookie(mm, s->eip, &offset);
30348
30349 if (cookie == INVALID_COOKIE) {
30350 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30351 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30352 return 0;
30353 }
30354
30355 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30356 /* ignore backtraces if failed to add a sample */
30357 if (state == sb_bt_start) {
30358 state = sb_bt_ignore;
30359 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30360 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30361 }
30362 }
30363 release_mm(mm);
30364 diff -urNp linux-3.0.3/drivers/oprofile/event_buffer.c linux-3.0.3/drivers/oprofile/event_buffer.c
30365 --- linux-3.0.3/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30366 +++ linux-3.0.3/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30367 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30368 }
30369
30370 if (buffer_pos == buffer_size) {
30371 - atomic_inc(&oprofile_stats.event_lost_overflow);
30372 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30373 return;
30374 }
30375
30376 diff -urNp linux-3.0.3/drivers/oprofile/oprof.c linux-3.0.3/drivers/oprofile/oprof.c
30377 --- linux-3.0.3/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30378 +++ linux-3.0.3/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30379 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30380 if (oprofile_ops.switch_events())
30381 return;
30382
30383 - atomic_inc(&oprofile_stats.multiplex_counter);
30384 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30385 start_switch_worker();
30386 }
30387
30388 diff -urNp linux-3.0.3/drivers/oprofile/oprofilefs.c linux-3.0.3/drivers/oprofile/oprofilefs.c
30389 --- linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30390 +++ linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30391 @@ -186,7 +186,7 @@ static const struct file_operations atom
30392
30393
30394 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30395 - char const *name, atomic_t *val)
30396 + char const *name, atomic_unchecked_t *val)
30397 {
30398 return __oprofilefs_create_file(sb, root, name,
30399 &atomic_ro_fops, 0444, val);
30400 diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.c linux-3.0.3/drivers/oprofile/oprofile_stats.c
30401 --- linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30402 +++ linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30403 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30404 cpu_buf->sample_invalid_eip = 0;
30405 }
30406
30407 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30408 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30409 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30410 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30411 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30412 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30413 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30414 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30415 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30416 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30417 }
30418
30419
30420 diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.h linux-3.0.3/drivers/oprofile/oprofile_stats.h
30421 --- linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30422 +++ linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30423 @@ -13,11 +13,11 @@
30424 #include <asm/atomic.h>
30425
30426 struct oprofile_stat_struct {
30427 - atomic_t sample_lost_no_mm;
30428 - atomic_t sample_lost_no_mapping;
30429 - atomic_t bt_lost_no_mapping;
30430 - atomic_t event_lost_overflow;
30431 - atomic_t multiplex_counter;
30432 + atomic_unchecked_t sample_lost_no_mm;
30433 + atomic_unchecked_t sample_lost_no_mapping;
30434 + atomic_unchecked_t bt_lost_no_mapping;
30435 + atomic_unchecked_t event_lost_overflow;
30436 + atomic_unchecked_t multiplex_counter;
30437 };
30438
30439 extern struct oprofile_stat_struct oprofile_stats;
30440 diff -urNp linux-3.0.3/drivers/parport/procfs.c linux-3.0.3/drivers/parport/procfs.c
30441 --- linux-3.0.3/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30442 +++ linux-3.0.3/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30443 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30444
30445 *ppos += len;
30446
30447 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30448 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30449 }
30450
30451 #ifdef CONFIG_PARPORT_1284
30452 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30453
30454 *ppos += len;
30455
30456 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30457 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30458 }
30459 #endif /* IEEE1284.3 support. */
30460
30461 diff -urNp linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h
30462 --- linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30463 +++ linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30464 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30465 int (*hardware_test) (struct slot* slot, u32 value);
30466 u8 (*get_power) (struct slot* slot);
30467 int (*set_power) (struct slot* slot, int value);
30468 -};
30469 +} __no_const;
30470
30471 struct cpci_hp_controller {
30472 unsigned int irq;
30473 diff -urNp linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c
30474 --- linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30475 +++ linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30476 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30477
30478 void compaq_nvram_init (void __iomem *rom_start)
30479 {
30480 +
30481 +#ifndef CONFIG_PAX_KERNEXEC
30482 if (rom_start) {
30483 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30484 }
30485 +#endif
30486 +
30487 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30488
30489 /* initialize our int15 lock */
30490 diff -urNp linux-3.0.3/drivers/pci/pcie/aspm.c linux-3.0.3/drivers/pci/pcie/aspm.c
30491 --- linux-3.0.3/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30492 +++ linux-3.0.3/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30493 @@ -27,9 +27,9 @@
30494 #define MODULE_PARAM_PREFIX "pcie_aspm."
30495
30496 /* Note: those are not register definitions */
30497 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30498 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30499 -#define ASPM_STATE_L1 (4) /* L1 state */
30500 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30501 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30502 +#define ASPM_STATE_L1 (4U) /* L1 state */
30503 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30504 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30505
30506 diff -urNp linux-3.0.3/drivers/pci/probe.c linux-3.0.3/drivers/pci/probe.c
30507 --- linux-3.0.3/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30508 +++ linux-3.0.3/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30509 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30510 u32 l, sz, mask;
30511 u16 orig_cmd;
30512
30513 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30514 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30515
30516 if (!dev->mmio_always_on) {
30517 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30518 diff -urNp linux-3.0.3/drivers/pci/proc.c linux-3.0.3/drivers/pci/proc.c
30519 --- linux-3.0.3/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30520 +++ linux-3.0.3/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30521 @@ -476,7 +476,16 @@ static const struct file_operations proc
30522 static int __init pci_proc_init(void)
30523 {
30524 struct pci_dev *dev = NULL;
30525 +
30526 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30527 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30528 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30529 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30530 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30531 +#endif
30532 +#else
30533 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30534 +#endif
30535 proc_create("devices", 0, proc_bus_pci_dir,
30536 &proc_bus_pci_dev_operations);
30537 proc_initialized = 1;
30538 diff -urNp linux-3.0.3/drivers/pci/xen-pcifront.c linux-3.0.3/drivers/pci/xen-pcifront.c
30539 --- linux-3.0.3/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30540 +++ linux-3.0.3/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30541 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30542 struct pcifront_sd *sd = bus->sysdata;
30543 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30544
30545 + pax_track_stack();
30546 +
30547 if (verbose_request)
30548 dev_info(&pdev->xdev->dev,
30549 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30550 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30551 struct pcifront_sd *sd = bus->sysdata;
30552 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30553
30554 + pax_track_stack();
30555 +
30556 if (verbose_request)
30557 dev_info(&pdev->xdev->dev,
30558 "write dev=%04x:%02x:%02x.%01x - "
30559 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30560 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30561 struct msi_desc *entry;
30562
30563 + pax_track_stack();
30564 +
30565 if (nvec > SH_INFO_MAX_VEC) {
30566 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30567 " Increase SH_INFO_MAX_VEC.\n", nvec);
30568 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30569 struct pcifront_sd *sd = dev->bus->sysdata;
30570 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30571
30572 + pax_track_stack();
30573 +
30574 err = do_pci_op(pdev, &op);
30575
30576 /* What should do for error ? */
30577 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30578 struct pcifront_sd *sd = dev->bus->sysdata;
30579 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30580
30581 + pax_track_stack();
30582 +
30583 err = do_pci_op(pdev, &op);
30584 if (likely(!err)) {
30585 vector[0] = op.value;
30586 diff -urNp linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c
30587 --- linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30588 +++ linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30589 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30590 return 0;
30591 }
30592
30593 -void static hotkey_mask_warn_incomplete_mask(void)
30594 +static void hotkey_mask_warn_incomplete_mask(void)
30595 {
30596 /* log only what the user can fix... */
30597 const u32 wantedmask = hotkey_driver_mask &
30598 diff -urNp linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c
30599 --- linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30600 +++ linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30601 @@ -59,7 +59,7 @@ do { \
30602 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30603 } while(0)
30604
30605 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30606 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30607 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30608
30609 /*
30610 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30611
30612 cpu = get_cpu();
30613 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30614 +
30615 + pax_open_kernel();
30616 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30617 + pax_close_kernel();
30618
30619 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30620 spin_lock_irqsave(&pnp_bios_lock, flags);
30621 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30622 :"memory");
30623 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30624
30625 + pax_open_kernel();
30626 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30627 + pax_close_kernel();
30628 +
30629 put_cpu();
30630
30631 /* If we get here and this is set then the PnP BIOS faulted on us. */
30632 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30633 return status;
30634 }
30635
30636 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30637 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30638 {
30639 int i;
30640
30641 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30642 pnp_bios_callpoint.offset = header->fields.pm16offset;
30643 pnp_bios_callpoint.segment = PNP_CS16;
30644
30645 + pax_open_kernel();
30646 +
30647 for_each_possible_cpu(i) {
30648 struct desc_struct *gdt = get_cpu_gdt_table(i);
30649 if (!gdt)
30650 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30651 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30652 (unsigned long)__va(header->fields.pm16dseg));
30653 }
30654 +
30655 + pax_close_kernel();
30656 }
30657 diff -urNp linux-3.0.3/drivers/pnp/resource.c linux-3.0.3/drivers/pnp/resource.c
30658 --- linux-3.0.3/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30659 +++ linux-3.0.3/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30660 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30661 return 1;
30662
30663 /* check if the resource is valid */
30664 - if (*irq < 0 || *irq > 15)
30665 + if (*irq > 15)
30666 return 0;
30667
30668 /* check if the resource is reserved */
30669 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30670 return 1;
30671
30672 /* check if the resource is valid */
30673 - if (*dma < 0 || *dma == 4 || *dma > 7)
30674 + if (*dma == 4 || *dma > 7)
30675 return 0;
30676
30677 /* check if the resource is reserved */
30678 diff -urNp linux-3.0.3/drivers/power/bq27x00_battery.c linux-3.0.3/drivers/power/bq27x00_battery.c
30679 --- linux-3.0.3/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30680 +++ linux-3.0.3/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30681 @@ -67,7 +67,7 @@
30682 struct bq27x00_device_info;
30683 struct bq27x00_access_methods {
30684 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30685 -};
30686 +} __no_const;
30687
30688 enum bq27x00_chip { BQ27000, BQ27500 };
30689
30690 diff -urNp linux-3.0.3/drivers/regulator/max8660.c linux-3.0.3/drivers/regulator/max8660.c
30691 --- linux-3.0.3/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30692 +++ linux-3.0.3/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30693 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30694 max8660->shadow_regs[MAX8660_OVER1] = 5;
30695 } else {
30696 /* Otherwise devices can be toggled via software */
30697 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30698 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30699 + pax_open_kernel();
30700 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30701 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30702 + pax_close_kernel();
30703 }
30704
30705 /*
30706 diff -urNp linux-3.0.3/drivers/regulator/mc13892-regulator.c linux-3.0.3/drivers/regulator/mc13892-regulator.c
30707 --- linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30708 +++ linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30709 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30710 }
30711 mc13xxx_unlock(mc13892);
30712
30713 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30714 + pax_open_kernel();
30715 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30716 = mc13892_vcam_set_mode;
30717 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30718 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30719 = mc13892_vcam_get_mode;
30720 + pax_close_kernel();
30721 for (i = 0; i < pdata->num_regulators; i++) {
30722 init_data = &pdata->regulators[i];
30723 priv->regulators[i] = regulator_register(
30724 diff -urNp linux-3.0.3/drivers/rtc/rtc-dev.c linux-3.0.3/drivers/rtc/rtc-dev.c
30725 --- linux-3.0.3/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30726 +++ linux-3.0.3/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30727 @@ -14,6 +14,7 @@
30728 #include <linux/module.h>
30729 #include <linux/rtc.h>
30730 #include <linux/sched.h>
30731 +#include <linux/grsecurity.h>
30732 #include "rtc-core.h"
30733
30734 static dev_t rtc_devt;
30735 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30736 if (copy_from_user(&tm, uarg, sizeof(tm)))
30737 return -EFAULT;
30738
30739 + gr_log_timechange();
30740 +
30741 return rtc_set_time(rtc, &tm);
30742
30743 case RTC_PIE_ON:
30744 diff -urNp linux-3.0.3/drivers/scsi/aacraid/aacraid.h linux-3.0.3/drivers/scsi/aacraid/aacraid.h
30745 --- linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30746 +++ linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30747 @@ -492,7 +492,7 @@ struct adapter_ops
30748 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30749 /* Administrative operations */
30750 int (*adapter_comm)(struct aac_dev * dev, int comm);
30751 -};
30752 +} __no_const;
30753
30754 /*
30755 * Define which interrupt handler needs to be installed
30756 diff -urNp linux-3.0.3/drivers/scsi/aacraid/commctrl.c linux-3.0.3/drivers/scsi/aacraid/commctrl.c
30757 --- linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30758 +++ linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30759 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30760 u32 actual_fibsize64, actual_fibsize = 0;
30761 int i;
30762
30763 + pax_track_stack();
30764
30765 if (dev->in_reset) {
30766 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30767 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfad.c linux-3.0.3/drivers/scsi/bfa/bfad.c
30768 --- linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30769 +++ linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30770 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30771 struct bfad_vport_s *vport, *vport_new;
30772 struct bfa_fcs_driver_info_s driver_info;
30773
30774 + pax_track_stack();
30775 +
30776 /* Fill the driver_info info to fcs*/
30777 memset(&driver_info, 0, sizeof(driver_info));
30778 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30779 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c
30780 --- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30781 +++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30782 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30783 u16 len, count;
30784 u16 templen;
30785
30786 + pax_track_stack();
30787 +
30788 /*
30789 * get hba attributes
30790 */
30791 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30792 u8 count = 0;
30793 u16 templen;
30794
30795 + pax_track_stack();
30796 +
30797 /*
30798 * get port attributes
30799 */
30800 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c
30801 --- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30802 +++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30803 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30804 struct fc_rpsc_speed_info_s speeds;
30805 struct bfa_port_attr_s pport_attr;
30806
30807 + pax_track_stack();
30808 +
30809 bfa_trc(port->fcs, rx_fchs->s_id);
30810 bfa_trc(port->fcs, rx_fchs->d_id);
30811
30812 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa.h linux-3.0.3/drivers/scsi/bfa/bfa.h
30813 --- linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30814 +++ linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30815 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30816 u32 *nvecs, u32 *maxvec);
30817 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30818 u32 *end);
30819 -};
30820 +} __no_const;
30821 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30822
30823 struct bfa_iocfc_s {
30824 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h
30825 --- linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30826 +++ linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30827 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30828 bfa_ioc_disable_cbfn_t disable_cbfn;
30829 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30830 bfa_ioc_reset_cbfn_t reset_cbfn;
30831 -};
30832 +} __no_const;
30833
30834 /*
30835 * Heartbeat failure notification queue element.
30836 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30837 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30838 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30839 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30840 -};
30841 +} __no_const;
30842
30843 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30844 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30845 diff -urNp linux-3.0.3/drivers/scsi/BusLogic.c linux-3.0.3/drivers/scsi/BusLogic.c
30846 --- linux-3.0.3/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30847 +++ linux-3.0.3/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30848 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30849 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30850 *PrototypeHostAdapter)
30851 {
30852 + pax_track_stack();
30853 +
30854 /*
30855 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30856 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30857 diff -urNp linux-3.0.3/drivers/scsi/dpt_i2o.c linux-3.0.3/drivers/scsi/dpt_i2o.c
30858 --- linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30859 +++ linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30860 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30861 dma_addr_t addr;
30862 ulong flags = 0;
30863
30864 + pax_track_stack();
30865 +
30866 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30867 // get user msg size in u32s
30868 if(get_user(size, &user_msg[0])){
30869 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30870 s32 rcode;
30871 dma_addr_t addr;
30872
30873 + pax_track_stack();
30874 +
30875 memset(msg, 0 , sizeof(msg));
30876 len = scsi_bufflen(cmd);
30877 direction = 0x00000000;
30878 diff -urNp linux-3.0.3/drivers/scsi/eata.c linux-3.0.3/drivers/scsi/eata.c
30879 --- linux-3.0.3/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30880 +++ linux-3.0.3/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30881 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30882 struct hostdata *ha;
30883 char name[16];
30884
30885 + pax_track_stack();
30886 +
30887 sprintf(name, "%s%d", driver_name, j);
30888
30889 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30890 diff -urNp linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c
30891 --- linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30892 +++ linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30893 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30894 } buf;
30895 int rc;
30896
30897 + pax_track_stack();
30898 +
30899 fiph = (struct fip_header *)skb->data;
30900 sub = fiph->fip_subcode;
30901
30902 diff -urNp linux-3.0.3/drivers/scsi/gdth.c linux-3.0.3/drivers/scsi/gdth.c
30903 --- linux-3.0.3/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30904 +++ linux-3.0.3/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30905 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30906 unsigned long flags;
30907 gdth_ha_str *ha;
30908
30909 + pax_track_stack();
30910 +
30911 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30912 return -EFAULT;
30913 ha = gdth_find_ha(ldrv.ionode);
30914 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30915 gdth_ha_str *ha;
30916 int rval;
30917
30918 + pax_track_stack();
30919 +
30920 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30921 res.number >= MAX_HDRIVES)
30922 return -EFAULT;
30923 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30924 gdth_ha_str *ha;
30925 int rval;
30926
30927 + pax_track_stack();
30928 +
30929 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30930 return -EFAULT;
30931 ha = gdth_find_ha(gen.ionode);
30932 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30933 int i;
30934 gdth_cmd_str gdtcmd;
30935 char cmnd[MAX_COMMAND_SIZE];
30936 +
30937 + pax_track_stack();
30938 +
30939 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30940
30941 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30942 diff -urNp linux-3.0.3/drivers/scsi/gdth_proc.c linux-3.0.3/drivers/scsi/gdth_proc.c
30943 --- linux-3.0.3/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30944 +++ linux-3.0.3/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30945 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30946 u64 paddr;
30947
30948 char cmnd[MAX_COMMAND_SIZE];
30949 +
30950 + pax_track_stack();
30951 +
30952 memset(cmnd, 0xff, 12);
30953 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30954
30955 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30956 gdth_hget_str *phg;
30957 char cmnd[MAX_COMMAND_SIZE];
30958
30959 + pax_track_stack();
30960 +
30961 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30962 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30963 if (!gdtcmd || !estr)
30964 diff -urNp linux-3.0.3/drivers/scsi/hosts.c linux-3.0.3/drivers/scsi/hosts.c
30965 --- linux-3.0.3/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30966 +++ linux-3.0.3/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30967 @@ -42,7 +42,7 @@
30968 #include "scsi_logging.h"
30969
30970
30971 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
30972 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30973
30974
30975 static void scsi_host_cls_release(struct device *dev)
30976 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30977 * subtract one because we increment first then return, but we need to
30978 * know what the next host number was before increment
30979 */
30980 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30981 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30982 shost->dma_channel = 0xff;
30983
30984 /* These three are default values which can be overridden */
30985 diff -urNp linux-3.0.3/drivers/scsi/hpsa.c linux-3.0.3/drivers/scsi/hpsa.c
30986 --- linux-3.0.3/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30987 +++ linux-3.0.3/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30988 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30989 u32 a;
30990
30991 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30992 - return h->access.command_completed(h);
30993 + return h->access->command_completed(h);
30994
30995 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30996 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30997 @@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30998 while (!list_empty(&h->reqQ)) {
30999 c = list_entry(h->reqQ.next, struct CommandList, list);
31000 /* can't do anything if fifo is full */
31001 - if ((h->access.fifo_full(h))) {
31002 + if ((h->access->fifo_full(h))) {
31003 dev_warn(&h->pdev->dev, "fifo full\n");
31004 break;
31005 }
31006 @@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
31007 h->Qdepth--;
31008
31009 /* Tell the controller execute command */
31010 - h->access.submit_command(h, c);
31011 + h->access->submit_command(h, c);
31012
31013 /* Put job onto the completed Q */
31014 addQ(&h->cmpQ, c);
31015 @@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
31016
31017 static inline unsigned long get_next_completion(struct ctlr_info *h)
31018 {
31019 - return h->access.command_completed(h);
31020 + return h->access->command_completed(h);
31021 }
31022
31023 static inline bool interrupt_pending(struct ctlr_info *h)
31024 {
31025 - return h->access.intr_pending(h);
31026 + return h->access->intr_pending(h);
31027 }
31028
31029 static inline long interrupt_not_for_us(struct ctlr_info *h)
31030 {
31031 - return (h->access.intr_pending(h) == 0) ||
31032 + return (h->access->intr_pending(h) == 0) ||
31033 (h->interrupts_enabled == 0);
31034 }
31035
31036 @@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31037 if (prod_index < 0)
31038 return -ENODEV;
31039 h->product_name = products[prod_index].product_name;
31040 - h->access = *(products[prod_index].access);
31041 + h->access = products[prod_index].access;
31042
31043 if (hpsa_board_disabled(h->pdev)) {
31044 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31045 @@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31046 }
31047
31048 /* make sure the board interrupts are off */
31049 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31050 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31051
31052 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31053 goto clean2;
31054 @@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31055 * fake ones to scoop up any residual completions.
31056 */
31057 spin_lock_irqsave(&h->lock, flags);
31058 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31059 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31060 spin_unlock_irqrestore(&h->lock, flags);
31061 free_irq(h->intr[h->intr_mode], h);
31062 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31063 @@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31064 dev_info(&h->pdev->dev, "Board READY.\n");
31065 dev_info(&h->pdev->dev,
31066 "Waiting for stale completions to drain.\n");
31067 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31068 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31069 msleep(10000);
31070 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31071 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31072
31073 rc = controller_reset_failed(h->cfgtable);
31074 if (rc)
31075 @@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31076 }
31077
31078 /* Turn the interrupts on so we can service requests */
31079 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31080 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31081
31082 hpsa_hba_inquiry(h);
31083 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31084 @@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31085 * To write all data in the battery backed cache to disks
31086 */
31087 hpsa_flush_cache(h);
31088 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31089 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31090 free_irq(h->intr[h->intr_mode], h);
31091 #ifdef CONFIG_PCI_MSI
31092 if (h->msix_vector)
31093 @@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31094 return;
31095 }
31096 /* Change the access methods to the performant access methods */
31097 - h->access = SA5_performant_access;
31098 + h->access = &SA5_performant_access;
31099 h->transMethod = CFGTBL_Trans_Performant;
31100 }
31101
31102 diff -urNp linux-3.0.3/drivers/scsi/hpsa.h linux-3.0.3/drivers/scsi/hpsa.h
31103 --- linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
31104 +++ linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31105 @@ -73,7 +73,7 @@ struct ctlr_info {
31106 unsigned int msix_vector;
31107 unsigned int msi_vector;
31108 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31109 - struct access_method access;
31110 + struct access_method *access;
31111
31112 /* queue and queue Info */
31113 struct list_head reqQ;
31114 diff -urNp linux-3.0.3/drivers/scsi/ips.h linux-3.0.3/drivers/scsi/ips.h
31115 --- linux-3.0.3/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31116 +++ linux-3.0.3/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31117 @@ -1027,7 +1027,7 @@ typedef struct {
31118 int (*intr)(struct ips_ha *);
31119 void (*enableint)(struct ips_ha *);
31120 uint32_t (*statupd)(struct ips_ha *);
31121 -} ips_hw_func_t;
31122 +} __no_const ips_hw_func_t;
31123
31124 typedef struct ips_ha {
31125 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31126 diff -urNp linux-3.0.3/drivers/scsi/libfc/fc_exch.c linux-3.0.3/drivers/scsi/libfc/fc_exch.c
31127 --- linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31128 +++ linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31129 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31130 * all together if not used XXX
31131 */
31132 struct {
31133 - atomic_t no_free_exch;
31134 - atomic_t no_free_exch_xid;
31135 - atomic_t xid_not_found;
31136 - atomic_t xid_busy;
31137 - atomic_t seq_not_found;
31138 - atomic_t non_bls_resp;
31139 + atomic_unchecked_t no_free_exch;
31140 + atomic_unchecked_t no_free_exch_xid;
31141 + atomic_unchecked_t xid_not_found;
31142 + atomic_unchecked_t xid_busy;
31143 + atomic_unchecked_t seq_not_found;
31144 + atomic_unchecked_t non_bls_resp;
31145 } stats;
31146 };
31147
31148 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31149 /* allocate memory for exchange */
31150 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31151 if (!ep) {
31152 - atomic_inc(&mp->stats.no_free_exch);
31153 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31154 goto out;
31155 }
31156 memset(ep, 0, sizeof(*ep));
31157 @@ -761,7 +761,7 @@ out:
31158 return ep;
31159 err:
31160 spin_unlock_bh(&pool->lock);
31161 - atomic_inc(&mp->stats.no_free_exch_xid);
31162 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31163 mempool_free(ep, mp->ep_pool);
31164 return NULL;
31165 }
31166 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31167 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31168 ep = fc_exch_find(mp, xid);
31169 if (!ep) {
31170 - atomic_inc(&mp->stats.xid_not_found);
31171 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31172 reject = FC_RJT_OX_ID;
31173 goto out;
31174 }
31175 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31176 ep = fc_exch_find(mp, xid);
31177 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31178 if (ep) {
31179 - atomic_inc(&mp->stats.xid_busy);
31180 + atomic_inc_unchecked(&mp->stats.xid_busy);
31181 reject = FC_RJT_RX_ID;
31182 goto rel;
31183 }
31184 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31185 }
31186 xid = ep->xid; /* get our XID */
31187 } else if (!ep) {
31188 - atomic_inc(&mp->stats.xid_not_found);
31189 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31190 reject = FC_RJT_RX_ID; /* XID not found */
31191 goto out;
31192 }
31193 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31194 } else {
31195 sp = &ep->seq;
31196 if (sp->id != fh->fh_seq_id) {
31197 - atomic_inc(&mp->stats.seq_not_found);
31198 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31199 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31200 goto rel;
31201 }
31202 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31203
31204 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31205 if (!ep) {
31206 - atomic_inc(&mp->stats.xid_not_found);
31207 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31208 goto out;
31209 }
31210 if (ep->esb_stat & ESB_ST_COMPLETE) {
31211 - atomic_inc(&mp->stats.xid_not_found);
31212 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31213 goto rel;
31214 }
31215 if (ep->rxid == FC_XID_UNKNOWN)
31216 ep->rxid = ntohs(fh->fh_rx_id);
31217 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31218 - atomic_inc(&mp->stats.xid_not_found);
31219 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31220 goto rel;
31221 }
31222 if (ep->did != ntoh24(fh->fh_s_id) &&
31223 ep->did != FC_FID_FLOGI) {
31224 - atomic_inc(&mp->stats.xid_not_found);
31225 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31226 goto rel;
31227 }
31228 sof = fr_sof(fp);
31229 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31230 sp->ssb_stat |= SSB_ST_RESP;
31231 sp->id = fh->fh_seq_id;
31232 } else if (sp->id != fh->fh_seq_id) {
31233 - atomic_inc(&mp->stats.seq_not_found);
31234 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31235 goto rel;
31236 }
31237
31238 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31239 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31240
31241 if (!sp)
31242 - atomic_inc(&mp->stats.xid_not_found);
31243 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31244 else
31245 - atomic_inc(&mp->stats.non_bls_resp);
31246 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31247
31248 fc_frame_free(fp);
31249 }
31250 diff -urNp linux-3.0.3/drivers/scsi/libsas/sas_ata.c linux-3.0.3/drivers/scsi/libsas/sas_ata.c
31251 --- linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31252 +++ linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31253 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31254 .postreset = ata_std_postreset,
31255 .error_handler = ata_std_error_handler,
31256 .post_internal_cmd = sas_ata_post_internal,
31257 - .qc_defer = ata_std_qc_defer,
31258 + .qc_defer = ata_std_qc_defer,
31259 .qc_prep = ata_noop_qc_prep,
31260 .qc_issue = sas_ata_qc_issue,
31261 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31262 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c
31263 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31264 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31265 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31266
31267 #include <linux/debugfs.h>
31268
31269 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31270 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31271 static unsigned long lpfc_debugfs_start_time = 0L;
31272
31273 /* iDiag */
31274 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31275 lpfc_debugfs_enable = 0;
31276
31277 len = 0;
31278 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31279 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31280 (lpfc_debugfs_max_disc_trc - 1);
31281 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31282 dtp = vport->disc_trc + i;
31283 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31284 lpfc_debugfs_enable = 0;
31285
31286 len = 0;
31287 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31288 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31289 (lpfc_debugfs_max_slow_ring_trc - 1);
31290 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31291 dtp = phba->slow_ring_trc + i;
31292 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31293 uint32_t *ptr;
31294 char buffer[1024];
31295
31296 + pax_track_stack();
31297 +
31298 off = 0;
31299 spin_lock_irq(&phba->hbalock);
31300
31301 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31302 !vport || !vport->disc_trc)
31303 return;
31304
31305 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31306 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31307 (lpfc_debugfs_max_disc_trc - 1);
31308 dtp = vport->disc_trc + index;
31309 dtp->fmt = fmt;
31310 dtp->data1 = data1;
31311 dtp->data2 = data2;
31312 dtp->data3 = data3;
31313 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31314 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31315 dtp->jif = jiffies;
31316 #endif
31317 return;
31318 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31319 !phba || !phba->slow_ring_trc)
31320 return;
31321
31322 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31323 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31324 (lpfc_debugfs_max_slow_ring_trc - 1);
31325 dtp = phba->slow_ring_trc + index;
31326 dtp->fmt = fmt;
31327 dtp->data1 = data1;
31328 dtp->data2 = data2;
31329 dtp->data3 = data3;
31330 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31331 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31332 dtp->jif = jiffies;
31333 #endif
31334 return;
31335 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31336 "slow_ring buffer\n");
31337 goto debug_failed;
31338 }
31339 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31340 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31341 memset(phba->slow_ring_trc, 0,
31342 (sizeof(struct lpfc_debugfs_trc) *
31343 lpfc_debugfs_max_slow_ring_trc));
31344 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31345 "buffer\n");
31346 goto debug_failed;
31347 }
31348 - atomic_set(&vport->disc_trc_cnt, 0);
31349 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31350
31351 snprintf(name, sizeof(name), "discovery_trace");
31352 vport->debug_disc_trc =
31353 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc.h linux-3.0.3/drivers/scsi/lpfc/lpfc.h
31354 --- linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31355 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31356 @@ -420,7 +420,7 @@ struct lpfc_vport {
31357 struct dentry *debug_nodelist;
31358 struct dentry *vport_debugfs_root;
31359 struct lpfc_debugfs_trc *disc_trc;
31360 - atomic_t disc_trc_cnt;
31361 + atomic_unchecked_t disc_trc_cnt;
31362 #endif
31363 uint8_t stat_data_enabled;
31364 uint8_t stat_data_blocked;
31365 @@ -826,8 +826,8 @@ struct lpfc_hba {
31366 struct timer_list fabric_block_timer;
31367 unsigned long bit_flags;
31368 #define FABRIC_COMANDS_BLOCKED 0
31369 - atomic_t num_rsrc_err;
31370 - atomic_t num_cmd_success;
31371 + atomic_unchecked_t num_rsrc_err;
31372 + atomic_unchecked_t num_cmd_success;
31373 unsigned long last_rsrc_error_time;
31374 unsigned long last_ramp_down_time;
31375 unsigned long last_ramp_up_time;
31376 @@ -841,7 +841,7 @@ struct lpfc_hba {
31377 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31378 struct dentry *debug_slow_ring_trc;
31379 struct lpfc_debugfs_trc *slow_ring_trc;
31380 - atomic_t slow_ring_trc_cnt;
31381 + atomic_unchecked_t slow_ring_trc_cnt;
31382 /* iDiag debugfs sub-directory */
31383 struct dentry *idiag_root;
31384 struct dentry *idiag_pci_cfg;
31385 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c
31386 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31387 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31388 @@ -9923,8 +9923,10 @@ lpfc_init(void)
31389 printk(LPFC_COPYRIGHT "\n");
31390
31391 if (lpfc_enable_npiv) {
31392 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31393 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31394 + pax_open_kernel();
31395 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31396 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31397 + pax_close_kernel();
31398 }
31399 lpfc_transport_template =
31400 fc_attach_transport(&lpfc_transport_functions);
31401 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c
31402 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31403 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31404 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31405 uint32_t evt_posted;
31406
31407 spin_lock_irqsave(&phba->hbalock, flags);
31408 - atomic_inc(&phba->num_rsrc_err);
31409 + atomic_inc_unchecked(&phba->num_rsrc_err);
31410 phba->last_rsrc_error_time = jiffies;
31411
31412 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31413 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31414 unsigned long flags;
31415 struct lpfc_hba *phba = vport->phba;
31416 uint32_t evt_posted;
31417 - atomic_inc(&phba->num_cmd_success);
31418 + atomic_inc_unchecked(&phba->num_cmd_success);
31419
31420 if (vport->cfg_lun_queue_depth <= queue_depth)
31421 return;
31422 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31423 unsigned long num_rsrc_err, num_cmd_success;
31424 int i;
31425
31426 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31427 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31428 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31429 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31430
31431 vports = lpfc_create_vport_work_array(phba);
31432 if (vports != NULL)
31433 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31434 }
31435 }
31436 lpfc_destroy_vport_work_array(phba, vports);
31437 - atomic_set(&phba->num_rsrc_err, 0);
31438 - atomic_set(&phba->num_cmd_success, 0);
31439 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31440 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31441 }
31442
31443 /**
31444 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31445 }
31446 }
31447 lpfc_destroy_vport_work_array(phba, vports);
31448 - atomic_set(&phba->num_rsrc_err, 0);
31449 - atomic_set(&phba->num_cmd_success, 0);
31450 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31451 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31452 }
31453
31454 /**
31455 diff -urNp linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c
31456 --- linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31457 +++ linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31458 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31459 int rval;
31460 int i;
31461
31462 + pax_track_stack();
31463 +
31464 // Allocate memory for the base list of scb for management module.
31465 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31466
31467 diff -urNp linux-3.0.3/drivers/scsi/osd/osd_initiator.c linux-3.0.3/drivers/scsi/osd/osd_initiator.c
31468 --- linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31469 +++ linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31470 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31471 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31472 int ret;
31473
31474 + pax_track_stack();
31475 +
31476 or = osd_start_request(od, GFP_KERNEL);
31477 if (!or)
31478 return -ENOMEM;
31479 diff -urNp linux-3.0.3/drivers/scsi/pmcraid.c linux-3.0.3/drivers/scsi/pmcraid.c
31480 --- linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31481 +++ linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31482 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31483 res->scsi_dev = scsi_dev;
31484 scsi_dev->hostdata = res;
31485 res->change_detected = 0;
31486 - atomic_set(&res->read_failures, 0);
31487 - atomic_set(&res->write_failures, 0);
31488 + atomic_set_unchecked(&res->read_failures, 0);
31489 + atomic_set_unchecked(&res->write_failures, 0);
31490 rc = 0;
31491 }
31492 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31493 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31494
31495 /* If this was a SCSI read/write command keep count of errors */
31496 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31497 - atomic_inc(&res->read_failures);
31498 + atomic_inc_unchecked(&res->read_failures);
31499 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31500 - atomic_inc(&res->write_failures);
31501 + atomic_inc_unchecked(&res->write_failures);
31502
31503 if (!RES_IS_GSCSI(res->cfg_entry) &&
31504 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31505 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31506 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31507 * hrrq_id assigned here in queuecommand
31508 */
31509 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31510 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31511 pinstance->num_hrrq;
31512 cmd->cmd_done = pmcraid_io_done;
31513
31514 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31515 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31516 * hrrq_id assigned here in queuecommand
31517 */
31518 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31519 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31520 pinstance->num_hrrq;
31521
31522 if (request_size) {
31523 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31524
31525 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31526 /* add resources only after host is added into system */
31527 - if (!atomic_read(&pinstance->expose_resources))
31528 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31529 return;
31530
31531 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31532 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31533 init_waitqueue_head(&pinstance->reset_wait_q);
31534
31535 atomic_set(&pinstance->outstanding_cmds, 0);
31536 - atomic_set(&pinstance->last_message_id, 0);
31537 - atomic_set(&pinstance->expose_resources, 0);
31538 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31539 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31540
31541 INIT_LIST_HEAD(&pinstance->free_res_q);
31542 INIT_LIST_HEAD(&pinstance->used_res_q);
31543 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31544 /* Schedule worker thread to handle CCN and take care of adding and
31545 * removing devices to OS
31546 */
31547 - atomic_set(&pinstance->expose_resources, 1);
31548 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31549 schedule_work(&pinstance->worker_q);
31550 return rc;
31551
31552 diff -urNp linux-3.0.3/drivers/scsi/pmcraid.h linux-3.0.3/drivers/scsi/pmcraid.h
31553 --- linux-3.0.3/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31554 +++ linux-3.0.3/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31555 @@ -749,7 +749,7 @@ struct pmcraid_instance {
31556 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31557
31558 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31559 - atomic_t last_message_id;
31560 + atomic_unchecked_t last_message_id;
31561
31562 /* configuration table */
31563 struct pmcraid_config_table *cfg_table;
31564 @@ -778,7 +778,7 @@ struct pmcraid_instance {
31565 atomic_t outstanding_cmds;
31566
31567 /* should add/delete resources to mid-layer now ?*/
31568 - atomic_t expose_resources;
31569 + atomic_unchecked_t expose_resources;
31570
31571
31572
31573 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31574 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31575 };
31576 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31577 - atomic_t read_failures; /* count of failed READ commands */
31578 - atomic_t write_failures; /* count of failed WRITE commands */
31579 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31580 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31581
31582 /* To indicate add/delete/modify during CCN */
31583 u8 change_detected;
31584 diff -urNp linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h
31585 --- linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31586 +++ linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31587 @@ -2244,7 +2244,7 @@ struct isp_operations {
31588 int (*get_flash_version) (struct scsi_qla_host *, void *);
31589 int (*start_scsi) (srb_t *);
31590 int (*abort_isp) (struct scsi_qla_host *);
31591 -};
31592 +} __no_const;
31593
31594 /* MSI-X Support *************************************************************/
31595
31596 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h
31597 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31598 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31599 @@ -256,7 +256,7 @@ struct ddb_entry {
31600 atomic_t retry_relogin_timer; /* Min Time between relogins
31601 * (4000 only) */
31602 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31603 - atomic_t relogin_retry_count; /* Num of times relogin has been
31604 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31605 * retried */
31606
31607 uint16_t port;
31608 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c
31609 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31610 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31611 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31612 ddb_entry->fw_ddb_index = fw_ddb_index;
31613 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31614 atomic_set(&ddb_entry->relogin_timer, 0);
31615 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31616 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31617 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31618 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31619 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31620 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31621 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31622 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31623 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31624 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31625 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31626 atomic_set(&ddb_entry->relogin_timer, 0);
31627 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31628 iscsi_unblock_session(ddb_entry->sess);
31629 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c
31630 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31631 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31632 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31633 ddb_entry->fw_ddb_device_state ==
31634 DDB_DS_SESSION_FAILED) {
31635 /* Reset retry relogin timer */
31636 - atomic_inc(&ddb_entry->relogin_retry_count);
31637 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31638 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31639 " timed out-retrying"
31640 " relogin (%d)\n",
31641 ha->host_no,
31642 ddb_entry->fw_ddb_index,
31643 - atomic_read(&ddb_entry->
31644 + atomic_read_unchecked(&ddb_entry->
31645 relogin_retry_count))
31646 );
31647 start_dpc++;
31648 diff -urNp linux-3.0.3/drivers/scsi/scsi.c linux-3.0.3/drivers/scsi/scsi.c
31649 --- linux-3.0.3/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31650 +++ linux-3.0.3/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31651 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31652 unsigned long timeout;
31653 int rtn = 0;
31654
31655 - atomic_inc(&cmd->device->iorequest_cnt);
31656 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31657
31658 /* check if the device is still usable */
31659 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31660 diff -urNp linux-3.0.3/drivers/scsi/scsi_debug.c linux-3.0.3/drivers/scsi/scsi_debug.c
31661 --- linux-3.0.3/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31662 +++ linux-3.0.3/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31663 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31664 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31665 unsigned char *cmd = (unsigned char *)scp->cmnd;
31666
31667 + pax_track_stack();
31668 +
31669 if ((errsts = check_readiness(scp, 1, devip)))
31670 return errsts;
31671 memset(arr, 0, sizeof(arr));
31672 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31673 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31674 unsigned char *cmd = (unsigned char *)scp->cmnd;
31675
31676 + pax_track_stack();
31677 +
31678 if ((errsts = check_readiness(scp, 1, devip)))
31679 return errsts;
31680 memset(arr, 0, sizeof(arr));
31681 diff -urNp linux-3.0.3/drivers/scsi/scsi_lib.c linux-3.0.3/drivers/scsi/scsi_lib.c
31682 --- linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31683 +++ linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31684 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31685 shost = sdev->host;
31686 scsi_init_cmd_errh(cmd);
31687 cmd->result = DID_NO_CONNECT << 16;
31688 - atomic_inc(&cmd->device->iorequest_cnt);
31689 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31690
31691 /*
31692 * SCSI request completion path will do scsi_device_unbusy(),
31693 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31694
31695 INIT_LIST_HEAD(&cmd->eh_entry);
31696
31697 - atomic_inc(&cmd->device->iodone_cnt);
31698 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31699 if (cmd->result)
31700 - atomic_inc(&cmd->device->ioerr_cnt);
31701 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31702
31703 disposition = scsi_decide_disposition(cmd);
31704 if (disposition != SUCCESS &&
31705 diff -urNp linux-3.0.3/drivers/scsi/scsi_sysfs.c linux-3.0.3/drivers/scsi/scsi_sysfs.c
31706 --- linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31707 +++ linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31708 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31709 char *buf) \
31710 { \
31711 struct scsi_device *sdev = to_scsi_device(dev); \
31712 - unsigned long long count = atomic_read(&sdev->field); \
31713 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31714 return snprintf(buf, 20, "0x%llx\n", count); \
31715 } \
31716 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31717 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_fc.c linux-3.0.3/drivers/scsi/scsi_transport_fc.c
31718 --- linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31719 +++ linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31720 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31721 * Netlink Infrastructure
31722 */
31723
31724 -static atomic_t fc_event_seq;
31725 +static atomic_unchecked_t fc_event_seq;
31726
31727 /**
31728 * fc_get_event_number - Obtain the next sequential FC event number
31729 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31730 u32
31731 fc_get_event_number(void)
31732 {
31733 - return atomic_add_return(1, &fc_event_seq);
31734 + return atomic_add_return_unchecked(1, &fc_event_seq);
31735 }
31736 EXPORT_SYMBOL(fc_get_event_number);
31737
31738 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31739 {
31740 int error;
31741
31742 - atomic_set(&fc_event_seq, 0);
31743 + atomic_set_unchecked(&fc_event_seq, 0);
31744
31745 error = transport_class_register(&fc_host_class);
31746 if (error)
31747 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31748 char *cp;
31749
31750 *val = simple_strtoul(buf, &cp, 0);
31751 - if ((*cp && (*cp != '\n')) || (*val < 0))
31752 + if (*cp && (*cp != '\n'))
31753 return -EINVAL;
31754 /*
31755 * Check for overflow; dev_loss_tmo is u32
31756 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c
31757 --- linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31758 +++ linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31759 @@ -83,7 +83,7 @@ struct iscsi_internal {
31760 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31761 };
31762
31763 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31764 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31765 static struct workqueue_struct *iscsi_eh_timer_workq;
31766
31767 /*
31768 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31769 int err;
31770
31771 ihost = shost->shost_data;
31772 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31773 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31774
31775 if (id == ISCSI_MAX_TARGET) {
31776 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31777 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31778 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31779 ISCSI_TRANSPORT_VERSION);
31780
31781 - atomic_set(&iscsi_session_nr, 0);
31782 + atomic_set_unchecked(&iscsi_session_nr, 0);
31783
31784 err = class_register(&iscsi_transport_class);
31785 if (err)
31786 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_srp.c linux-3.0.3/drivers/scsi/scsi_transport_srp.c
31787 --- linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31788 +++ linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31789 @@ -33,7 +33,7 @@
31790 #include "scsi_transport_srp_internal.h"
31791
31792 struct srp_host_attrs {
31793 - atomic_t next_port_id;
31794 + atomic_unchecked_t next_port_id;
31795 };
31796 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31797
31798 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31799 struct Scsi_Host *shost = dev_to_shost(dev);
31800 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31801
31802 - atomic_set(&srp_host->next_port_id, 0);
31803 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31804 return 0;
31805 }
31806
31807 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31808 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31809 rport->roles = ids->roles;
31810
31811 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31812 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31813 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31814
31815 transport_setup_device(&rport->dev);
31816 diff -urNp linux-3.0.3/drivers/scsi/sg.c linux-3.0.3/drivers/scsi/sg.c
31817 --- linux-3.0.3/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31818 +++ linux-3.0.3/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31819 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31820 const struct file_operations * fops;
31821 };
31822
31823 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31824 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31825 {"allow_dio", &adio_fops},
31826 {"debug", &debug_fops},
31827 {"def_reserved_size", &dressz_fops},
31828 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31829 {
31830 int k, mask;
31831 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31832 - struct sg_proc_leaf * leaf;
31833 + const struct sg_proc_leaf * leaf;
31834
31835 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31836 if (!sg_proc_sgp)
31837 diff -urNp linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c
31838 --- linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31839 +++ linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31840 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31841 int do_iounmap = 0;
31842 int do_disable_device = 1;
31843
31844 + pax_track_stack();
31845 +
31846 memset(&sym_dev, 0, sizeof(sym_dev));
31847 memset(&nvram, 0, sizeof(nvram));
31848 sym_dev.pdev = pdev;
31849 diff -urNp linux-3.0.3/drivers/scsi/vmw_pvscsi.c linux-3.0.3/drivers/scsi/vmw_pvscsi.c
31850 --- linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31851 +++ linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31852 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31853 dma_addr_t base;
31854 unsigned i;
31855
31856 + pax_track_stack();
31857 +
31858 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31859 cmd.reqRingNumPages = adapter->req_pages;
31860 cmd.cmpRingNumPages = adapter->cmp_pages;
31861 diff -urNp linux-3.0.3/drivers/spi/spi.c linux-3.0.3/drivers/spi/spi.c
31862 --- linux-3.0.3/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31863 +++ linux-3.0.3/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31864 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31865 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31866
31867 /* portable code must never pass more than 32 bytes */
31868 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31869 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31870
31871 static u8 *buf;
31872
31873 diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31874 --- linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31875 +++ linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31876 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31877 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31878
31879
31880 -static struct net_device_ops ar6000_netdev_ops = {
31881 +static net_device_ops_no_const ar6000_netdev_ops = {
31882 .ndo_init = NULL,
31883 .ndo_open = ar6000_open,
31884 .ndo_stop = ar6000_close,
31885 diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31886 --- linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31887 +++ linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31888 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31889 typedef struct ar6k_pal_config_s
31890 {
31891 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31892 -}ar6k_pal_config_t;
31893 +} __no_const ar6k_pal_config_t;
31894
31895 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31896 #endif /* _AR6K_PAL_H_ */
31897 diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31898 --- linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31899 +++ linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31900 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31901 free_netdev(ifp->net);
31902 }
31903 /* Allocate etherdev, including space for private structure */
31904 - ifp->net = alloc_etherdev(sizeof(dhd));
31905 + ifp->net = alloc_etherdev(sizeof(*dhd));
31906 if (!ifp->net) {
31907 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31908 ret = -ENOMEM;
31909 }
31910 if (ret == 0) {
31911 strcpy(ifp->net->name, ifp->name);
31912 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31913 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31914 err = dhd_net_attach(&dhd->pub, ifp->idx);
31915 if (err != 0) {
31916 DHD_ERROR(("%s: dhd_net_attach failed, "
31917 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31918 strcpy(nv_path, nvram_path);
31919
31920 /* Allocate etherdev, including space for private structure */
31921 - net = alloc_etherdev(sizeof(dhd));
31922 + net = alloc_etherdev(sizeof(*dhd));
31923 if (!net) {
31924 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31925 goto fail;
31926 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31927 /*
31928 * Save the dhd_info into the priv
31929 */
31930 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31931 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31932
31933 /* Set network interface name if it was provided as module parameter */
31934 if (iface_name[0]) {
31935 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31936 /*
31937 * Save the dhd_info into the priv
31938 */
31939 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31940 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31941
31942 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31943 g_bus = bus;
31944 diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31945 --- linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31946 +++ linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31947 @@ -593,7 +593,7 @@ struct phy_func_ptr {
31948 initfn_t carrsuppr;
31949 rxsigpwrfn_t rxsigpwr;
31950 detachfn_t detach;
31951 -};
31952 +} __no_const;
31953 typedef struct phy_func_ptr phy_func_ptr_t;
31954
31955 struct phy_info {
31956 diff -urNp linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h
31957 --- linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31958 +++ linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31959 @@ -185,7 +185,7 @@ typedef struct {
31960 u16 func, uint bustype, void *regsva, void *param);
31961 /* detach from device */
31962 void (*detach) (void *ch);
31963 -} bcmsdh_driver_t;
31964 +} __no_const bcmsdh_driver_t;
31965
31966 /* platform specific/high level functions */
31967 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31968 diff -urNp linux-3.0.3/drivers/staging/et131x/et1310_tx.c linux-3.0.3/drivers/staging/et131x/et1310_tx.c
31969 --- linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31970 +++ linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31971 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31972 struct net_device_stats *stats = &etdev->net_stats;
31973
31974 if (tcb->flags & fMP_DEST_BROAD)
31975 - atomic_inc(&etdev->Stats.brdcstxmt);
31976 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31977 else if (tcb->flags & fMP_DEST_MULTI)
31978 - atomic_inc(&etdev->Stats.multixmt);
31979 + atomic_inc_unchecked(&etdev->Stats.multixmt);
31980 else
31981 - atomic_inc(&etdev->Stats.unixmt);
31982 + atomic_inc_unchecked(&etdev->Stats.unixmt);
31983
31984 if (tcb->skb) {
31985 stats->tx_bytes += tcb->skb->len;
31986 diff -urNp linux-3.0.3/drivers/staging/et131x/et131x_adapter.h linux-3.0.3/drivers/staging/et131x/et131x_adapter.h
31987 --- linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31988 +++ linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31989 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31990 * operations
31991 */
31992 u32 unircv; /* # multicast packets received */
31993 - atomic_t unixmt; /* # multicast packets for Tx */
31994 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31995 u32 multircv; /* # multicast packets received */
31996 - atomic_t multixmt; /* # multicast packets for Tx */
31997 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31998 u32 brdcstrcv; /* # broadcast packets received */
31999 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
32000 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32001 u32 norcvbuf; /* # Rx packets discarded */
32002 u32 noxmtbuf; /* # Tx packets discarded */
32003
32004 diff -urNp linux-3.0.3/drivers/staging/hv/channel.c linux-3.0.3/drivers/staging/hv/channel.c
32005 --- linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
32006 +++ linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
32007 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32008 int ret = 0;
32009 int t;
32010
32011 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32012 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32013 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32014 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32015
32016 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32017 if (ret)
32018 diff -urNp linux-3.0.3/drivers/staging/hv/hv.c linux-3.0.3/drivers/staging/hv/hv.c
32019 --- linux-3.0.3/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
32020 +++ linux-3.0.3/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32021 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32022 u64 output_address = (output) ? virt_to_phys(output) : 0;
32023 u32 output_address_hi = output_address >> 32;
32024 u32 output_address_lo = output_address & 0xFFFFFFFF;
32025 - volatile void *hypercall_page = hv_context.hypercall_page;
32026 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32027
32028 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32029 "=a"(hv_status_lo) : "d" (control_hi),
32030 diff -urNp linux-3.0.3/drivers/staging/hv/hv_mouse.c linux-3.0.3/drivers/staging/hv/hv_mouse.c
32031 --- linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32032 +++ linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32033 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32034 if (hid_dev) {
32035 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32036
32037 - hid_dev->ll_driver->open = mousevsc_hid_open;
32038 - hid_dev->ll_driver->close = mousevsc_hid_close;
32039 + pax_open_kernel();
32040 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32041 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32042 + pax_close_kernel();
32043
32044 hid_dev->bus = BUS_VIRTUAL;
32045 hid_dev->vendor = input_device_ctx->device_info.vendor;
32046 diff -urNp linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h
32047 --- linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32048 +++ linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32049 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
32050 struct vmbus_connection {
32051 enum vmbus_connect_state conn_state;
32052
32053 - atomic_t next_gpadl_handle;
32054 + atomic_unchecked_t next_gpadl_handle;
32055
32056 /*
32057 * Represents channel interrupts. Each bit position represents a
32058 diff -urNp linux-3.0.3/drivers/staging/hv/rndis_filter.c linux-3.0.3/drivers/staging/hv/rndis_filter.c
32059 --- linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
32060 +++ linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32061 @@ -43,7 +43,7 @@ struct rndis_device {
32062
32063 enum rndis_device_state state;
32064 u32 link_stat;
32065 - atomic_t new_req_id;
32066 + atomic_unchecked_t new_req_id;
32067
32068 spinlock_t request_lock;
32069 struct list_head req_list;
32070 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32071 * template
32072 */
32073 set = &rndis_msg->msg.set_req;
32074 - set->req_id = atomic_inc_return(&dev->new_req_id);
32075 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32076
32077 /* Add to the request list */
32078 spin_lock_irqsave(&dev->request_lock, flags);
32079 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32080
32081 /* Setup the rndis set */
32082 halt = &request->request_msg.msg.halt_req;
32083 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32084 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32085
32086 /* Ignore return since this msg is optional. */
32087 rndis_filter_send_request(dev, request);
32088 diff -urNp linux-3.0.3/drivers/staging/hv/vmbus_drv.c linux-3.0.3/drivers/staging/hv/vmbus_drv.c
32089 --- linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32090 +++ linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32091 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32092 {
32093 int ret = 0;
32094
32095 - static atomic_t device_num = ATOMIC_INIT(0);
32096 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32097
32098 /* Set the device name. Otherwise, device_register() will fail. */
32099 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32100 - atomic_inc_return(&device_num));
32101 + atomic_inc_return_unchecked(&device_num));
32102
32103 /* The new device belongs to this bus */
32104 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32105 diff -urNp linux-3.0.3/drivers/staging/iio/ring_generic.h linux-3.0.3/drivers/staging/iio/ring_generic.h
32106 --- linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32107 +++ linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32108 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32109
32110 int (*is_enabled)(struct iio_ring_buffer *ring);
32111 int (*enable)(struct iio_ring_buffer *ring);
32112 -};
32113 +} __no_const;
32114
32115 struct iio_ring_setup_ops {
32116 int (*preenable)(struct iio_dev *);
32117 diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet.c linux-3.0.3/drivers/staging/octeon/ethernet.c
32118 --- linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32119 +++ linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32120 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32121 * since the RX tasklet also increments it.
32122 */
32123 #ifdef CONFIG_64BIT
32124 - atomic64_add(rx_status.dropped_packets,
32125 - (atomic64_t *)&priv->stats.rx_dropped);
32126 + atomic64_add_unchecked(rx_status.dropped_packets,
32127 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32128 #else
32129 - atomic_add(rx_status.dropped_packets,
32130 - (atomic_t *)&priv->stats.rx_dropped);
32131 + atomic_add_unchecked(rx_status.dropped_packets,
32132 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32133 #endif
32134 }
32135
32136 diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet-rx.c linux-3.0.3/drivers/staging/octeon/ethernet-rx.c
32137 --- linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32138 +++ linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32139 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32140 /* Increment RX stats for virtual ports */
32141 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32142 #ifdef CONFIG_64BIT
32143 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32144 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32145 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32146 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32147 #else
32148 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32149 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32150 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32151 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32152 #endif
32153 }
32154 netif_receive_skb(skb);
32155 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32156 dev->name);
32157 */
32158 #ifdef CONFIG_64BIT
32159 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32160 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32161 #else
32162 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32163 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32164 #endif
32165 dev_kfree_skb_irq(skb);
32166 }
32167 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/inode.c linux-3.0.3/drivers/staging/pohmelfs/inode.c
32168 --- linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32169 +++ linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32170 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32171 mutex_init(&psb->mcache_lock);
32172 psb->mcache_root = RB_ROOT;
32173 psb->mcache_timeout = msecs_to_jiffies(5000);
32174 - atomic_long_set(&psb->mcache_gen, 0);
32175 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32176
32177 psb->trans_max_pages = 100;
32178
32179 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32180 INIT_LIST_HEAD(&psb->crypto_ready_list);
32181 INIT_LIST_HEAD(&psb->crypto_active_list);
32182
32183 - atomic_set(&psb->trans_gen, 1);
32184 + atomic_set_unchecked(&psb->trans_gen, 1);
32185 atomic_long_set(&psb->total_inodes, 0);
32186
32187 mutex_init(&psb->state_lock);
32188 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/mcache.c linux-3.0.3/drivers/staging/pohmelfs/mcache.c
32189 --- linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32190 +++ linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32191 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32192 m->data = data;
32193 m->start = start;
32194 m->size = size;
32195 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32196 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32197
32198 mutex_lock(&psb->mcache_lock);
32199 err = pohmelfs_mcache_insert(psb, m);
32200 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/netfs.h linux-3.0.3/drivers/staging/pohmelfs/netfs.h
32201 --- linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32202 +++ linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32203 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32204 struct pohmelfs_sb {
32205 struct rb_root mcache_root;
32206 struct mutex mcache_lock;
32207 - atomic_long_t mcache_gen;
32208 + atomic_long_unchecked_t mcache_gen;
32209 unsigned long mcache_timeout;
32210
32211 unsigned int idx;
32212
32213 unsigned int trans_retries;
32214
32215 - atomic_t trans_gen;
32216 + atomic_unchecked_t trans_gen;
32217
32218 unsigned int crypto_attached_size;
32219 unsigned int crypto_align_size;
32220 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/trans.c linux-3.0.3/drivers/staging/pohmelfs/trans.c
32221 --- linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32222 +++ linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32223 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32224 int err;
32225 struct netfs_cmd *cmd = t->iovec.iov_base;
32226
32227 - t->gen = atomic_inc_return(&psb->trans_gen);
32228 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32229
32230 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32231 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32232 diff -urNp linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h
32233 --- linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32234 +++ linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32235 @@ -83,7 +83,7 @@ struct _io_ops {
32236 u8 *pmem);
32237 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32238 u8 *pmem);
32239 -};
32240 +} __no_const;
32241
32242 struct io_req {
32243 struct list_head list;
32244 diff -urNp linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c
32245 --- linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32246 +++ linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32247 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32248 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32249
32250 if (rlen)
32251 - if (copy_to_user(data, &resp, rlen))
32252 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32253 return -EFAULT;
32254
32255 return 0;
32256 diff -urNp linux-3.0.3/drivers/staging/tty/stallion.c linux-3.0.3/drivers/staging/tty/stallion.c
32257 --- linux-3.0.3/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32258 +++ linux-3.0.3/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32259 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32260 struct stlport stl_dummyport;
32261 struct stlport *portp;
32262
32263 + pax_track_stack();
32264 +
32265 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32266 return -EFAULT;
32267 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32268 diff -urNp linux-3.0.3/drivers/staging/usbip/usbip_common.h linux-3.0.3/drivers/staging/usbip/usbip_common.h
32269 --- linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32270 +++ linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32271 @@ -315,7 +315,7 @@ struct usbip_device {
32272 void (*shutdown)(struct usbip_device *);
32273 void (*reset)(struct usbip_device *);
32274 void (*unusable)(struct usbip_device *);
32275 - } eh_ops;
32276 + } __no_const eh_ops;
32277 };
32278
32279 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32280 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci.h linux-3.0.3/drivers/staging/usbip/vhci.h
32281 --- linux-3.0.3/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32282 +++ linux-3.0.3/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32283 @@ -94,7 +94,7 @@ struct vhci_hcd {
32284 unsigned resuming:1;
32285 unsigned long re_timeout;
32286
32287 - atomic_t seqnum;
32288 + atomic_unchecked_t seqnum;
32289
32290 /*
32291 * NOTE:
32292 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_hcd.c linux-3.0.3/drivers/staging/usbip/vhci_hcd.c
32293 --- linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32294 +++ linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32295 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32296 return;
32297 }
32298
32299 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32300 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32301 if (priv->seqnum == 0xffff)
32302 dev_info(&urb->dev->dev, "seqnum max\n");
32303
32304 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32305 return -ENOMEM;
32306 }
32307
32308 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32309 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32310 if (unlink->seqnum == 0xffff)
32311 pr_info("seqnum max\n");
32312
32313 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32314 vdev->rhport = rhport;
32315 }
32316
32317 - atomic_set(&vhci->seqnum, 0);
32318 + atomic_set_unchecked(&vhci->seqnum, 0);
32319 spin_lock_init(&vhci->lock);
32320
32321 hcd->power_budget = 0; /* no limit */
32322 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_rx.c linux-3.0.3/drivers/staging/usbip/vhci_rx.c
32323 --- linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32324 +++ linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32325 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32326 if (!urb) {
32327 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32328 pr_info("max seqnum %d\n",
32329 - atomic_read(&the_controller->seqnum));
32330 + atomic_read_unchecked(&the_controller->seqnum));
32331 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32332 return;
32333 }
32334 diff -urNp linux-3.0.3/drivers/staging/vt6655/hostap.c linux-3.0.3/drivers/staging/vt6655/hostap.c
32335 --- linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32336 +++ linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32337 @@ -79,14 +79,13 @@ static int msglevel
32338 *
32339 */
32340
32341 +static net_device_ops_no_const apdev_netdev_ops;
32342 +
32343 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32344 {
32345 PSDevice apdev_priv;
32346 struct net_device *dev = pDevice->dev;
32347 int ret;
32348 - const struct net_device_ops apdev_netdev_ops = {
32349 - .ndo_start_xmit = pDevice->tx_80211,
32350 - };
32351
32352 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32353
32354 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32355 *apdev_priv = *pDevice;
32356 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32357
32358 + /* only half broken now */
32359 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32360 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32361
32362 pDevice->apdev->type = ARPHRD_IEEE80211;
32363 diff -urNp linux-3.0.3/drivers/staging/vt6656/hostap.c linux-3.0.3/drivers/staging/vt6656/hostap.c
32364 --- linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32365 +++ linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32366 @@ -80,14 +80,13 @@ static int msglevel
32367 *
32368 */
32369
32370 +static net_device_ops_no_const apdev_netdev_ops;
32371 +
32372 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32373 {
32374 PSDevice apdev_priv;
32375 struct net_device *dev = pDevice->dev;
32376 int ret;
32377 - const struct net_device_ops apdev_netdev_ops = {
32378 - .ndo_start_xmit = pDevice->tx_80211,
32379 - };
32380
32381 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32382
32383 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32384 *apdev_priv = *pDevice;
32385 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32386
32387 + /* only half broken now */
32388 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32389 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32390
32391 pDevice->apdev->type = ARPHRD_IEEE80211;
32392 diff -urNp linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c
32393 --- linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32394 +++ linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32395 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32396
32397 struct usbctlx_completor {
32398 int (*complete) (struct usbctlx_completor *);
32399 -};
32400 +} __no_const;
32401
32402 static int
32403 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32404 diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.c linux-3.0.3/drivers/staging/zcache/tmem.c
32405 --- linux-3.0.3/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32406 +++ linux-3.0.3/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32407 @@ -39,7 +39,7 @@
32408 * A tmem host implementation must use this function to register callbacks
32409 * for memory allocation.
32410 */
32411 -static struct tmem_hostops tmem_hostops;
32412 +static tmem_hostops_no_const tmem_hostops;
32413
32414 static void tmem_objnode_tree_init(void);
32415
32416 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32417 * A tmem host implementation must use this function to register
32418 * callbacks for a page-accessible memory (PAM) implementation
32419 */
32420 -static struct tmem_pamops tmem_pamops;
32421 +static tmem_pamops_no_const tmem_pamops;
32422
32423 void tmem_register_pamops(struct tmem_pamops *m)
32424 {
32425 diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.h linux-3.0.3/drivers/staging/zcache/tmem.h
32426 --- linux-3.0.3/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32427 +++ linux-3.0.3/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32428 @@ -171,6 +171,7 @@ struct tmem_pamops {
32429 int (*get_data)(struct page *, void *, struct tmem_pool *);
32430 void (*free)(void *, struct tmem_pool *);
32431 };
32432 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32433 extern void tmem_register_pamops(struct tmem_pamops *m);
32434
32435 /* memory allocation methods provided by the host implementation */
32436 @@ -180,6 +181,7 @@ struct tmem_hostops {
32437 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32438 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32439 };
32440 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32441 extern void tmem_register_hostops(struct tmem_hostops *m);
32442
32443 /* core tmem accessor functions */
32444 diff -urNp linux-3.0.3/drivers/target/target_core_alua.c linux-3.0.3/drivers/target/target_core_alua.c
32445 --- linux-3.0.3/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32446 +++ linux-3.0.3/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32447 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32448 char path[ALUA_METADATA_PATH_LEN];
32449 int len;
32450
32451 + pax_track_stack();
32452 +
32453 memset(path, 0, ALUA_METADATA_PATH_LEN);
32454
32455 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32456 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32457 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32458 int len;
32459
32460 + pax_track_stack();
32461 +
32462 memset(path, 0, ALUA_METADATA_PATH_LEN);
32463 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32464
32465 diff -urNp linux-3.0.3/drivers/target/target_core_cdb.c linux-3.0.3/drivers/target/target_core_cdb.c
32466 --- linux-3.0.3/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32467 +++ linux-3.0.3/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32468 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32469 int length = 0;
32470 unsigned char buf[SE_MODE_PAGE_BUF];
32471
32472 + pax_track_stack();
32473 +
32474 memset(buf, 0, SE_MODE_PAGE_BUF);
32475
32476 switch (cdb[2] & 0x3f) {
32477 diff -urNp linux-3.0.3/drivers/target/target_core_configfs.c linux-3.0.3/drivers/target/target_core_configfs.c
32478 --- linux-3.0.3/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32479 +++ linux-3.0.3/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32480 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32481 ssize_t len = 0;
32482 int reg_count = 0, prf_isid;
32483
32484 + pax_track_stack();
32485 +
32486 if (!(su_dev->se_dev_ptr))
32487 return -ENODEV;
32488
32489 diff -urNp linux-3.0.3/drivers/target/target_core_pr.c linux-3.0.3/drivers/target/target_core_pr.c
32490 --- linux-3.0.3/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32491 +++ linux-3.0.3/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32492 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32493 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32494 u16 tpgt;
32495
32496 + pax_track_stack();
32497 +
32498 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32499 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32500 /*
32501 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32502 ssize_t len = 0;
32503 int reg_count = 0;
32504
32505 + pax_track_stack();
32506 +
32507 memset(buf, 0, pr_aptpl_buf_len);
32508 /*
32509 * Called to clear metadata once APTPL has been deactivated.
32510 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32511 char path[512];
32512 int ret;
32513
32514 + pax_track_stack();
32515 +
32516 memset(iov, 0, sizeof(struct iovec));
32517 memset(path, 0, 512);
32518
32519 diff -urNp linux-3.0.3/drivers/target/target_core_tmr.c linux-3.0.3/drivers/target/target_core_tmr.c
32520 --- linux-3.0.3/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32521 +++ linux-3.0.3/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32522 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32523 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32524 T_TASK(cmd)->t_task_cdbs,
32525 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32526 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32527 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32528 atomic_read(&T_TASK(cmd)->t_transport_active),
32529 atomic_read(&T_TASK(cmd)->t_transport_stop),
32530 atomic_read(&T_TASK(cmd)->t_transport_sent));
32531 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32532 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32533 " task: %p, t_fe_count: %d dev: %p\n", task,
32534 fe_count, dev);
32535 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32536 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32537 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32538 flags);
32539 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32540 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32541 }
32542 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32543 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32544 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32545 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32546 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32547 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32548
32549 diff -urNp linux-3.0.3/drivers/target/target_core_transport.c linux-3.0.3/drivers/target/target_core_transport.c
32550 --- linux-3.0.3/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32551 +++ linux-3.0.3/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32552 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32553
32554 dev->queue_depth = dev_limits->queue_depth;
32555 atomic_set(&dev->depth_left, dev->queue_depth);
32556 - atomic_set(&dev->dev_ordered_id, 0);
32557 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32558
32559 se_dev_set_default_attribs(dev, dev_limits);
32560
32561 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32562 * Used to determine when ORDERED commands should go from
32563 * Dormant to Active status.
32564 */
32565 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32566 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32567 smp_mb__after_atomic_inc();
32568 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32569 cmd->se_ordered_id, cmd->sam_task_attr,
32570 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32571 " t_transport_active: %d t_transport_stop: %d"
32572 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32573 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32574 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32575 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32576 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32577 atomic_read(&T_TASK(cmd)->t_transport_active),
32578 atomic_read(&T_TASK(cmd)->t_transport_stop),
32579 @@ -2673,9 +2673,9 @@ check_depth:
32580 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32581 atomic_set(&task->task_active, 1);
32582 atomic_set(&task->task_sent, 1);
32583 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32584 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32585
32586 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32587 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32588 T_TASK(cmd)->t_task_cdbs)
32589 atomic_set(&cmd->transport_sent, 1);
32590
32591 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32592 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32593 }
32594 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32595 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32596 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32597 goto remove;
32598
32599 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32600 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32601 {
32602 int ret = 0;
32603
32604 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32605 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32606 if (!(send_status) ||
32607 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32608 return 1;
32609 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32610 */
32611 if (cmd->data_direction == DMA_TO_DEVICE) {
32612 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32613 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32614 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32615 smp_mb__after_atomic_inc();
32616 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32617 transport_new_cmd_failure(cmd);
32618 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32619 CMD_TFO(cmd)->get_task_tag(cmd),
32620 T_TASK(cmd)->t_task_cdbs,
32621 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32622 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32623 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32624 atomic_read(&T_TASK(cmd)->t_transport_active),
32625 atomic_read(&T_TASK(cmd)->t_transport_stop),
32626 atomic_read(&T_TASK(cmd)->t_transport_sent));
32627 diff -urNp linux-3.0.3/drivers/telephony/ixj.c linux-3.0.3/drivers/telephony/ixj.c
32628 --- linux-3.0.3/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32629 +++ linux-3.0.3/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32630 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32631 bool mContinue;
32632 char *pIn, *pOut;
32633
32634 + pax_track_stack();
32635 +
32636 if (!SCI_Prepare(j))
32637 return 0;
32638
32639 diff -urNp linux-3.0.3/drivers/tty/hvc/hvcs.c linux-3.0.3/drivers/tty/hvc/hvcs.c
32640 --- linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32641 +++ linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32642 @@ -83,6 +83,7 @@
32643 #include <asm/hvcserver.h>
32644 #include <asm/uaccess.h>
32645 #include <asm/vio.h>
32646 +#include <asm/local.h>
32647
32648 /*
32649 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32650 @@ -270,7 +271,7 @@ struct hvcs_struct {
32651 unsigned int index;
32652
32653 struct tty_struct *tty;
32654 - int open_count;
32655 + local_t open_count;
32656
32657 /*
32658 * Used to tell the driver kernel_thread what operations need to take
32659 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32660
32661 spin_lock_irqsave(&hvcsd->lock, flags);
32662
32663 - if (hvcsd->open_count > 0) {
32664 + if (local_read(&hvcsd->open_count) > 0) {
32665 spin_unlock_irqrestore(&hvcsd->lock, flags);
32666 printk(KERN_INFO "HVCS: vterm state unchanged. "
32667 "The hvcs device node is still in use.\n");
32668 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32669 if ((retval = hvcs_partner_connect(hvcsd)))
32670 goto error_release;
32671
32672 - hvcsd->open_count = 1;
32673 + local_set(&hvcsd->open_count, 1);
32674 hvcsd->tty = tty;
32675 tty->driver_data = hvcsd;
32676
32677 @@ -1179,7 +1180,7 @@ fast_open:
32678
32679 spin_lock_irqsave(&hvcsd->lock, flags);
32680 kref_get(&hvcsd->kref);
32681 - hvcsd->open_count++;
32682 + local_inc(&hvcsd->open_count);
32683 hvcsd->todo_mask |= HVCS_SCHED_READ;
32684 spin_unlock_irqrestore(&hvcsd->lock, flags);
32685
32686 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32687 hvcsd = tty->driver_data;
32688
32689 spin_lock_irqsave(&hvcsd->lock, flags);
32690 - if (--hvcsd->open_count == 0) {
32691 + if (local_dec_and_test(&hvcsd->open_count)) {
32692
32693 vio_disable_interrupts(hvcsd->vdev);
32694
32695 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32696 free_irq(irq, hvcsd);
32697 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32698 return;
32699 - } else if (hvcsd->open_count < 0) {
32700 + } else if (local_read(&hvcsd->open_count) < 0) {
32701 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32702 " is missmanaged.\n",
32703 - hvcsd->vdev->unit_address, hvcsd->open_count);
32704 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32705 }
32706
32707 spin_unlock_irqrestore(&hvcsd->lock, flags);
32708 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32709
32710 spin_lock_irqsave(&hvcsd->lock, flags);
32711 /* Preserve this so that we know how many kref refs to put */
32712 - temp_open_count = hvcsd->open_count;
32713 + temp_open_count = local_read(&hvcsd->open_count);
32714
32715 /*
32716 * Don't kref put inside the spinlock because the destruction
32717 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32718 hvcsd->tty->driver_data = NULL;
32719 hvcsd->tty = NULL;
32720
32721 - hvcsd->open_count = 0;
32722 + local_set(&hvcsd->open_count, 0);
32723
32724 /* This will drop any buffered data on the floor which is OK in a hangup
32725 * scenario. */
32726 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32727 * the middle of a write operation? This is a crummy place to do this
32728 * but we want to keep it all in the spinlock.
32729 */
32730 - if (hvcsd->open_count <= 0) {
32731 + if (local_read(&hvcsd->open_count) <= 0) {
32732 spin_unlock_irqrestore(&hvcsd->lock, flags);
32733 return -ENODEV;
32734 }
32735 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32736 {
32737 struct hvcs_struct *hvcsd = tty->driver_data;
32738
32739 - if (!hvcsd || hvcsd->open_count <= 0)
32740 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32741 return 0;
32742
32743 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32744 diff -urNp linux-3.0.3/drivers/tty/ipwireless/tty.c linux-3.0.3/drivers/tty/ipwireless/tty.c
32745 --- linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32746 +++ linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32747 @@ -29,6 +29,7 @@
32748 #include <linux/tty_driver.h>
32749 #include <linux/tty_flip.h>
32750 #include <linux/uaccess.h>
32751 +#include <asm/local.h>
32752
32753 #include "tty.h"
32754 #include "network.h"
32755 @@ -51,7 +52,7 @@ struct ipw_tty {
32756 int tty_type;
32757 struct ipw_network *network;
32758 struct tty_struct *linux_tty;
32759 - int open_count;
32760 + local_t open_count;
32761 unsigned int control_lines;
32762 struct mutex ipw_tty_mutex;
32763 int tx_bytes_queued;
32764 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32765 mutex_unlock(&tty->ipw_tty_mutex);
32766 return -ENODEV;
32767 }
32768 - if (tty->open_count == 0)
32769 + if (local_read(&tty->open_count) == 0)
32770 tty->tx_bytes_queued = 0;
32771
32772 - tty->open_count++;
32773 + local_inc(&tty->open_count);
32774
32775 tty->linux_tty = linux_tty;
32776 linux_tty->driver_data = tty;
32777 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32778
32779 static void do_ipw_close(struct ipw_tty *tty)
32780 {
32781 - tty->open_count--;
32782 -
32783 - if (tty->open_count == 0) {
32784 + if (local_dec_return(&tty->open_count) == 0) {
32785 struct tty_struct *linux_tty = tty->linux_tty;
32786
32787 if (linux_tty != NULL) {
32788 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32789 return;
32790
32791 mutex_lock(&tty->ipw_tty_mutex);
32792 - if (tty->open_count == 0) {
32793 + if (local_read(&tty->open_count) == 0) {
32794 mutex_unlock(&tty->ipw_tty_mutex);
32795 return;
32796 }
32797 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32798 return;
32799 }
32800
32801 - if (!tty->open_count) {
32802 + if (!local_read(&tty->open_count)) {
32803 mutex_unlock(&tty->ipw_tty_mutex);
32804 return;
32805 }
32806 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32807 return -ENODEV;
32808
32809 mutex_lock(&tty->ipw_tty_mutex);
32810 - if (!tty->open_count) {
32811 + if (!local_read(&tty->open_count)) {
32812 mutex_unlock(&tty->ipw_tty_mutex);
32813 return -EINVAL;
32814 }
32815 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32816 if (!tty)
32817 return -ENODEV;
32818
32819 - if (!tty->open_count)
32820 + if (!local_read(&tty->open_count))
32821 return -EINVAL;
32822
32823 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32824 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32825 if (!tty)
32826 return 0;
32827
32828 - if (!tty->open_count)
32829 + if (!local_read(&tty->open_count))
32830 return 0;
32831
32832 return tty->tx_bytes_queued;
32833 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32834 if (!tty)
32835 return -ENODEV;
32836
32837 - if (!tty->open_count)
32838 + if (!local_read(&tty->open_count))
32839 return -EINVAL;
32840
32841 return get_control_lines(tty);
32842 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32843 if (!tty)
32844 return -ENODEV;
32845
32846 - if (!tty->open_count)
32847 + if (!local_read(&tty->open_count))
32848 return -EINVAL;
32849
32850 return set_control_lines(tty, set, clear);
32851 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32852 if (!tty)
32853 return -ENODEV;
32854
32855 - if (!tty->open_count)
32856 + if (!local_read(&tty->open_count))
32857 return -EINVAL;
32858
32859 /* FIXME: Exactly how is the tty object locked here .. */
32860 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32861 against a parallel ioctl etc */
32862 mutex_lock(&ttyj->ipw_tty_mutex);
32863 }
32864 - while (ttyj->open_count)
32865 + while (local_read(&ttyj->open_count))
32866 do_ipw_close(ttyj);
32867 ipwireless_disassociate_network_ttys(network,
32868 ttyj->channel_idx);
32869 diff -urNp linux-3.0.3/drivers/tty/n_gsm.c linux-3.0.3/drivers/tty/n_gsm.c
32870 --- linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32871 +++ linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32872 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32873 return NULL;
32874 spin_lock_init(&dlci->lock);
32875 dlci->fifo = &dlci->_fifo;
32876 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32877 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32878 kfree(dlci);
32879 return NULL;
32880 }
32881 diff -urNp linux-3.0.3/drivers/tty/n_tty.c linux-3.0.3/drivers/tty/n_tty.c
32882 --- linux-3.0.3/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32883 +++ linux-3.0.3/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32884 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32885 {
32886 *ops = tty_ldisc_N_TTY;
32887 ops->owner = NULL;
32888 - ops->refcount = ops->flags = 0;
32889 + atomic_set(&ops->refcount, 0);
32890 + ops->flags = 0;
32891 }
32892 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32893 diff -urNp linux-3.0.3/drivers/tty/pty.c linux-3.0.3/drivers/tty/pty.c
32894 --- linux-3.0.3/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32895 +++ linux-3.0.3/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32896 @@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32897 register_sysctl_table(pty_root_table);
32898
32899 /* Now create the /dev/ptmx special device */
32900 + pax_open_kernel();
32901 tty_default_fops(&ptmx_fops);
32902 - ptmx_fops.open = ptmx_open;
32903 + *(void **)&ptmx_fops.open = ptmx_open;
32904 + pax_close_kernel();
32905
32906 cdev_init(&ptmx_cdev, &ptmx_fops);
32907 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32908 diff -urNp linux-3.0.3/drivers/tty/rocket.c linux-3.0.3/drivers/tty/rocket.c
32909 --- linux-3.0.3/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32910 +++ linux-3.0.3/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32911 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32912 struct rocket_ports tmp;
32913 int board;
32914
32915 + pax_track_stack();
32916 +
32917 if (!retports)
32918 return -EFAULT;
32919 memset(&tmp, 0, sizeof (tmp));
32920 diff -urNp linux-3.0.3/drivers/tty/serial/kgdboc.c linux-3.0.3/drivers/tty/serial/kgdboc.c
32921 --- linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32922 +++ linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32923 @@ -23,8 +23,9 @@
32924 #define MAX_CONFIG_LEN 40
32925
32926 static struct kgdb_io kgdboc_io_ops;
32927 +static struct kgdb_io kgdboc_io_ops_console;
32928
32929 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32930 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32931 static int configured = -1;
32932
32933 static char config[MAX_CONFIG_LEN];
32934 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32935 kgdboc_unregister_kbd();
32936 if (configured == 1)
32937 kgdb_unregister_io_module(&kgdboc_io_ops);
32938 + else if (configured == 2)
32939 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
32940 }
32941
32942 static int configure_kgdboc(void)
32943 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32944 int err;
32945 char *cptr = config;
32946 struct console *cons;
32947 + int is_console = 0;
32948
32949 err = kgdboc_option_setup(config);
32950 if (err || !strlen(config) || isspace(config[0]))
32951 goto noconfig;
32952
32953 err = -ENODEV;
32954 - kgdboc_io_ops.is_console = 0;
32955 kgdb_tty_driver = NULL;
32956
32957 kgdboc_use_kms = 0;
32958 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32959 int idx;
32960 if (cons->device && cons->device(cons, &idx) == p &&
32961 idx == tty_line) {
32962 - kgdboc_io_ops.is_console = 1;
32963 + is_console = 1;
32964 break;
32965 }
32966 cons = cons->next;
32967 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32968 kgdb_tty_line = tty_line;
32969
32970 do_register:
32971 - err = kgdb_register_io_module(&kgdboc_io_ops);
32972 + if (is_console) {
32973 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
32974 + configured = 2;
32975 + } else {
32976 + err = kgdb_register_io_module(&kgdboc_io_ops);
32977 + configured = 1;
32978 + }
32979 if (err)
32980 goto noconfig;
32981
32982 - configured = 1;
32983 -
32984 return 0;
32985
32986 noconfig:
32987 @@ -212,7 +219,7 @@ noconfig:
32988 static int __init init_kgdboc(void)
32989 {
32990 /* Already configured? */
32991 - if (configured == 1)
32992 + if (configured >= 1)
32993 return 0;
32994
32995 return configure_kgdboc();
32996 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32997 if (config[len - 1] == '\n')
32998 config[len - 1] = '\0';
32999
33000 - if (configured == 1)
33001 + if (configured >= 1)
33002 cleanup_kgdboc();
33003
33004 /* Go and configure with the new params. */
33005 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33006 .post_exception = kgdboc_post_exp_handler,
33007 };
33008
33009 +static struct kgdb_io kgdboc_io_ops_console = {
33010 + .name = "kgdboc",
33011 + .read_char = kgdboc_get_char,
33012 + .write_char = kgdboc_put_char,
33013 + .pre_exception = kgdboc_pre_exp_handler,
33014 + .post_exception = kgdboc_post_exp_handler,
33015 + .is_console = 1
33016 +};
33017 +
33018 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33019 /* This is only available if kgdboc is a built in for early debugging */
33020 static int __init kgdboc_early_init(char *opt)
33021 diff -urNp linux-3.0.3/drivers/tty/serial/mrst_max3110.c linux-3.0.3/drivers/tty/serial/mrst_max3110.c
33022 --- linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
33023 +++ linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
33024 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33025 int loop = 1, num, total = 0;
33026 u8 recv_buf[512], *pbuf;
33027
33028 + pax_track_stack();
33029 +
33030 pbuf = recv_buf;
33031 do {
33032 num = max3110_read_multi(max, pbuf);
33033 diff -urNp linux-3.0.3/drivers/tty/tty_io.c linux-3.0.3/drivers/tty/tty_io.c
33034 --- linux-3.0.3/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33035 +++ linux-3.0.3/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33036 @@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33037
33038 void tty_default_fops(struct file_operations *fops)
33039 {
33040 - *fops = tty_fops;
33041 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33042 }
33043
33044 /*
33045 diff -urNp linux-3.0.3/drivers/tty/tty_ldisc.c linux-3.0.3/drivers/tty/tty_ldisc.c
33046 --- linux-3.0.3/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33047 +++ linux-3.0.3/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33048 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33049 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33050 struct tty_ldisc_ops *ldo = ld->ops;
33051
33052 - ldo->refcount--;
33053 + atomic_dec(&ldo->refcount);
33054 module_put(ldo->owner);
33055 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33056
33057 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33058 spin_lock_irqsave(&tty_ldisc_lock, flags);
33059 tty_ldiscs[disc] = new_ldisc;
33060 new_ldisc->num = disc;
33061 - new_ldisc->refcount = 0;
33062 + atomic_set(&new_ldisc->refcount, 0);
33063 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33064
33065 return ret;
33066 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33067 return -EINVAL;
33068
33069 spin_lock_irqsave(&tty_ldisc_lock, flags);
33070 - if (tty_ldiscs[disc]->refcount)
33071 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33072 ret = -EBUSY;
33073 else
33074 tty_ldiscs[disc] = NULL;
33075 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33076 if (ldops) {
33077 ret = ERR_PTR(-EAGAIN);
33078 if (try_module_get(ldops->owner)) {
33079 - ldops->refcount++;
33080 + atomic_inc(&ldops->refcount);
33081 ret = ldops;
33082 }
33083 }
33084 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33085 unsigned long flags;
33086
33087 spin_lock_irqsave(&tty_ldisc_lock, flags);
33088 - ldops->refcount--;
33089 + atomic_dec(&ldops->refcount);
33090 module_put(ldops->owner);
33091 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33092 }
33093 diff -urNp linux-3.0.3/drivers/tty/vt/keyboard.c linux-3.0.3/drivers/tty/vt/keyboard.c
33094 --- linux-3.0.3/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33095 +++ linux-3.0.3/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33096 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33097 kbd->kbdmode == VC_OFF) &&
33098 value != KVAL(K_SAK))
33099 return; /* SAK is allowed even in raw mode */
33100 +
33101 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33102 + {
33103 + void *func = fn_handler[value];
33104 + if (func == fn_show_state || func == fn_show_ptregs ||
33105 + func == fn_show_mem)
33106 + return;
33107 + }
33108 +#endif
33109 +
33110 fn_handler[value](vc);
33111 }
33112
33113 diff -urNp linux-3.0.3/drivers/tty/vt/vt.c linux-3.0.3/drivers/tty/vt/vt.c
33114 --- linux-3.0.3/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33115 +++ linux-3.0.3/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33116 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33117
33118 static void notify_write(struct vc_data *vc, unsigned int unicode)
33119 {
33120 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33121 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33122 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33123 }
33124
33125 diff -urNp linux-3.0.3/drivers/tty/vt/vt_ioctl.c linux-3.0.3/drivers/tty/vt/vt_ioctl.c
33126 --- linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33127 +++ linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33128 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33129 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33130 return -EFAULT;
33131
33132 - if (!capable(CAP_SYS_TTY_CONFIG))
33133 - perm = 0;
33134 -
33135 switch (cmd) {
33136 case KDGKBENT:
33137 key_map = key_maps[s];
33138 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33139 val = (i ? K_HOLE : K_NOSUCHMAP);
33140 return put_user(val, &user_kbe->kb_value);
33141 case KDSKBENT:
33142 + if (!capable(CAP_SYS_TTY_CONFIG))
33143 + perm = 0;
33144 +
33145 if (!perm)
33146 return -EPERM;
33147 if (!i && v == K_NOSUCHMAP) {
33148 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33149 int i, j, k;
33150 int ret;
33151
33152 - if (!capable(CAP_SYS_TTY_CONFIG))
33153 - perm = 0;
33154 -
33155 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33156 if (!kbs) {
33157 ret = -ENOMEM;
33158 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33159 kfree(kbs);
33160 return ((p && *p) ? -EOVERFLOW : 0);
33161 case KDSKBSENT:
33162 + if (!capable(CAP_SYS_TTY_CONFIG))
33163 + perm = 0;
33164 +
33165 if (!perm) {
33166 ret = -EPERM;
33167 goto reterr;
33168 diff -urNp linux-3.0.3/drivers/uio/uio.c linux-3.0.3/drivers/uio/uio.c
33169 --- linux-3.0.3/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33170 +++ linux-3.0.3/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33171 @@ -25,6 +25,7 @@
33172 #include <linux/kobject.h>
33173 #include <linux/cdev.h>
33174 #include <linux/uio_driver.h>
33175 +#include <asm/local.h>
33176
33177 #define UIO_MAX_DEVICES (1U << MINORBITS)
33178
33179 @@ -32,10 +33,10 @@ struct uio_device {
33180 struct module *owner;
33181 struct device *dev;
33182 int minor;
33183 - atomic_t event;
33184 + atomic_unchecked_t event;
33185 struct fasync_struct *async_queue;
33186 wait_queue_head_t wait;
33187 - int vma_count;
33188 + local_t vma_count;
33189 struct uio_info *info;
33190 struct kobject *map_dir;
33191 struct kobject *portio_dir;
33192 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33193 struct device_attribute *attr, char *buf)
33194 {
33195 struct uio_device *idev = dev_get_drvdata(dev);
33196 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33197 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33198 }
33199
33200 static struct device_attribute uio_class_attributes[] = {
33201 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33202 {
33203 struct uio_device *idev = info->uio_dev;
33204
33205 - atomic_inc(&idev->event);
33206 + atomic_inc_unchecked(&idev->event);
33207 wake_up_interruptible(&idev->wait);
33208 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33209 }
33210 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33211 }
33212
33213 listener->dev = idev;
33214 - listener->event_count = atomic_read(&idev->event);
33215 + listener->event_count = atomic_read_unchecked(&idev->event);
33216 filep->private_data = listener;
33217
33218 if (idev->info->open) {
33219 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33220 return -EIO;
33221
33222 poll_wait(filep, &idev->wait, wait);
33223 - if (listener->event_count != atomic_read(&idev->event))
33224 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33225 return POLLIN | POLLRDNORM;
33226 return 0;
33227 }
33228 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33229 do {
33230 set_current_state(TASK_INTERRUPTIBLE);
33231
33232 - event_count = atomic_read(&idev->event);
33233 + event_count = atomic_read_unchecked(&idev->event);
33234 if (event_count != listener->event_count) {
33235 if (copy_to_user(buf, &event_count, count))
33236 retval = -EFAULT;
33237 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33238 static void uio_vma_open(struct vm_area_struct *vma)
33239 {
33240 struct uio_device *idev = vma->vm_private_data;
33241 - idev->vma_count++;
33242 + local_inc(&idev->vma_count);
33243 }
33244
33245 static void uio_vma_close(struct vm_area_struct *vma)
33246 {
33247 struct uio_device *idev = vma->vm_private_data;
33248 - idev->vma_count--;
33249 + local_dec(&idev->vma_count);
33250 }
33251
33252 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33253 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
33254 idev->owner = owner;
33255 idev->info = info;
33256 init_waitqueue_head(&idev->wait);
33257 - atomic_set(&idev->event, 0);
33258 + atomic_set_unchecked(&idev->event, 0);
33259
33260 ret = uio_get_minor(idev);
33261 if (ret)
33262 diff -urNp linux-3.0.3/drivers/usb/atm/cxacru.c linux-3.0.3/drivers/usb/atm/cxacru.c
33263 --- linux-3.0.3/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33264 +++ linux-3.0.3/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33265 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33266 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33267 if (ret < 2)
33268 return -EINVAL;
33269 - if (index < 0 || index > 0x7f)
33270 + if (index > 0x7f)
33271 return -EINVAL;
33272 pos += tmp;
33273
33274 diff -urNp linux-3.0.3/drivers/usb/atm/usbatm.c linux-3.0.3/drivers/usb/atm/usbatm.c
33275 --- linux-3.0.3/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33276 +++ linux-3.0.3/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33277 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33278 if (printk_ratelimit())
33279 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33280 __func__, vpi, vci);
33281 - atomic_inc(&vcc->stats->rx_err);
33282 + atomic_inc_unchecked(&vcc->stats->rx_err);
33283 return;
33284 }
33285
33286 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33287 if (length > ATM_MAX_AAL5_PDU) {
33288 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33289 __func__, length, vcc);
33290 - atomic_inc(&vcc->stats->rx_err);
33291 + atomic_inc_unchecked(&vcc->stats->rx_err);
33292 goto out;
33293 }
33294
33295 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33296 if (sarb->len < pdu_length) {
33297 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33298 __func__, pdu_length, sarb->len, vcc);
33299 - atomic_inc(&vcc->stats->rx_err);
33300 + atomic_inc_unchecked(&vcc->stats->rx_err);
33301 goto out;
33302 }
33303
33304 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33305 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33306 __func__, vcc);
33307 - atomic_inc(&vcc->stats->rx_err);
33308 + atomic_inc_unchecked(&vcc->stats->rx_err);
33309 goto out;
33310 }
33311
33312 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33313 if (printk_ratelimit())
33314 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33315 __func__, length);
33316 - atomic_inc(&vcc->stats->rx_drop);
33317 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33318 goto out;
33319 }
33320
33321 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33322
33323 vcc->push(vcc, skb);
33324
33325 - atomic_inc(&vcc->stats->rx);
33326 + atomic_inc_unchecked(&vcc->stats->rx);
33327 out:
33328 skb_trim(sarb, 0);
33329 }
33330 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33331 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33332
33333 usbatm_pop(vcc, skb);
33334 - atomic_inc(&vcc->stats->tx);
33335 + atomic_inc_unchecked(&vcc->stats->tx);
33336
33337 skb = skb_dequeue(&instance->sndqueue);
33338 }
33339 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33340 if (!left--)
33341 return sprintf(page,
33342 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33343 - atomic_read(&atm_dev->stats.aal5.tx),
33344 - atomic_read(&atm_dev->stats.aal5.tx_err),
33345 - atomic_read(&atm_dev->stats.aal5.rx),
33346 - atomic_read(&atm_dev->stats.aal5.rx_err),
33347 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33348 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33349 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33350 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33351 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33352 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33353
33354 if (!left--) {
33355 if (instance->disconnected)
33356 diff -urNp linux-3.0.3/drivers/usb/core/devices.c linux-3.0.3/drivers/usb/core/devices.c
33357 --- linux-3.0.3/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33358 +++ linux-3.0.3/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33359 @@ -126,7 +126,7 @@ static const char format_endpt[] =
33360 * time it gets called.
33361 */
33362 static struct device_connect_event {
33363 - atomic_t count;
33364 + atomic_unchecked_t count;
33365 wait_queue_head_t wait;
33366 } device_event = {
33367 .count = ATOMIC_INIT(1),
33368 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33369
33370 void usbfs_conn_disc_event(void)
33371 {
33372 - atomic_add(2, &device_event.count);
33373 + atomic_add_unchecked(2, &device_event.count);
33374 wake_up(&device_event.wait);
33375 }
33376
33377 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33378
33379 poll_wait(file, &device_event.wait, wait);
33380
33381 - event_count = atomic_read(&device_event.count);
33382 + event_count = atomic_read_unchecked(&device_event.count);
33383 if (file->f_version != event_count) {
33384 file->f_version = event_count;
33385 return POLLIN | POLLRDNORM;
33386 diff -urNp linux-3.0.3/drivers/usb/core/message.c linux-3.0.3/drivers/usb/core/message.c
33387 --- linux-3.0.3/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33388 +++ linux-3.0.3/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33389 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33390 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33391 if (buf) {
33392 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33393 - if (len > 0) {
33394 - smallbuf = kmalloc(++len, GFP_NOIO);
33395 + if (len++ > 0) {
33396 + smallbuf = kmalloc(len, GFP_NOIO);
33397 if (!smallbuf)
33398 return buf;
33399 memcpy(smallbuf, buf, len);
33400 diff -urNp linux-3.0.3/drivers/usb/early/ehci-dbgp.c linux-3.0.3/drivers/usb/early/ehci-dbgp.c
33401 --- linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33402 +++ linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33403 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33404
33405 #ifdef CONFIG_KGDB
33406 static struct kgdb_io kgdbdbgp_io_ops;
33407 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33408 +static struct kgdb_io kgdbdbgp_io_ops_console;
33409 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33410 #else
33411 #define dbgp_kgdb_mode (0)
33412 #endif
33413 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33414 .write_char = kgdbdbgp_write_char,
33415 };
33416
33417 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33418 + .name = "kgdbdbgp",
33419 + .read_char = kgdbdbgp_read_char,
33420 + .write_char = kgdbdbgp_write_char,
33421 + .is_console = 1
33422 +};
33423 +
33424 static int kgdbdbgp_wait_time;
33425
33426 static int __init kgdbdbgp_parse_config(char *str)
33427 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33428 ptr++;
33429 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33430 }
33431 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33432 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33433 + if (early_dbgp_console.index != -1)
33434 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33435 + else
33436 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33437
33438 return 0;
33439 }
33440 diff -urNp linux-3.0.3/drivers/usb/host/xhci-mem.c linux-3.0.3/drivers/usb/host/xhci-mem.c
33441 --- linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33442 +++ linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33443 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33444 unsigned int num_tests;
33445 int i, ret;
33446
33447 + pax_track_stack();
33448 +
33449 num_tests = ARRAY_SIZE(simple_test_vector);
33450 for (i = 0; i < num_tests; i++) {
33451 ret = xhci_test_trb_in_td(xhci,
33452 diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-hc.h linux-3.0.3/drivers/usb/wusbcore/wa-hc.h
33453 --- linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33454 +++ linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33455 @@ -192,7 +192,7 @@ struct wahc {
33456 struct list_head xfer_delayed_list;
33457 spinlock_t xfer_list_lock;
33458 struct work_struct xfer_work;
33459 - atomic_t xfer_id_count;
33460 + atomic_unchecked_t xfer_id_count;
33461 };
33462
33463
33464 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33465 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33466 spin_lock_init(&wa->xfer_list_lock);
33467 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33468 - atomic_set(&wa->xfer_id_count, 1);
33469 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33470 }
33471
33472 /**
33473 diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c
33474 --- linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33475 +++ linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33476 @@ -294,7 +294,7 @@ out:
33477 */
33478 static void wa_xfer_id_init(struct wa_xfer *xfer)
33479 {
33480 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33481 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33482 }
33483
33484 /*
33485 diff -urNp linux-3.0.3/drivers/vhost/vhost.c linux-3.0.3/drivers/vhost/vhost.c
33486 --- linux-3.0.3/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33487 +++ linux-3.0.3/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33488 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33489 return get_user(vq->last_used_idx, &used->idx);
33490 }
33491
33492 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33493 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33494 {
33495 struct file *eventfp, *filep = NULL,
33496 *pollstart = NULL, *pollstop = NULL;
33497 diff -urNp linux-3.0.3/drivers/video/fbcmap.c linux-3.0.3/drivers/video/fbcmap.c
33498 --- linux-3.0.3/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33499 +++ linux-3.0.3/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33500 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33501 rc = -ENODEV;
33502 goto out;
33503 }
33504 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33505 - !info->fbops->fb_setcmap)) {
33506 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33507 rc = -EINVAL;
33508 goto out1;
33509 }
33510 diff -urNp linux-3.0.3/drivers/video/fbmem.c linux-3.0.3/drivers/video/fbmem.c
33511 --- linux-3.0.3/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33512 +++ linux-3.0.3/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33513 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33514 image->dx += image->width + 8;
33515 }
33516 } else if (rotate == FB_ROTATE_UD) {
33517 - for (x = 0; x < num && image->dx >= 0; x++) {
33518 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33519 info->fbops->fb_imageblit(info, image);
33520 image->dx -= image->width + 8;
33521 }
33522 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33523 image->dy += image->height + 8;
33524 }
33525 } else if (rotate == FB_ROTATE_CCW) {
33526 - for (x = 0; x < num && image->dy >= 0; x++) {
33527 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33528 info->fbops->fb_imageblit(info, image);
33529 image->dy -= image->height + 8;
33530 }
33531 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33532 int flags = info->flags;
33533 int ret = 0;
33534
33535 + pax_track_stack();
33536 +
33537 if (var->activate & FB_ACTIVATE_INV_MODE) {
33538 struct fb_videomode mode1, mode2;
33539
33540 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33541 void __user *argp = (void __user *)arg;
33542 long ret = 0;
33543
33544 + pax_track_stack();
33545 +
33546 switch (cmd) {
33547 case FBIOGET_VSCREENINFO:
33548 if (!lock_fb_info(info))
33549 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33550 return -EFAULT;
33551 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33552 return -EINVAL;
33553 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33554 + if (con2fb.framebuffer >= FB_MAX)
33555 return -EINVAL;
33556 if (!registered_fb[con2fb.framebuffer])
33557 request_module("fb%d", con2fb.framebuffer);
33558 diff -urNp linux-3.0.3/drivers/video/i810/i810_accel.c linux-3.0.3/drivers/video/i810/i810_accel.c
33559 --- linux-3.0.3/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33560 +++ linux-3.0.3/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33561 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33562 }
33563 }
33564 printk("ringbuffer lockup!!!\n");
33565 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33566 i810_report_error(mmio);
33567 par->dev_flags |= LOCKUP;
33568 info->pixmap.scan_align = 1;
33569 diff -urNp linux-3.0.3/drivers/video/udlfb.c linux-3.0.3/drivers/video/udlfb.c
33570 --- linux-3.0.3/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
33571 +++ linux-3.0.3/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
33572 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
33573 dlfb_urb_completion(urb);
33574
33575 error:
33576 - atomic_add(bytes_sent, &dev->bytes_sent);
33577 - atomic_add(bytes_identical, &dev->bytes_identical);
33578 - atomic_add(width*height*2, &dev->bytes_rendered);
33579 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33580 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33581 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33582 end_cycles = get_cycles();
33583 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33584 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33585 >> 10)), /* Kcycles */
33586 &dev->cpu_kcycles_used);
33587
33588 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
33589 dlfb_urb_completion(urb);
33590
33591 error:
33592 - atomic_add(bytes_sent, &dev->bytes_sent);
33593 - atomic_add(bytes_identical, &dev->bytes_identical);
33594 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33595 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33596 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33597 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33598 end_cycles = get_cycles();
33599 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33600 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33601 >> 10)), /* Kcycles */
33602 &dev->cpu_kcycles_used);
33603 }
33604 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
33605 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33606 struct dlfb_data *dev = fb_info->par;
33607 return snprintf(buf, PAGE_SIZE, "%u\n",
33608 - atomic_read(&dev->bytes_rendered));
33609 + atomic_read_unchecked(&dev->bytes_rendered));
33610 }
33611
33612 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33613 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
33614 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33615 struct dlfb_data *dev = fb_info->par;
33616 return snprintf(buf, PAGE_SIZE, "%u\n",
33617 - atomic_read(&dev->bytes_identical));
33618 + atomic_read_unchecked(&dev->bytes_identical));
33619 }
33620
33621 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33622 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
33623 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33624 struct dlfb_data *dev = fb_info->par;
33625 return snprintf(buf, PAGE_SIZE, "%u\n",
33626 - atomic_read(&dev->bytes_sent));
33627 + atomic_read_unchecked(&dev->bytes_sent));
33628 }
33629
33630 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33631 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
33632 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33633 struct dlfb_data *dev = fb_info->par;
33634 return snprintf(buf, PAGE_SIZE, "%u\n",
33635 - atomic_read(&dev->cpu_kcycles_used));
33636 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33637 }
33638
33639 static ssize_t edid_show(
33640 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
33641 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33642 struct dlfb_data *dev = fb_info->par;
33643
33644 - atomic_set(&dev->bytes_rendered, 0);
33645 - atomic_set(&dev->bytes_identical, 0);
33646 - atomic_set(&dev->bytes_sent, 0);
33647 - atomic_set(&dev->cpu_kcycles_used, 0);
33648 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33649 + atomic_set_unchecked(&dev->bytes_identical, 0);
33650 + atomic_set_unchecked(&dev->bytes_sent, 0);
33651 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33652
33653 return count;
33654 }
33655 diff -urNp linux-3.0.3/drivers/video/uvesafb.c linux-3.0.3/drivers/video/uvesafb.c
33656 --- linux-3.0.3/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
33657 +++ linux-3.0.3/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
33658 @@ -19,6 +19,7 @@
33659 #include <linux/io.h>
33660 #include <linux/mutex.h>
33661 #include <linux/slab.h>
33662 +#include <linux/moduleloader.h>
33663 #include <video/edid.h>
33664 #include <video/uvesafb.h>
33665 #ifdef CONFIG_X86
33666 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33667 NULL,
33668 };
33669
33670 - return call_usermodehelper(v86d_path, argv, envp, 1);
33671 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33672 }
33673
33674 /*
33675 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33676 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33677 par->pmi_setpal = par->ypan = 0;
33678 } else {
33679 +
33680 +#ifdef CONFIG_PAX_KERNEXEC
33681 +#ifdef CONFIG_MODULES
33682 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33683 +#endif
33684 + if (!par->pmi_code) {
33685 + par->pmi_setpal = par->ypan = 0;
33686 + return 0;
33687 + }
33688 +#endif
33689 +
33690 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33691 + task->t.regs.edi);
33692 +
33693 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33694 + pax_open_kernel();
33695 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33696 + pax_close_kernel();
33697 +
33698 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33699 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33700 +#else
33701 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33702 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33703 +#endif
33704 +
33705 printk(KERN_INFO "uvesafb: protected mode interface info at "
33706 "%04x:%04x\n",
33707 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33708 @@ -1821,6 +1844,11 @@ out:
33709 if (par->vbe_modes)
33710 kfree(par->vbe_modes);
33711
33712 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33713 + if (par->pmi_code)
33714 + module_free_exec(NULL, par->pmi_code);
33715 +#endif
33716 +
33717 framebuffer_release(info);
33718 return err;
33719 }
33720 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33721 kfree(par->vbe_state_orig);
33722 if (par->vbe_state_saved)
33723 kfree(par->vbe_state_saved);
33724 +
33725 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33726 + if (par->pmi_code)
33727 + module_free_exec(NULL, par->pmi_code);
33728 +#endif
33729 +
33730 }
33731
33732 framebuffer_release(info);
33733 diff -urNp linux-3.0.3/drivers/video/vesafb.c linux-3.0.3/drivers/video/vesafb.c
33734 --- linux-3.0.3/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
33735 +++ linux-3.0.3/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
33736 @@ -9,6 +9,7 @@
33737 */
33738
33739 #include <linux/module.h>
33740 +#include <linux/moduleloader.h>
33741 #include <linux/kernel.h>
33742 #include <linux/errno.h>
33743 #include <linux/string.h>
33744 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33745 static int vram_total __initdata; /* Set total amount of memory */
33746 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33747 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33748 -static void (*pmi_start)(void) __read_mostly;
33749 -static void (*pmi_pal) (void) __read_mostly;
33750 +static void (*pmi_start)(void) __read_only;
33751 +static void (*pmi_pal) (void) __read_only;
33752 static int depth __read_mostly;
33753 static int vga_compat __read_mostly;
33754 /* --------------------------------------------------------------------- */
33755 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
33756 unsigned int size_vmode;
33757 unsigned int size_remap;
33758 unsigned int size_total;
33759 + void *pmi_code = NULL;
33760
33761 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33762 return -ENODEV;
33763 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
33764 size_remap = size_total;
33765 vesafb_fix.smem_len = size_remap;
33766
33767 -#ifndef __i386__
33768 - screen_info.vesapm_seg = 0;
33769 -#endif
33770 -
33771 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33772 printk(KERN_WARNING
33773 "vesafb: cannot reserve video memory at 0x%lx\n",
33774 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
33775 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33776 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33777
33778 +#ifdef __i386__
33779 +
33780 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33781 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33782 + if (!pmi_code)
33783 +#elif !defined(CONFIG_PAX_KERNEXEC)
33784 + if (0)
33785 +#endif
33786 +
33787 +#endif
33788 + screen_info.vesapm_seg = 0;
33789 +
33790 if (screen_info.vesapm_seg) {
33791 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33792 - screen_info.vesapm_seg,screen_info.vesapm_off);
33793 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33794 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33795 }
33796
33797 if (screen_info.vesapm_seg < 0xc000)
33798 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
33799
33800 if (ypan || pmi_setpal) {
33801 unsigned short *pmi_base;
33802 +
33803 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33804 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33805 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33806 +
33807 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33808 + pax_open_kernel();
33809 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33810 +#else
33811 + pmi_code = pmi_base;
33812 +#endif
33813 +
33814 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33815 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33816 +
33817 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33818 + pmi_start = ktva_ktla(pmi_start);
33819 + pmi_pal = ktva_ktla(pmi_pal);
33820 + pax_close_kernel();
33821 +#endif
33822 +
33823 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33824 if (pmi_base[3]) {
33825 printk(KERN_INFO "vesafb: pmi: ports = ");
33826 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
33827 info->node, info->fix.id);
33828 return 0;
33829 err:
33830 +
33831 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33832 + module_free_exec(NULL, pmi_code);
33833 +#endif
33834 +
33835 if (info->screen_base)
33836 iounmap(info->screen_base);
33837 framebuffer_release(info);
33838 diff -urNp linux-3.0.3/drivers/video/via/via_clock.h linux-3.0.3/drivers/video/via/via_clock.h
33839 --- linux-3.0.3/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
33840 +++ linux-3.0.3/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
33841 @@ -56,7 +56,7 @@ struct via_clock {
33842
33843 void (*set_engine_pll_state)(u8 state);
33844 void (*set_engine_pll)(struct via_pll_config config);
33845 -};
33846 +} __no_const;
33847
33848
33849 static inline u32 get_pll_internal_frequency(u32 ref_freq,
33850 diff -urNp linux-3.0.3/drivers/virtio/virtio_balloon.c linux-3.0.3/drivers/virtio/virtio_balloon.c
33851 --- linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
33852 +++ linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
33853 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
33854 struct sysinfo i;
33855 int idx = 0;
33856
33857 + pax_track_stack();
33858 +
33859 all_vm_events(events);
33860 si_meminfo(&i);
33861
33862 diff -urNp linux-3.0.3/fs/9p/vfs_inode.c linux-3.0.3/fs/9p/vfs_inode.c
33863 --- linux-3.0.3/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
33864 +++ linux-3.0.3/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
33865 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33866 void
33867 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33868 {
33869 - char *s = nd_get_link(nd);
33870 + const char *s = nd_get_link(nd);
33871
33872 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33873 IS_ERR(s) ? "<error>" : s);
33874 diff -urNp linux-3.0.3/fs/aio.c linux-3.0.3/fs/aio.c
33875 --- linux-3.0.3/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
33876 +++ linux-3.0.3/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
33877 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33878 size += sizeof(struct io_event) * nr_events;
33879 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33880
33881 - if (nr_pages < 0)
33882 + if (nr_pages <= 0)
33883 return -EINVAL;
33884
33885 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33886 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33887 struct aio_timeout to;
33888 int retry = 0;
33889
33890 + pax_track_stack();
33891 +
33892 /* needed to zero any padding within an entry (there shouldn't be
33893 * any, but C is fun!
33894 */
33895 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33896 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33897 {
33898 ssize_t ret;
33899 + struct iovec iovstack;
33900
33901 #ifdef CONFIG_COMPAT
33902 if (compat)
33903 ret = compat_rw_copy_check_uvector(type,
33904 (struct compat_iovec __user *)kiocb->ki_buf,
33905 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33906 + kiocb->ki_nbytes, 1, &iovstack,
33907 &kiocb->ki_iovec);
33908 else
33909 #endif
33910 ret = rw_copy_check_uvector(type,
33911 (struct iovec __user *)kiocb->ki_buf,
33912 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33913 + kiocb->ki_nbytes, 1, &iovstack,
33914 &kiocb->ki_iovec);
33915 if (ret < 0)
33916 goto out;
33917
33918 + if (kiocb->ki_iovec == &iovstack) {
33919 + kiocb->ki_inline_vec = iovstack;
33920 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
33921 + }
33922 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33923 kiocb->ki_cur_seg = 0;
33924 /* ki_nbytes/left now reflect bytes instead of segs */
33925 diff -urNp linux-3.0.3/fs/attr.c linux-3.0.3/fs/attr.c
33926 --- linux-3.0.3/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
33927 +++ linux-3.0.3/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
33928 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33929 unsigned long limit;
33930
33931 limit = rlimit(RLIMIT_FSIZE);
33932 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33933 if (limit != RLIM_INFINITY && offset > limit)
33934 goto out_sig;
33935 if (offset > inode->i_sb->s_maxbytes)
33936 diff -urNp linux-3.0.3/fs/befs/linuxvfs.c linux-3.0.3/fs/befs/linuxvfs.c
33937 --- linux-3.0.3/fs/befs/linuxvfs.c 2011-07-21 22:17:23.000000000 -0400
33938 +++ linux-3.0.3/fs/befs/linuxvfs.c 2011-08-23 21:47:56.000000000 -0400
33939 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
33940 {
33941 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33942 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33943 - char *link = nd_get_link(nd);
33944 + const char *link = nd_get_link(nd);
33945 if (!IS_ERR(link))
33946 kfree(link);
33947 }
33948 diff -urNp linux-3.0.3/fs/binfmt_aout.c linux-3.0.3/fs/binfmt_aout.c
33949 --- linux-3.0.3/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
33950 +++ linux-3.0.3/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
33951 @@ -16,6 +16,7 @@
33952 #include <linux/string.h>
33953 #include <linux/fs.h>
33954 #include <linux/file.h>
33955 +#include <linux/security.h>
33956 #include <linux/stat.h>
33957 #include <linux/fcntl.h>
33958 #include <linux/ptrace.h>
33959 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33960 #endif
33961 # define START_STACK(u) ((void __user *)u.start_stack)
33962
33963 + memset(&dump, 0, sizeof(dump));
33964 +
33965 fs = get_fs();
33966 set_fs(KERNEL_DS);
33967 has_dumped = 1;
33968 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33969
33970 /* If the size of the dump file exceeds the rlimit, then see what would happen
33971 if we wrote the stack, but not the data area. */
33972 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33973 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33974 dump.u_dsize = 0;
33975
33976 /* Make sure we have enough room to write the stack and data areas. */
33977 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33978 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
33979 dump.u_ssize = 0;
33980
33981 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
33982 rlim = rlimit(RLIMIT_DATA);
33983 if (rlim >= RLIM_INFINITY)
33984 rlim = ~0;
33985 +
33986 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
33987 if (ex.a_data + ex.a_bss > rlim)
33988 return -ENOMEM;
33989
33990 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
33991 install_exec_creds(bprm);
33992 current->flags &= ~PF_FORKNOEXEC;
33993
33994 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
33995 + current->mm->pax_flags = 0UL;
33996 +#endif
33997 +
33998 +#ifdef CONFIG_PAX_PAGEEXEC
33999 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
34000 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
34001 +
34002 +#ifdef CONFIG_PAX_EMUTRAMP
34003 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
34004 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
34005 +#endif
34006 +
34007 +#ifdef CONFIG_PAX_MPROTECT
34008 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
34009 + current->mm->pax_flags |= MF_PAX_MPROTECT;
34010 +#endif
34011 +
34012 + }
34013 +#endif
34014 +
34015 if (N_MAGIC(ex) == OMAGIC) {
34016 unsigned long text_addr, map_size;
34017 loff_t pos;
34018 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34019
34020 down_write(&current->mm->mmap_sem);
34021 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34022 - PROT_READ | PROT_WRITE | PROT_EXEC,
34023 + PROT_READ | PROT_WRITE,
34024 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34025 fd_offset + ex.a_text);
34026 up_write(&current->mm->mmap_sem);
34027 diff -urNp linux-3.0.3/fs/binfmt_elf.c linux-3.0.3/fs/binfmt_elf.c
34028 --- linux-3.0.3/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
34029 +++ linux-3.0.3/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
34030 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34031 #define elf_core_dump NULL
34032 #endif
34033
34034 +#ifdef CONFIG_PAX_MPROTECT
34035 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34036 +#endif
34037 +
34038 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34039 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34040 #else
34041 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34042 .load_binary = load_elf_binary,
34043 .load_shlib = load_elf_library,
34044 .core_dump = elf_core_dump,
34045 +
34046 +#ifdef CONFIG_PAX_MPROTECT
34047 + .handle_mprotect= elf_handle_mprotect,
34048 +#endif
34049 +
34050 .min_coredump = ELF_EXEC_PAGESIZE,
34051 };
34052
34053 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34054
34055 static int set_brk(unsigned long start, unsigned long end)
34056 {
34057 + unsigned long e = end;
34058 +
34059 start = ELF_PAGEALIGN(start);
34060 end = ELF_PAGEALIGN(end);
34061 if (end > start) {
34062 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34063 if (BAD_ADDR(addr))
34064 return addr;
34065 }
34066 - current->mm->start_brk = current->mm->brk = end;
34067 + current->mm->start_brk = current->mm->brk = e;
34068 return 0;
34069 }
34070
34071 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34072 elf_addr_t __user *u_rand_bytes;
34073 const char *k_platform = ELF_PLATFORM;
34074 const char *k_base_platform = ELF_BASE_PLATFORM;
34075 - unsigned char k_rand_bytes[16];
34076 + u32 k_rand_bytes[4];
34077 int items;
34078 elf_addr_t *elf_info;
34079 int ei_index = 0;
34080 const struct cred *cred = current_cred();
34081 struct vm_area_struct *vma;
34082 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34083 +
34084 + pax_track_stack();
34085
34086 /*
34087 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34088 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34089 * Generate 16 random bytes for userspace PRNG seeding.
34090 */
34091 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34092 - u_rand_bytes = (elf_addr_t __user *)
34093 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34094 + srandom32(k_rand_bytes[0] ^ random32());
34095 + srandom32(k_rand_bytes[1] ^ random32());
34096 + srandom32(k_rand_bytes[2] ^ random32());
34097 + srandom32(k_rand_bytes[3] ^ random32());
34098 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34099 + u_rand_bytes = (elf_addr_t __user *) p;
34100 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34101 return -EFAULT;
34102
34103 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34104 return -EFAULT;
34105 current->mm->env_end = p;
34106
34107 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34108 +
34109 /* Put the elf_info on the stack in the right place. */
34110 sp = (elf_addr_t __user *)envp + 1;
34111 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34112 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34113 return -EFAULT;
34114 return 0;
34115 }
34116 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34117 {
34118 struct elf_phdr *elf_phdata;
34119 struct elf_phdr *eppnt;
34120 - unsigned long load_addr = 0;
34121 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34122 int load_addr_set = 0;
34123 unsigned long last_bss = 0, elf_bss = 0;
34124 - unsigned long error = ~0UL;
34125 + unsigned long error = -EINVAL;
34126 unsigned long total_size;
34127 int retval, i, size;
34128
34129 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34130 goto out_close;
34131 }
34132
34133 +#ifdef CONFIG_PAX_SEGMEXEC
34134 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34135 + pax_task_size = SEGMEXEC_TASK_SIZE;
34136 +#endif
34137 +
34138 eppnt = elf_phdata;
34139 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34140 if (eppnt->p_type == PT_LOAD) {
34141 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34142 k = load_addr + eppnt->p_vaddr;
34143 if (BAD_ADDR(k) ||
34144 eppnt->p_filesz > eppnt->p_memsz ||
34145 - eppnt->p_memsz > TASK_SIZE ||
34146 - TASK_SIZE - eppnt->p_memsz < k) {
34147 + eppnt->p_memsz > pax_task_size ||
34148 + pax_task_size - eppnt->p_memsz < k) {
34149 error = -ENOMEM;
34150 goto out_close;
34151 }
34152 @@ -528,6 +553,193 @@ out:
34153 return error;
34154 }
34155
34156 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34157 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34158 +{
34159 + unsigned long pax_flags = 0UL;
34160 +
34161 +#ifdef CONFIG_PAX_PAGEEXEC
34162 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34163 + pax_flags |= MF_PAX_PAGEEXEC;
34164 +#endif
34165 +
34166 +#ifdef CONFIG_PAX_SEGMEXEC
34167 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34168 + pax_flags |= MF_PAX_SEGMEXEC;
34169 +#endif
34170 +
34171 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34172 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34173 + if ((__supported_pte_mask & _PAGE_NX))
34174 + pax_flags &= ~MF_PAX_SEGMEXEC;
34175 + else
34176 + pax_flags &= ~MF_PAX_PAGEEXEC;
34177 + }
34178 +#endif
34179 +
34180 +#ifdef CONFIG_PAX_EMUTRAMP
34181 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34182 + pax_flags |= MF_PAX_EMUTRAMP;
34183 +#endif
34184 +
34185 +#ifdef CONFIG_PAX_MPROTECT
34186 + if (elf_phdata->p_flags & PF_MPROTECT)
34187 + pax_flags |= MF_PAX_MPROTECT;
34188 +#endif
34189 +
34190 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34191 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34192 + pax_flags |= MF_PAX_RANDMMAP;
34193 +#endif
34194 +
34195 + return pax_flags;
34196 +}
34197 +#endif
34198 +
34199 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34200 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34201 +{
34202 + unsigned long pax_flags = 0UL;
34203 +
34204 +#ifdef CONFIG_PAX_PAGEEXEC
34205 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34206 + pax_flags |= MF_PAX_PAGEEXEC;
34207 +#endif
34208 +
34209 +#ifdef CONFIG_PAX_SEGMEXEC
34210 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34211 + pax_flags |= MF_PAX_SEGMEXEC;
34212 +#endif
34213 +
34214 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34215 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34216 + if ((__supported_pte_mask & _PAGE_NX))
34217 + pax_flags &= ~MF_PAX_SEGMEXEC;
34218 + else
34219 + pax_flags &= ~MF_PAX_PAGEEXEC;
34220 + }
34221 +#endif
34222 +
34223 +#ifdef CONFIG_PAX_EMUTRAMP
34224 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34225 + pax_flags |= MF_PAX_EMUTRAMP;
34226 +#endif
34227 +
34228 +#ifdef CONFIG_PAX_MPROTECT
34229 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34230 + pax_flags |= MF_PAX_MPROTECT;
34231 +#endif
34232 +
34233 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34234 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34235 + pax_flags |= MF_PAX_RANDMMAP;
34236 +#endif
34237 +
34238 + return pax_flags;
34239 +}
34240 +#endif
34241 +
34242 +#ifdef CONFIG_PAX_EI_PAX
34243 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34244 +{
34245 + unsigned long pax_flags = 0UL;
34246 +
34247 +#ifdef CONFIG_PAX_PAGEEXEC
34248 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34249 + pax_flags |= MF_PAX_PAGEEXEC;
34250 +#endif
34251 +
34252 +#ifdef CONFIG_PAX_SEGMEXEC
34253 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34254 + pax_flags |= MF_PAX_SEGMEXEC;
34255 +#endif
34256 +
34257 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34258 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34259 + if ((__supported_pte_mask & _PAGE_NX))
34260 + pax_flags &= ~MF_PAX_SEGMEXEC;
34261 + else
34262 + pax_flags &= ~MF_PAX_PAGEEXEC;
34263 + }
34264 +#endif
34265 +
34266 +#ifdef CONFIG_PAX_EMUTRAMP
34267 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34268 + pax_flags |= MF_PAX_EMUTRAMP;
34269 +#endif
34270 +
34271 +#ifdef CONFIG_PAX_MPROTECT
34272 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34273 + pax_flags |= MF_PAX_MPROTECT;
34274 +#endif
34275 +
34276 +#ifdef CONFIG_PAX_ASLR
34277 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34278 + pax_flags |= MF_PAX_RANDMMAP;
34279 +#endif
34280 +
34281 + return pax_flags;
34282 +}
34283 +#endif
34284 +
34285 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34286 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34287 +{
34288 + unsigned long pax_flags = 0UL;
34289 +
34290 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34291 + unsigned long i;
34292 + int found_flags = 0;
34293 +#endif
34294 +
34295 +#ifdef CONFIG_PAX_EI_PAX
34296 + pax_flags = pax_parse_ei_pax(elf_ex);
34297 +#endif
34298 +
34299 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34300 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34301 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34302 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34303 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34304 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34305 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34306 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34307 + return -EINVAL;
34308 +
34309 +#ifdef CONFIG_PAX_SOFTMODE
34310 + if (pax_softmode)
34311 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34312 + else
34313 +#endif
34314 +
34315 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34316 + found_flags = 1;
34317 + break;
34318 + }
34319 +#endif
34320 +
34321 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34322 + if (found_flags == 0) {
34323 + struct elf_phdr phdr;
34324 + memset(&phdr, 0, sizeof(phdr));
34325 + phdr.p_flags = PF_NOEMUTRAMP;
34326 +#ifdef CONFIG_PAX_SOFTMODE
34327 + if (pax_softmode)
34328 + pax_flags = pax_parse_softmode(&phdr);
34329 + else
34330 +#endif
34331 + pax_flags = pax_parse_hardmode(&phdr);
34332 + }
34333 +#endif
34334 +
34335 + if (0 > pax_check_flags(&pax_flags))
34336 + return -EINVAL;
34337 +
34338 + current->mm->pax_flags = pax_flags;
34339 + return 0;
34340 +}
34341 +#endif
34342 +
34343 /*
34344 * These are the functions used to load ELF style executables and shared
34345 * libraries. There is no binary dependent code anywhere else.
34346 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34347 {
34348 unsigned int random_variable = 0;
34349
34350 +#ifdef CONFIG_PAX_RANDUSTACK
34351 + if (randomize_va_space)
34352 + return stack_top - current->mm->delta_stack;
34353 +#endif
34354 +
34355 if ((current->flags & PF_RANDOMIZE) &&
34356 !(current->personality & ADDR_NO_RANDOMIZE)) {
34357 random_variable = get_random_int() & STACK_RND_MASK;
34358 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34359 unsigned long load_addr = 0, load_bias = 0;
34360 int load_addr_set = 0;
34361 char * elf_interpreter = NULL;
34362 - unsigned long error;
34363 + unsigned long error = 0;
34364 struct elf_phdr *elf_ppnt, *elf_phdata;
34365 unsigned long elf_bss, elf_brk;
34366 int retval, i;
34367 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34368 unsigned long start_code, end_code, start_data, end_data;
34369 unsigned long reloc_func_desc __maybe_unused = 0;
34370 int executable_stack = EXSTACK_DEFAULT;
34371 - unsigned long def_flags = 0;
34372 struct {
34373 struct elfhdr elf_ex;
34374 struct elfhdr interp_elf_ex;
34375 } *loc;
34376 + unsigned long pax_task_size = TASK_SIZE;
34377
34378 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34379 if (!loc) {
34380 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34381
34382 /* OK, This is the point of no return */
34383 current->flags &= ~PF_FORKNOEXEC;
34384 - current->mm->def_flags = def_flags;
34385 +
34386 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34387 + current->mm->pax_flags = 0UL;
34388 +#endif
34389 +
34390 +#ifdef CONFIG_PAX_DLRESOLVE
34391 + current->mm->call_dl_resolve = 0UL;
34392 +#endif
34393 +
34394 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34395 + current->mm->call_syscall = 0UL;
34396 +#endif
34397 +
34398 +#ifdef CONFIG_PAX_ASLR
34399 + current->mm->delta_mmap = 0UL;
34400 + current->mm->delta_stack = 0UL;
34401 +#endif
34402 +
34403 + current->mm->def_flags = 0;
34404 +
34405 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34406 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34407 + send_sig(SIGKILL, current, 0);
34408 + goto out_free_dentry;
34409 + }
34410 +#endif
34411 +
34412 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34413 + pax_set_initial_flags(bprm);
34414 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34415 + if (pax_set_initial_flags_func)
34416 + (pax_set_initial_flags_func)(bprm);
34417 +#endif
34418 +
34419 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34420 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34421 + current->mm->context.user_cs_limit = PAGE_SIZE;
34422 + current->mm->def_flags |= VM_PAGEEXEC;
34423 + }
34424 +#endif
34425 +
34426 +#ifdef CONFIG_PAX_SEGMEXEC
34427 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34428 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34429 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34430 + pax_task_size = SEGMEXEC_TASK_SIZE;
34431 + current->mm->def_flags |= VM_NOHUGEPAGE;
34432 + }
34433 +#endif
34434 +
34435 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34436 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34437 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34438 + put_cpu();
34439 + }
34440 +#endif
34441
34442 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34443 may depend on the personality. */
34444 SET_PERSONALITY(loc->elf_ex);
34445 +
34446 +#ifdef CONFIG_PAX_ASLR
34447 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34448 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34449 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34450 + }
34451 +#endif
34452 +
34453 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34454 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34455 + executable_stack = EXSTACK_DISABLE_X;
34456 + current->personality &= ~READ_IMPLIES_EXEC;
34457 + } else
34458 +#endif
34459 +
34460 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34461 current->personality |= READ_IMPLIES_EXEC;
34462
34463 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34464 #else
34465 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34466 #endif
34467 +
34468 +#ifdef CONFIG_PAX_RANDMMAP
34469 + /* PaX: randomize base address at the default exe base if requested */
34470 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34471 +#ifdef CONFIG_SPARC64
34472 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34473 +#else
34474 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34475 +#endif
34476 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34477 + elf_flags |= MAP_FIXED;
34478 + }
34479 +#endif
34480 +
34481 }
34482
34483 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34484 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34485 * allowed task size. Note that p_filesz must always be
34486 * <= p_memsz so it is only necessary to check p_memsz.
34487 */
34488 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34489 - elf_ppnt->p_memsz > TASK_SIZE ||
34490 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34491 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34492 + elf_ppnt->p_memsz > pax_task_size ||
34493 + pax_task_size - elf_ppnt->p_memsz < k) {
34494 /* set_brk can never work. Avoid overflows. */
34495 send_sig(SIGKILL, current, 0);
34496 retval = -EINVAL;
34497 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34498 start_data += load_bias;
34499 end_data += load_bias;
34500
34501 +#ifdef CONFIG_PAX_RANDMMAP
34502 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34503 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34504 +#endif
34505 +
34506 /* Calling set_brk effectively mmaps the pages that we need
34507 * for the bss and break sections. We must do this before
34508 * mapping in the interpreter, to make sure it doesn't wind
34509 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34510 goto out_free_dentry;
34511 }
34512 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34513 - send_sig(SIGSEGV, current, 0);
34514 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34515 - goto out_free_dentry;
34516 + /*
34517 + * This bss-zeroing can fail if the ELF
34518 + * file specifies odd protections. So
34519 + * we don't check the return value
34520 + */
34521 }
34522
34523 if (elf_interpreter) {
34524 @@ -1090,7 +1398,7 @@ out:
34525 * Decide what to dump of a segment, part, all or none.
34526 */
34527 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34528 - unsigned long mm_flags)
34529 + unsigned long mm_flags, long signr)
34530 {
34531 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34532
34533 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34534 if (vma->vm_file == NULL)
34535 return 0;
34536
34537 - if (FILTER(MAPPED_PRIVATE))
34538 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34539 goto whole;
34540
34541 /*
34542 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34543 {
34544 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34545 int i = 0;
34546 - do
34547 + do {
34548 i += 2;
34549 - while (auxv[i - 2] != AT_NULL);
34550 + } while (auxv[i - 2] != AT_NULL);
34551 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34552 }
34553
34554 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34555 }
34556
34557 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34558 - unsigned long mm_flags)
34559 + struct coredump_params *cprm)
34560 {
34561 struct vm_area_struct *vma;
34562 size_t size = 0;
34563
34564 for (vma = first_vma(current, gate_vma); vma != NULL;
34565 vma = next_vma(vma, gate_vma))
34566 - size += vma_dump_size(vma, mm_flags);
34567 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34568 return size;
34569 }
34570
34571 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34572
34573 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34574
34575 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34576 + offset += elf_core_vma_data_size(gate_vma, cprm);
34577 offset += elf_core_extra_data_size();
34578 e_shoff = offset;
34579
34580 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34581 offset = dataoff;
34582
34583 size += sizeof(*elf);
34584 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34585 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34586 goto end_coredump;
34587
34588 size += sizeof(*phdr4note);
34589 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34590 if (size > cprm->limit
34591 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34592 goto end_coredump;
34593 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34594 phdr.p_offset = offset;
34595 phdr.p_vaddr = vma->vm_start;
34596 phdr.p_paddr = 0;
34597 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34598 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34599 phdr.p_memsz = vma->vm_end - vma->vm_start;
34600 offset += phdr.p_filesz;
34601 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34602 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34603 phdr.p_align = ELF_EXEC_PAGESIZE;
34604
34605 size += sizeof(phdr);
34606 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34607 if (size > cprm->limit
34608 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34609 goto end_coredump;
34610 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34611 unsigned long addr;
34612 unsigned long end;
34613
34614 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34615 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34616
34617 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34618 struct page *page;
34619 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34620 page = get_dump_page(addr);
34621 if (page) {
34622 void *kaddr = kmap(page);
34623 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34624 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34625 !dump_write(cprm->file, kaddr,
34626 PAGE_SIZE);
34627 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34628
34629 if (e_phnum == PN_XNUM) {
34630 size += sizeof(*shdr4extnum);
34631 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34632 if (size > cprm->limit
34633 || !dump_write(cprm->file, shdr4extnum,
34634 sizeof(*shdr4extnum)))
34635 @@ -2067,6 +2380,97 @@ out:
34636
34637 #endif /* CONFIG_ELF_CORE */
34638
34639 +#ifdef CONFIG_PAX_MPROTECT
34640 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34641 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34642 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34643 + *
34644 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34645 + * basis because we want to allow the common case and not the special ones.
34646 + */
34647 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34648 +{
34649 + struct elfhdr elf_h;
34650 + struct elf_phdr elf_p;
34651 + unsigned long i;
34652 + unsigned long oldflags;
34653 + bool is_textrel_rw, is_textrel_rx, is_relro;
34654 +
34655 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34656 + return;
34657 +
34658 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34659 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34660 +
34661 +#ifdef CONFIG_PAX_ELFRELOCS
34662 + /* possible TEXTREL */
34663 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34664 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34665 +#else
34666 + is_textrel_rw = false;
34667 + is_textrel_rx = false;
34668 +#endif
34669 +
34670 + /* possible RELRO */
34671 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34672 +
34673 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34674 + return;
34675 +
34676 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34677 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34678 +
34679 +#ifdef CONFIG_PAX_ETEXECRELOCS
34680 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34681 +#else
34682 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34683 +#endif
34684 +
34685 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34686 + !elf_check_arch(&elf_h) ||
34687 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34688 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34689 + return;
34690 +
34691 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34692 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34693 + return;
34694 + switch (elf_p.p_type) {
34695 + case PT_DYNAMIC:
34696 + if (!is_textrel_rw && !is_textrel_rx)
34697 + continue;
34698 + i = 0UL;
34699 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34700 + elf_dyn dyn;
34701 +
34702 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34703 + return;
34704 + if (dyn.d_tag == DT_NULL)
34705 + return;
34706 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34707 + gr_log_textrel(vma);
34708 + if (is_textrel_rw)
34709 + vma->vm_flags |= VM_MAYWRITE;
34710 + else
34711 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34712 + vma->vm_flags &= ~VM_MAYWRITE;
34713 + return;
34714 + }
34715 + i++;
34716 + }
34717 + return;
34718 +
34719 + case PT_GNU_RELRO:
34720 + if (!is_relro)
34721 + continue;
34722 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34723 + vma->vm_flags &= ~VM_MAYWRITE;
34724 + return;
34725 + }
34726 + }
34727 +}
34728 +#endif
34729 +
34730 static int __init init_elf_binfmt(void)
34731 {
34732 return register_binfmt(&elf_format);
34733 diff -urNp linux-3.0.3/fs/binfmt_flat.c linux-3.0.3/fs/binfmt_flat.c
34734 --- linux-3.0.3/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
34735 +++ linux-3.0.3/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
34736 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34737 realdatastart = (unsigned long) -ENOMEM;
34738 printk("Unable to allocate RAM for process data, errno %d\n",
34739 (int)-realdatastart);
34740 + down_write(&current->mm->mmap_sem);
34741 do_munmap(current->mm, textpos, text_len);
34742 + up_write(&current->mm->mmap_sem);
34743 ret = realdatastart;
34744 goto err;
34745 }
34746 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34747 }
34748 if (IS_ERR_VALUE(result)) {
34749 printk("Unable to read data+bss, errno %d\n", (int)-result);
34750 + down_write(&current->mm->mmap_sem);
34751 do_munmap(current->mm, textpos, text_len);
34752 do_munmap(current->mm, realdatastart, len);
34753 + up_write(&current->mm->mmap_sem);
34754 ret = result;
34755 goto err;
34756 }
34757 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34758 }
34759 if (IS_ERR_VALUE(result)) {
34760 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34761 + down_write(&current->mm->mmap_sem);
34762 do_munmap(current->mm, textpos, text_len + data_len + extra +
34763 MAX_SHARED_LIBS * sizeof(unsigned long));
34764 + up_write(&current->mm->mmap_sem);
34765 ret = result;
34766 goto err;
34767 }
34768 diff -urNp linux-3.0.3/fs/bio.c linux-3.0.3/fs/bio.c
34769 --- linux-3.0.3/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
34770 +++ linux-3.0.3/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
34771 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34772 const int read = bio_data_dir(bio) == READ;
34773 struct bio_map_data *bmd = bio->bi_private;
34774 int i;
34775 - char *p = bmd->sgvecs[0].iov_base;
34776 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34777
34778 __bio_for_each_segment(bvec, bio, i, 0) {
34779 char *addr = page_address(bvec->bv_page);
34780 diff -urNp linux-3.0.3/fs/block_dev.c linux-3.0.3/fs/block_dev.c
34781 --- linux-3.0.3/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
34782 +++ linux-3.0.3/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
34783 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34784 else if (bdev->bd_contains == bdev)
34785 return true; /* is a whole device which isn't held */
34786
34787 - else if (whole->bd_holder == bd_may_claim)
34788 + else if (whole->bd_holder == (void *)bd_may_claim)
34789 return true; /* is a partition of a device that is being partitioned */
34790 else if (whole->bd_holder != NULL)
34791 return false; /* is a partition of a held device */
34792 diff -urNp linux-3.0.3/fs/btrfs/ctree.c linux-3.0.3/fs/btrfs/ctree.c
34793 --- linux-3.0.3/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
34794 +++ linux-3.0.3/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
34795 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
34796 free_extent_buffer(buf);
34797 add_root_to_dirty_list(root);
34798 } else {
34799 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34800 - parent_start = parent->start;
34801 - else
34802 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34803 + if (parent)
34804 + parent_start = parent->start;
34805 + else
34806 + parent_start = 0;
34807 + } else
34808 parent_start = 0;
34809
34810 WARN_ON(trans->transid != btrfs_header_generation(parent));
34811 diff -urNp linux-3.0.3/fs/btrfs/inode.c linux-3.0.3/fs/btrfs/inode.c
34812 --- linux-3.0.3/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34813 +++ linux-3.0.3/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
34814 @@ -6895,7 +6895,7 @@ fail:
34815 return -ENOMEM;
34816 }
34817
34818 -static int btrfs_getattr(struct vfsmount *mnt,
34819 +int btrfs_getattr(struct vfsmount *mnt,
34820 struct dentry *dentry, struct kstat *stat)
34821 {
34822 struct inode *inode = dentry->d_inode;
34823 @@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
34824 return 0;
34825 }
34826
34827 +EXPORT_SYMBOL(btrfs_getattr);
34828 +
34829 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34830 +{
34831 + return BTRFS_I(inode)->root->anon_super.s_dev;
34832 +}
34833 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34834 +
34835 /*
34836 * If a file is moved, it will inherit the cow and compression flags of the new
34837 * directory.
34838 diff -urNp linux-3.0.3/fs/btrfs/ioctl.c linux-3.0.3/fs/btrfs/ioctl.c
34839 --- linux-3.0.3/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
34840 +++ linux-3.0.3/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
34841 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
34842 for (i = 0; i < num_types; i++) {
34843 struct btrfs_space_info *tmp;
34844
34845 + /* Don't copy in more than we allocated */
34846 if (!slot_count)
34847 break;
34848
34849 + slot_count--;
34850 +
34851 info = NULL;
34852 rcu_read_lock();
34853 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34854 @@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
34855 memcpy(dest, &space, sizeof(space));
34856 dest++;
34857 space_args.total_spaces++;
34858 - slot_count--;
34859 }
34860 - if (!slot_count)
34861 - break;
34862 }
34863 up_read(&info->groups_sem);
34864 }
34865 diff -urNp linux-3.0.3/fs/btrfs/relocation.c linux-3.0.3/fs/btrfs/relocation.c
34866 --- linux-3.0.3/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
34867 +++ linux-3.0.3/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
34868 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
34869 }
34870 spin_unlock(&rc->reloc_root_tree.lock);
34871
34872 - BUG_ON((struct btrfs_root *)node->data != root);
34873 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34874
34875 if (!del) {
34876 spin_lock(&rc->reloc_root_tree.lock);
34877 diff -urNp linux-3.0.3/fs/cachefiles/bind.c linux-3.0.3/fs/cachefiles/bind.c
34878 --- linux-3.0.3/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
34879 +++ linux-3.0.3/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
34880 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34881 args);
34882
34883 /* start by checking things over */
34884 - ASSERT(cache->fstop_percent >= 0 &&
34885 - cache->fstop_percent < cache->fcull_percent &&
34886 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
34887 cache->fcull_percent < cache->frun_percent &&
34888 cache->frun_percent < 100);
34889
34890 - ASSERT(cache->bstop_percent >= 0 &&
34891 - cache->bstop_percent < cache->bcull_percent &&
34892 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
34893 cache->bcull_percent < cache->brun_percent &&
34894 cache->brun_percent < 100);
34895
34896 diff -urNp linux-3.0.3/fs/cachefiles/daemon.c linux-3.0.3/fs/cachefiles/daemon.c
34897 --- linux-3.0.3/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
34898 +++ linux-3.0.3/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
34899 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34900 if (n > buflen)
34901 return -EMSGSIZE;
34902
34903 - if (copy_to_user(_buffer, buffer, n) != 0)
34904 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34905 return -EFAULT;
34906
34907 return n;
34908 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34909 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34910 return -EIO;
34911
34912 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
34913 + if (datalen > PAGE_SIZE - 1)
34914 return -EOPNOTSUPP;
34915
34916 /* drag the command string into the kernel so we can parse it */
34917 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34918 if (args[0] != '%' || args[1] != '\0')
34919 return -EINVAL;
34920
34921 - if (fstop < 0 || fstop >= cache->fcull_percent)
34922 + if (fstop >= cache->fcull_percent)
34923 return cachefiles_daemon_range_error(cache, args);
34924
34925 cache->fstop_percent = fstop;
34926 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34927 if (args[0] != '%' || args[1] != '\0')
34928 return -EINVAL;
34929
34930 - if (bstop < 0 || bstop >= cache->bcull_percent)
34931 + if (bstop >= cache->bcull_percent)
34932 return cachefiles_daemon_range_error(cache, args);
34933
34934 cache->bstop_percent = bstop;
34935 diff -urNp linux-3.0.3/fs/cachefiles/internal.h linux-3.0.3/fs/cachefiles/internal.h
34936 --- linux-3.0.3/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
34937 +++ linux-3.0.3/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
34938 @@ -57,7 +57,7 @@ struct cachefiles_cache {
34939 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34940 struct rb_root active_nodes; /* active nodes (can't be culled) */
34941 rwlock_t active_lock; /* lock for active_nodes */
34942 - atomic_t gravecounter; /* graveyard uniquifier */
34943 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34944 unsigned frun_percent; /* when to stop culling (% files) */
34945 unsigned fcull_percent; /* when to start culling (% files) */
34946 unsigned fstop_percent; /* when to stop allocating (% files) */
34947 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34948 * proc.c
34949 */
34950 #ifdef CONFIG_CACHEFILES_HISTOGRAM
34951 -extern atomic_t cachefiles_lookup_histogram[HZ];
34952 -extern atomic_t cachefiles_mkdir_histogram[HZ];
34953 -extern atomic_t cachefiles_create_histogram[HZ];
34954 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34955 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34956 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
34957
34958 extern int __init cachefiles_proc_init(void);
34959 extern void cachefiles_proc_cleanup(void);
34960 static inline
34961 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
34962 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
34963 {
34964 unsigned long jif = jiffies - start_jif;
34965 if (jif >= HZ)
34966 jif = HZ - 1;
34967 - atomic_inc(&histogram[jif]);
34968 + atomic_inc_unchecked(&histogram[jif]);
34969 }
34970
34971 #else
34972 diff -urNp linux-3.0.3/fs/cachefiles/namei.c linux-3.0.3/fs/cachefiles/namei.c
34973 --- linux-3.0.3/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
34974 +++ linux-3.0.3/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
34975 @@ -318,7 +318,7 @@ try_again:
34976 /* first step is to make up a grave dentry in the graveyard */
34977 sprintf(nbuffer, "%08x%08x",
34978 (uint32_t) get_seconds(),
34979 - (uint32_t) atomic_inc_return(&cache->gravecounter));
34980 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
34981
34982 /* do the multiway lock magic */
34983 trap = lock_rename(cache->graveyard, dir);
34984 diff -urNp linux-3.0.3/fs/cachefiles/proc.c linux-3.0.3/fs/cachefiles/proc.c
34985 --- linux-3.0.3/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
34986 +++ linux-3.0.3/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
34987 @@ -14,9 +14,9 @@
34988 #include <linux/seq_file.h>
34989 #include "internal.h"
34990
34991 -atomic_t cachefiles_lookup_histogram[HZ];
34992 -atomic_t cachefiles_mkdir_histogram[HZ];
34993 -atomic_t cachefiles_create_histogram[HZ];
34994 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34995 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34996 +atomic_unchecked_t cachefiles_create_histogram[HZ];
34997
34998 /*
34999 * display the latency histogram
35000 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
35001 return 0;
35002 default:
35003 index = (unsigned long) v - 3;
35004 - x = atomic_read(&cachefiles_lookup_histogram[index]);
35005 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
35006 - z = atomic_read(&cachefiles_create_histogram[index]);
35007 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
35008 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
35009 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
35010 if (x == 0 && y == 0 && z == 0)
35011 return 0;
35012
35013 diff -urNp linux-3.0.3/fs/cachefiles/rdwr.c linux-3.0.3/fs/cachefiles/rdwr.c
35014 --- linux-3.0.3/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
35015 +++ linux-3.0.3/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
35016 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35017 old_fs = get_fs();
35018 set_fs(KERNEL_DS);
35019 ret = file->f_op->write(
35020 - file, (const void __user *) data, len, &pos);
35021 + file, (__force const void __user *) data, len, &pos);
35022 set_fs(old_fs);
35023 kunmap(page);
35024 if (ret != len)
35025 diff -urNp linux-3.0.3/fs/ceph/dir.c linux-3.0.3/fs/ceph/dir.c
35026 --- linux-3.0.3/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
35027 +++ linux-3.0.3/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
35028 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35029 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35030 struct ceph_mds_client *mdsc = fsc->mdsc;
35031 unsigned frag = fpos_frag(filp->f_pos);
35032 - int off = fpos_off(filp->f_pos);
35033 + unsigned int off = fpos_off(filp->f_pos);
35034 int err;
35035 u32 ftype;
35036 struct ceph_mds_reply_info_parsed *rinfo;
35037 diff -urNp linux-3.0.3/fs/cifs/cifs_debug.c linux-3.0.3/fs/cifs/cifs_debug.c
35038 --- linux-3.0.3/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
35039 +++ linux-3.0.3/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
35040 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
35041
35042 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
35043 #ifdef CONFIG_CIFS_STATS2
35044 - atomic_set(&totBufAllocCount, 0);
35045 - atomic_set(&totSmBufAllocCount, 0);
35046 + atomic_set_unchecked(&totBufAllocCount, 0);
35047 + atomic_set_unchecked(&totSmBufAllocCount, 0);
35048 #endif /* CONFIG_CIFS_STATS2 */
35049 spin_lock(&cifs_tcp_ses_lock);
35050 list_for_each(tmp1, &cifs_tcp_ses_list) {
35051 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35052 tcon = list_entry(tmp3,
35053 struct cifs_tcon,
35054 tcon_list);
35055 - atomic_set(&tcon->num_smbs_sent, 0);
35056 - atomic_set(&tcon->num_writes, 0);
35057 - atomic_set(&tcon->num_reads, 0);
35058 - atomic_set(&tcon->num_oplock_brks, 0);
35059 - atomic_set(&tcon->num_opens, 0);
35060 - atomic_set(&tcon->num_posixopens, 0);
35061 - atomic_set(&tcon->num_posixmkdirs, 0);
35062 - atomic_set(&tcon->num_closes, 0);
35063 - atomic_set(&tcon->num_deletes, 0);
35064 - atomic_set(&tcon->num_mkdirs, 0);
35065 - atomic_set(&tcon->num_rmdirs, 0);
35066 - atomic_set(&tcon->num_renames, 0);
35067 - atomic_set(&tcon->num_t2renames, 0);
35068 - atomic_set(&tcon->num_ffirst, 0);
35069 - atomic_set(&tcon->num_fnext, 0);
35070 - atomic_set(&tcon->num_fclose, 0);
35071 - atomic_set(&tcon->num_hardlinks, 0);
35072 - atomic_set(&tcon->num_symlinks, 0);
35073 - atomic_set(&tcon->num_locks, 0);
35074 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35075 + atomic_set_unchecked(&tcon->num_writes, 0);
35076 + atomic_set_unchecked(&tcon->num_reads, 0);
35077 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35078 + atomic_set_unchecked(&tcon->num_opens, 0);
35079 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35080 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35081 + atomic_set_unchecked(&tcon->num_closes, 0);
35082 + atomic_set_unchecked(&tcon->num_deletes, 0);
35083 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35084 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35085 + atomic_set_unchecked(&tcon->num_renames, 0);
35086 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35087 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35088 + atomic_set_unchecked(&tcon->num_fnext, 0);
35089 + atomic_set_unchecked(&tcon->num_fclose, 0);
35090 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35091 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35092 + atomic_set_unchecked(&tcon->num_locks, 0);
35093 }
35094 }
35095 }
35096 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
35097 smBufAllocCount.counter, cifs_min_small);
35098 #ifdef CONFIG_CIFS_STATS2
35099 seq_printf(m, "Total Large %d Small %d Allocations\n",
35100 - atomic_read(&totBufAllocCount),
35101 - atomic_read(&totSmBufAllocCount));
35102 + atomic_read_unchecked(&totBufAllocCount),
35103 + atomic_read_unchecked(&totSmBufAllocCount));
35104 #endif /* CONFIG_CIFS_STATS2 */
35105
35106 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
35107 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35108 if (tcon->need_reconnect)
35109 seq_puts(m, "\tDISCONNECTED ");
35110 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35111 - atomic_read(&tcon->num_smbs_sent),
35112 - atomic_read(&tcon->num_oplock_brks));
35113 + atomic_read_unchecked(&tcon->num_smbs_sent),
35114 + atomic_read_unchecked(&tcon->num_oplock_brks));
35115 seq_printf(m, "\nReads: %d Bytes: %lld",
35116 - atomic_read(&tcon->num_reads),
35117 + atomic_read_unchecked(&tcon->num_reads),
35118 (long long)(tcon->bytes_read));
35119 seq_printf(m, "\nWrites: %d Bytes: %lld",
35120 - atomic_read(&tcon->num_writes),
35121 + atomic_read_unchecked(&tcon->num_writes),
35122 (long long)(tcon->bytes_written));
35123 seq_printf(m, "\nFlushes: %d",
35124 - atomic_read(&tcon->num_flushes));
35125 + atomic_read_unchecked(&tcon->num_flushes));
35126 seq_printf(m, "\nLocks: %d HardLinks: %d "
35127 "Symlinks: %d",
35128 - atomic_read(&tcon->num_locks),
35129 - atomic_read(&tcon->num_hardlinks),
35130 - atomic_read(&tcon->num_symlinks));
35131 + atomic_read_unchecked(&tcon->num_locks),
35132 + atomic_read_unchecked(&tcon->num_hardlinks),
35133 + atomic_read_unchecked(&tcon->num_symlinks));
35134 seq_printf(m, "\nOpens: %d Closes: %d "
35135 "Deletes: %d",
35136 - atomic_read(&tcon->num_opens),
35137 - atomic_read(&tcon->num_closes),
35138 - atomic_read(&tcon->num_deletes));
35139 + atomic_read_unchecked(&tcon->num_opens),
35140 + atomic_read_unchecked(&tcon->num_closes),
35141 + atomic_read_unchecked(&tcon->num_deletes));
35142 seq_printf(m, "\nPosix Opens: %d "
35143 "Posix Mkdirs: %d",
35144 - atomic_read(&tcon->num_posixopens),
35145 - atomic_read(&tcon->num_posixmkdirs));
35146 + atomic_read_unchecked(&tcon->num_posixopens),
35147 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35148 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35149 - atomic_read(&tcon->num_mkdirs),
35150 - atomic_read(&tcon->num_rmdirs));
35151 + atomic_read_unchecked(&tcon->num_mkdirs),
35152 + atomic_read_unchecked(&tcon->num_rmdirs));
35153 seq_printf(m, "\nRenames: %d T2 Renames %d",
35154 - atomic_read(&tcon->num_renames),
35155 - atomic_read(&tcon->num_t2renames));
35156 + atomic_read_unchecked(&tcon->num_renames),
35157 + atomic_read_unchecked(&tcon->num_t2renames));
35158 seq_printf(m, "\nFindFirst: %d FNext %d "
35159 "FClose %d",
35160 - atomic_read(&tcon->num_ffirst),
35161 - atomic_read(&tcon->num_fnext),
35162 - atomic_read(&tcon->num_fclose));
35163 + atomic_read_unchecked(&tcon->num_ffirst),
35164 + atomic_read_unchecked(&tcon->num_fnext),
35165 + atomic_read_unchecked(&tcon->num_fclose));
35166 }
35167 }
35168 }
35169 diff -urNp linux-3.0.3/fs/cifs/cifsfs.c linux-3.0.3/fs/cifs/cifsfs.c
35170 --- linux-3.0.3/fs/cifs/cifsfs.c 2011-08-23 21:44:40.000000000 -0400
35171 +++ linux-3.0.3/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
35172 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
35173 cifs_req_cachep = kmem_cache_create("cifs_request",
35174 CIFSMaxBufSize +
35175 MAX_CIFS_HDR_SIZE, 0,
35176 - SLAB_HWCACHE_ALIGN, NULL);
35177 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
35178 if (cifs_req_cachep == NULL)
35179 return -ENOMEM;
35180
35181 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
35182 efficient to alloc 1 per page off the slab compared to 17K (5page)
35183 alloc of large cifs buffers even when page debugging is on */
35184 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
35185 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
35186 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
35187 NULL);
35188 if (cifs_sm_req_cachep == NULL) {
35189 mempool_destroy(cifs_req_poolp);
35190 @@ -1106,8 +1106,8 @@ init_cifs(void)
35191 atomic_set(&bufAllocCount, 0);
35192 atomic_set(&smBufAllocCount, 0);
35193 #ifdef CONFIG_CIFS_STATS2
35194 - atomic_set(&totBufAllocCount, 0);
35195 - atomic_set(&totSmBufAllocCount, 0);
35196 + atomic_set_unchecked(&totBufAllocCount, 0);
35197 + atomic_set_unchecked(&totSmBufAllocCount, 0);
35198 #endif /* CONFIG_CIFS_STATS2 */
35199
35200 atomic_set(&midCount, 0);
35201 diff -urNp linux-3.0.3/fs/cifs/cifsglob.h linux-3.0.3/fs/cifs/cifsglob.h
35202 --- linux-3.0.3/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
35203 +++ linux-3.0.3/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
35204 @@ -381,28 +381,28 @@ struct cifs_tcon {
35205 __u16 Flags; /* optional support bits */
35206 enum statusEnum tidStatus;
35207 #ifdef CONFIG_CIFS_STATS
35208 - atomic_t num_smbs_sent;
35209 - atomic_t num_writes;
35210 - atomic_t num_reads;
35211 - atomic_t num_flushes;
35212 - atomic_t num_oplock_brks;
35213 - atomic_t num_opens;
35214 - atomic_t num_closes;
35215 - atomic_t num_deletes;
35216 - atomic_t num_mkdirs;
35217 - atomic_t num_posixopens;
35218 - atomic_t num_posixmkdirs;
35219 - atomic_t num_rmdirs;
35220 - atomic_t num_renames;
35221 - atomic_t num_t2renames;
35222 - atomic_t num_ffirst;
35223 - atomic_t num_fnext;
35224 - atomic_t num_fclose;
35225 - atomic_t num_hardlinks;
35226 - atomic_t num_symlinks;
35227 - atomic_t num_locks;
35228 - atomic_t num_acl_get;
35229 - atomic_t num_acl_set;
35230 + atomic_unchecked_t num_smbs_sent;
35231 + atomic_unchecked_t num_writes;
35232 + atomic_unchecked_t num_reads;
35233 + atomic_unchecked_t num_flushes;
35234 + atomic_unchecked_t num_oplock_brks;
35235 + atomic_unchecked_t num_opens;
35236 + atomic_unchecked_t num_closes;
35237 + atomic_unchecked_t num_deletes;
35238 + atomic_unchecked_t num_mkdirs;
35239 + atomic_unchecked_t num_posixopens;
35240 + atomic_unchecked_t num_posixmkdirs;
35241 + atomic_unchecked_t num_rmdirs;
35242 + atomic_unchecked_t num_renames;
35243 + atomic_unchecked_t num_t2renames;
35244 + atomic_unchecked_t num_ffirst;
35245 + atomic_unchecked_t num_fnext;
35246 + atomic_unchecked_t num_fclose;
35247 + atomic_unchecked_t num_hardlinks;
35248 + atomic_unchecked_t num_symlinks;
35249 + atomic_unchecked_t num_locks;
35250 + atomic_unchecked_t num_acl_get;
35251 + atomic_unchecked_t num_acl_set;
35252 #ifdef CONFIG_CIFS_STATS2
35253 unsigned long long time_writes;
35254 unsigned long long time_reads;
35255 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
35256 }
35257
35258 #ifdef CONFIG_CIFS_STATS
35259 -#define cifs_stats_inc atomic_inc
35260 +#define cifs_stats_inc atomic_inc_unchecked
35261
35262 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
35263 unsigned int bytes)
35264 @@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
35265 /* Various Debug counters */
35266 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
35267 #ifdef CONFIG_CIFS_STATS2
35268 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
35269 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
35270 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
35271 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
35272 #endif
35273 GLOBAL_EXTERN atomic_t smBufAllocCount;
35274 GLOBAL_EXTERN atomic_t midCount;
35275 diff -urNp linux-3.0.3/fs/cifs/link.c linux-3.0.3/fs/cifs/link.c
35276 --- linux-3.0.3/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
35277 +++ linux-3.0.3/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
35278 @@ -587,7 +587,7 @@ symlink_exit:
35279
35280 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35281 {
35282 - char *p = nd_get_link(nd);
35283 + const char *p = nd_get_link(nd);
35284 if (!IS_ERR(p))
35285 kfree(p);
35286 }
35287 diff -urNp linux-3.0.3/fs/cifs/misc.c linux-3.0.3/fs/cifs/misc.c
35288 --- linux-3.0.3/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
35289 +++ linux-3.0.3/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
35290 @@ -156,7 +156,7 @@ cifs_buf_get(void)
35291 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
35292 atomic_inc(&bufAllocCount);
35293 #ifdef CONFIG_CIFS_STATS2
35294 - atomic_inc(&totBufAllocCount);
35295 + atomic_inc_unchecked(&totBufAllocCount);
35296 #endif /* CONFIG_CIFS_STATS2 */
35297 }
35298
35299 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
35300 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
35301 atomic_inc(&smBufAllocCount);
35302 #ifdef CONFIG_CIFS_STATS2
35303 - atomic_inc(&totSmBufAllocCount);
35304 + atomic_inc_unchecked(&totSmBufAllocCount);
35305 #endif /* CONFIG_CIFS_STATS2 */
35306
35307 }
35308 diff -urNp linux-3.0.3/fs/coda/cache.c linux-3.0.3/fs/coda/cache.c
35309 --- linux-3.0.3/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
35310 +++ linux-3.0.3/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
35311 @@ -24,7 +24,7 @@
35312 #include "coda_linux.h"
35313 #include "coda_cache.h"
35314
35315 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35316 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35317
35318 /* replace or extend an acl cache hit */
35319 void coda_cache_enter(struct inode *inode, int mask)
35320 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35321 struct coda_inode_info *cii = ITOC(inode);
35322
35323 spin_lock(&cii->c_lock);
35324 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35325 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35326 if (cii->c_uid != current_fsuid()) {
35327 cii->c_uid = current_fsuid();
35328 cii->c_cached_perm = mask;
35329 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35330 {
35331 struct coda_inode_info *cii = ITOC(inode);
35332 spin_lock(&cii->c_lock);
35333 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35334 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35335 spin_unlock(&cii->c_lock);
35336 }
35337
35338 /* remove all acl caches */
35339 void coda_cache_clear_all(struct super_block *sb)
35340 {
35341 - atomic_inc(&permission_epoch);
35342 + atomic_inc_unchecked(&permission_epoch);
35343 }
35344
35345
35346 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35347 spin_lock(&cii->c_lock);
35348 hit = (mask & cii->c_cached_perm) == mask &&
35349 cii->c_uid == current_fsuid() &&
35350 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35351 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35352 spin_unlock(&cii->c_lock);
35353
35354 return hit;
35355 diff -urNp linux-3.0.3/fs/compat_binfmt_elf.c linux-3.0.3/fs/compat_binfmt_elf.c
35356 --- linux-3.0.3/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
35357 +++ linux-3.0.3/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
35358 @@ -30,11 +30,13 @@
35359 #undef elf_phdr
35360 #undef elf_shdr
35361 #undef elf_note
35362 +#undef elf_dyn
35363 #undef elf_addr_t
35364 #define elfhdr elf32_hdr
35365 #define elf_phdr elf32_phdr
35366 #define elf_shdr elf32_shdr
35367 #define elf_note elf32_note
35368 +#define elf_dyn Elf32_Dyn
35369 #define elf_addr_t Elf32_Addr
35370
35371 /*
35372 diff -urNp linux-3.0.3/fs/compat.c linux-3.0.3/fs/compat.c
35373 --- linux-3.0.3/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
35374 +++ linux-3.0.3/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
35375 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35376 goto out;
35377
35378 ret = -EINVAL;
35379 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35380 + if (nr_segs > UIO_MAXIOV)
35381 goto out;
35382 if (nr_segs > fast_segs) {
35383 ret = -ENOMEM;
35384 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35385
35386 struct compat_readdir_callback {
35387 struct compat_old_linux_dirent __user *dirent;
35388 + struct file * file;
35389 int result;
35390 };
35391
35392 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35393 buf->result = -EOVERFLOW;
35394 return -EOVERFLOW;
35395 }
35396 +
35397 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35398 + return 0;
35399 +
35400 buf->result++;
35401 dirent = buf->dirent;
35402 if (!access_ok(VERIFY_WRITE, dirent,
35403 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35404
35405 buf.result = 0;
35406 buf.dirent = dirent;
35407 + buf.file = file;
35408
35409 error = vfs_readdir(file, compat_fillonedir, &buf);
35410 if (buf.result)
35411 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35412 struct compat_getdents_callback {
35413 struct compat_linux_dirent __user *current_dir;
35414 struct compat_linux_dirent __user *previous;
35415 + struct file * file;
35416 int count;
35417 int error;
35418 };
35419 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35420 buf->error = -EOVERFLOW;
35421 return -EOVERFLOW;
35422 }
35423 +
35424 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35425 + return 0;
35426 +
35427 dirent = buf->previous;
35428 if (dirent) {
35429 if (__put_user(offset, &dirent->d_off))
35430 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35431 buf.previous = NULL;
35432 buf.count = count;
35433 buf.error = 0;
35434 + buf.file = file;
35435
35436 error = vfs_readdir(file, compat_filldir, &buf);
35437 if (error >= 0)
35438 @@ -1006,6 +1018,7 @@ out:
35439 struct compat_getdents_callback64 {
35440 struct linux_dirent64 __user *current_dir;
35441 struct linux_dirent64 __user *previous;
35442 + struct file * file;
35443 int count;
35444 int error;
35445 };
35446 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35447 buf->error = -EINVAL; /* only used if we fail.. */
35448 if (reclen > buf->count)
35449 return -EINVAL;
35450 +
35451 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35452 + return 0;
35453 +
35454 dirent = buf->previous;
35455
35456 if (dirent) {
35457 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35458 buf.previous = NULL;
35459 buf.count = count;
35460 buf.error = 0;
35461 + buf.file = file;
35462
35463 error = vfs_readdir(file, compat_filldir64, &buf);
35464 if (error >= 0)
35465 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
35466 struct fdtable *fdt;
35467 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35468
35469 + pax_track_stack();
35470 +
35471 if (n < 0)
35472 goto out_nofds;
35473
35474 diff -urNp linux-3.0.3/fs/compat_ioctl.c linux-3.0.3/fs/compat_ioctl.c
35475 --- linux-3.0.3/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35476 +++ linux-3.0.3/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
35477 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35478
35479 err = get_user(palp, &up->palette);
35480 err |= get_user(length, &up->length);
35481 + if (err)
35482 + return -EFAULT;
35483
35484 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35485 err = put_user(compat_ptr(palp), &up_native->palette);
35486 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35487 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35488 {
35489 unsigned int a, b;
35490 - a = *(unsigned int *)p;
35491 - b = *(unsigned int *)q;
35492 + a = *(const unsigned int *)p;
35493 + b = *(const unsigned int *)q;
35494 if (a > b)
35495 return 1;
35496 if (a < b)
35497 diff -urNp linux-3.0.3/fs/configfs/dir.c linux-3.0.3/fs/configfs/dir.c
35498 --- linux-3.0.3/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
35499 +++ linux-3.0.3/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
35500 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35501 }
35502 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35503 struct configfs_dirent *next;
35504 - const char * name;
35505 + const unsigned char * name;
35506 + char d_name[sizeof(next->s_dentry->d_iname)];
35507 int len;
35508 struct inode *inode = NULL;
35509
35510 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35511 continue;
35512
35513 name = configfs_get_name(next);
35514 - len = strlen(name);
35515 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35516 + len = next->s_dentry->d_name.len;
35517 + memcpy(d_name, name, len);
35518 + name = d_name;
35519 + } else
35520 + len = strlen(name);
35521
35522 /*
35523 * We'll have a dentry and an inode for
35524 diff -urNp linux-3.0.3/fs/dcache.c linux-3.0.3/fs/dcache.c
35525 --- linux-3.0.3/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
35526 +++ linux-3.0.3/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
35527 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
35528 mempages -= reserve;
35529
35530 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35531 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35532 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35533
35534 dcache_init();
35535 inode_init();
35536 diff -urNp linux-3.0.3/fs/ecryptfs/inode.c linux-3.0.3/fs/ecryptfs/inode.c
35537 --- linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
35538 +++ linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
35539 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
35540 old_fs = get_fs();
35541 set_fs(get_ds());
35542 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35543 - (char __user *)lower_buf,
35544 + (__force char __user *)lower_buf,
35545 lower_bufsiz);
35546 set_fs(old_fs);
35547 if (rc < 0)
35548 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
35549 }
35550 old_fs = get_fs();
35551 set_fs(get_ds());
35552 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35553 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35554 set_fs(old_fs);
35555 if (rc < 0) {
35556 kfree(buf);
35557 @@ -765,7 +765,7 @@ out:
35558 static void
35559 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35560 {
35561 - char *buf = nd_get_link(nd);
35562 + const char *buf = nd_get_link(nd);
35563 if (!IS_ERR(buf)) {
35564 /* Free the char* */
35565 kfree(buf);
35566 diff -urNp linux-3.0.3/fs/ecryptfs/miscdev.c linux-3.0.3/fs/ecryptfs/miscdev.c
35567 --- linux-3.0.3/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
35568 +++ linux-3.0.3/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
35569 @@ -328,7 +328,7 @@ check_list:
35570 goto out_unlock_msg_ctx;
35571 i = 5;
35572 if (msg_ctx->msg) {
35573 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35574 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35575 goto out_unlock_msg_ctx;
35576 i += packet_length_size;
35577 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35578 diff -urNp linux-3.0.3/fs/exec.c linux-3.0.3/fs/exec.c
35579 --- linux-3.0.3/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
35580 +++ linux-3.0.3/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
35581 @@ -55,12 +55,24 @@
35582 #include <linux/pipe_fs_i.h>
35583 #include <linux/oom.h>
35584 #include <linux/compat.h>
35585 +#include <linux/random.h>
35586 +#include <linux/seq_file.h>
35587 +
35588 +#ifdef CONFIG_PAX_REFCOUNT
35589 +#include <linux/kallsyms.h>
35590 +#include <linux/kdebug.h>
35591 +#endif
35592
35593 #include <asm/uaccess.h>
35594 #include <asm/mmu_context.h>
35595 #include <asm/tlb.h>
35596 #include "internal.h"
35597
35598 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35599 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35600 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35601 +#endif
35602 +
35603 int core_uses_pid;
35604 char core_pattern[CORENAME_MAX_SIZE] = "core";
35605 unsigned int core_pipe_limit;
35606 @@ -70,7 +82,7 @@ struct core_name {
35607 char *corename;
35608 int used, size;
35609 };
35610 -static atomic_t call_count = ATOMIC_INIT(1);
35611 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35612
35613 /* The maximal length of core_pattern is also specified in sysctl.c */
35614
35615 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35616 char *tmp = getname(library);
35617 int error = PTR_ERR(tmp);
35618 static const struct open_flags uselib_flags = {
35619 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35620 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35621 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35622 .intent = LOOKUP_OPEN
35623 };
35624 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
35625 int write)
35626 {
35627 struct page *page;
35628 - int ret;
35629
35630 -#ifdef CONFIG_STACK_GROWSUP
35631 - if (write) {
35632 - ret = expand_downwards(bprm->vma, pos);
35633 - if (ret < 0)
35634 - return NULL;
35635 - }
35636 -#endif
35637 - ret = get_user_pages(current, bprm->mm, pos,
35638 - 1, write, 1, &page, NULL);
35639 - if (ret <= 0)
35640 + if (0 > expand_downwards(bprm->vma, pos))
35641 + return NULL;
35642 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35643 return NULL;
35644
35645 if (write) {
35646 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
35647 vma->vm_end = STACK_TOP_MAX;
35648 vma->vm_start = vma->vm_end - PAGE_SIZE;
35649 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35650 +
35651 +#ifdef CONFIG_PAX_SEGMEXEC
35652 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35653 +#endif
35654 +
35655 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35656 INIT_LIST_HEAD(&vma->anon_vma_chain);
35657
35658 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
35659 mm->stack_vm = mm->total_vm = 1;
35660 up_write(&mm->mmap_sem);
35661 bprm->p = vma->vm_end - sizeof(void *);
35662 +
35663 +#ifdef CONFIG_PAX_RANDUSTACK
35664 + if (randomize_va_space)
35665 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35666 +#endif
35667 +
35668 return 0;
35669 err:
35670 up_write(&mm->mmap_sem);
35671 @@ -403,19 +418,7 @@ err:
35672 return err;
35673 }
35674
35675 -struct user_arg_ptr {
35676 -#ifdef CONFIG_COMPAT
35677 - bool is_compat;
35678 -#endif
35679 - union {
35680 - const char __user *const __user *native;
35681 -#ifdef CONFIG_COMPAT
35682 - compat_uptr_t __user *compat;
35683 -#endif
35684 - } ptr;
35685 -};
35686 -
35687 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35688 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35689 {
35690 const char __user *native;
35691
35692 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
35693 int r;
35694 mm_segment_t oldfs = get_fs();
35695 struct user_arg_ptr argv = {
35696 - .ptr.native = (const char __user *const __user *)__argv,
35697 + .ptr.native = (__force const char __user *const __user *)__argv,
35698 };
35699
35700 set_fs(KERNEL_DS);
35701 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
35702 unsigned long new_end = old_end - shift;
35703 struct mmu_gather tlb;
35704
35705 - BUG_ON(new_start > new_end);
35706 + if (new_start >= new_end || new_start < mmap_min_addr)
35707 + return -ENOMEM;
35708
35709 /*
35710 * ensure there are no vmas between where we want to go
35711 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
35712 if (vma != find_vma(mm, new_start))
35713 return -EFAULT;
35714
35715 +#ifdef CONFIG_PAX_SEGMEXEC
35716 + BUG_ON(pax_find_mirror_vma(vma));
35717 +#endif
35718 +
35719 /*
35720 * cover the whole range: [new_start, old_end)
35721 */
35722 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
35723 stack_top = arch_align_stack(stack_top);
35724 stack_top = PAGE_ALIGN(stack_top);
35725
35726 - if (unlikely(stack_top < mmap_min_addr) ||
35727 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35728 - return -ENOMEM;
35729 -
35730 stack_shift = vma->vm_end - stack_top;
35731
35732 bprm->p -= stack_shift;
35733 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
35734 bprm->exec -= stack_shift;
35735
35736 down_write(&mm->mmap_sem);
35737 +
35738 + /* Move stack pages down in memory. */
35739 + if (stack_shift) {
35740 + ret = shift_arg_pages(vma, stack_shift);
35741 + if (ret)
35742 + goto out_unlock;
35743 + }
35744 +
35745 vm_flags = VM_STACK_FLAGS;
35746
35747 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35748 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35749 + vm_flags &= ~VM_EXEC;
35750 +
35751 +#ifdef CONFIG_PAX_MPROTECT
35752 + if (mm->pax_flags & MF_PAX_MPROTECT)
35753 + vm_flags &= ~VM_MAYEXEC;
35754 +#endif
35755 +
35756 + }
35757 +#endif
35758 +
35759 /*
35760 * Adjust stack execute permissions; explicitly enable for
35761 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35762 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
35763 goto out_unlock;
35764 BUG_ON(prev != vma);
35765
35766 - /* Move stack pages down in memory. */
35767 - if (stack_shift) {
35768 - ret = shift_arg_pages(vma, stack_shift);
35769 - if (ret)
35770 - goto out_unlock;
35771 - }
35772 -
35773 /* mprotect_fixup is overkill to remove the temporary stack flags */
35774 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35775
35776 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
35777 struct file *file;
35778 int err;
35779 static const struct open_flags open_exec_flags = {
35780 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35781 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35782 .acc_mode = MAY_EXEC | MAY_OPEN,
35783 .intent = LOOKUP_OPEN
35784 };
35785 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
35786 old_fs = get_fs();
35787 set_fs(get_ds());
35788 /* The cast to a user pointer is valid due to the set_fs() */
35789 - result = vfs_read(file, (void __user *)addr, count, &pos);
35790 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35791 set_fs(old_fs);
35792 return result;
35793 }
35794 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
35795 }
35796 rcu_read_unlock();
35797
35798 - if (p->fs->users > n_fs) {
35799 + if (atomic_read(&p->fs->users) > n_fs) {
35800 bprm->unsafe |= LSM_UNSAFE_SHARE;
35801 } else {
35802 res = -EAGAIN;
35803 @@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
35804 struct user_arg_ptr envp,
35805 struct pt_regs *regs)
35806 {
35807 +#ifdef CONFIG_GRKERNSEC
35808 + struct file *old_exec_file;
35809 + struct acl_subject_label *old_acl;
35810 + struct rlimit old_rlim[RLIM_NLIMITS];
35811 +#endif
35812 struct linux_binprm *bprm;
35813 struct file *file;
35814 struct files_struct *displaced;
35815 bool clear_in_exec;
35816 int retval;
35817 + const struct cred *cred = current_cred();
35818 +
35819 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35820 +
35821 + /*
35822 + * We move the actual failure in case of RLIMIT_NPROC excess from
35823 + * set*uid() to execve() because too many poorly written programs
35824 + * don't check setuid() return code. Here we additionally recheck
35825 + * whether NPROC limit is still exceeded.
35826 + */
35827 + if ((current->flags & PF_NPROC_EXCEEDED) &&
35828 + atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
35829 + retval = -EAGAIN;
35830 + goto out_ret;
35831 + }
35832 +
35833 + /* We're below the limit (still or again), so we don't want to make
35834 + * further execve() calls fail. */
35835 + current->flags &= ~PF_NPROC_EXCEEDED;
35836
35837 retval = unshare_files(&displaced);
35838 if (retval)
35839 @@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
35840 bprm->filename = filename;
35841 bprm->interp = filename;
35842
35843 + if (gr_process_user_ban()) {
35844 + retval = -EPERM;
35845 + goto out_file;
35846 + }
35847 +
35848 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35849 + retval = -EACCES;
35850 + goto out_file;
35851 + }
35852 +
35853 retval = bprm_mm_init(bprm);
35854 if (retval)
35855 goto out_file;
35856 @@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
35857 if (retval < 0)
35858 goto out;
35859
35860 + if (!gr_tpe_allow(file)) {
35861 + retval = -EACCES;
35862 + goto out;
35863 + }
35864 +
35865 + if (gr_check_crash_exec(file)) {
35866 + retval = -EACCES;
35867 + goto out;
35868 + }
35869 +
35870 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35871 +
35872 + gr_handle_exec_args(bprm, argv);
35873 +
35874 +#ifdef CONFIG_GRKERNSEC
35875 + old_acl = current->acl;
35876 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35877 + old_exec_file = current->exec_file;
35878 + get_file(file);
35879 + current->exec_file = file;
35880 +#endif
35881 +
35882 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35883 + bprm->unsafe & LSM_UNSAFE_SHARE);
35884 + if (retval < 0)
35885 + goto out_fail;
35886 +
35887 retval = search_binary_handler(bprm,regs);
35888 if (retval < 0)
35889 - goto out;
35890 + goto out_fail;
35891 +#ifdef CONFIG_GRKERNSEC
35892 + if (old_exec_file)
35893 + fput(old_exec_file);
35894 +#endif
35895
35896 /* execve succeeded */
35897 current->fs->in_exec = 0;
35898 @@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
35899 put_files_struct(displaced);
35900 return retval;
35901
35902 +out_fail:
35903 +#ifdef CONFIG_GRKERNSEC
35904 + current->acl = old_acl;
35905 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35906 + fput(current->exec_file);
35907 + current->exec_file = old_exec_file;
35908 +#endif
35909 +
35910 out:
35911 if (bprm->mm) {
35912 acct_arg_size(bprm, 0);
35913 @@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
35914 {
35915 char *old_corename = cn->corename;
35916
35917 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35918 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35919 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35920
35921 if (!cn->corename) {
35922 @@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
35923 int pid_in_pattern = 0;
35924 int err = 0;
35925
35926 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35927 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35928 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35929 cn->used = 0;
35930
35931 @@ -1758,6 +1848,219 @@ out:
35932 return ispipe;
35933 }
35934
35935 +int pax_check_flags(unsigned long *flags)
35936 +{
35937 + int retval = 0;
35938 +
35939 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35940 + if (*flags & MF_PAX_SEGMEXEC)
35941 + {
35942 + *flags &= ~MF_PAX_SEGMEXEC;
35943 + retval = -EINVAL;
35944 + }
35945 +#endif
35946 +
35947 + if ((*flags & MF_PAX_PAGEEXEC)
35948 +
35949 +#ifdef CONFIG_PAX_PAGEEXEC
35950 + && (*flags & MF_PAX_SEGMEXEC)
35951 +#endif
35952 +
35953 + )
35954 + {
35955 + *flags &= ~MF_PAX_PAGEEXEC;
35956 + retval = -EINVAL;
35957 + }
35958 +
35959 + if ((*flags & MF_PAX_MPROTECT)
35960 +
35961 +#ifdef CONFIG_PAX_MPROTECT
35962 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35963 +#endif
35964 +
35965 + )
35966 + {
35967 + *flags &= ~MF_PAX_MPROTECT;
35968 + retval = -EINVAL;
35969 + }
35970 +
35971 + if ((*flags & MF_PAX_EMUTRAMP)
35972 +
35973 +#ifdef CONFIG_PAX_EMUTRAMP
35974 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35975 +#endif
35976 +
35977 + )
35978 + {
35979 + *flags &= ~MF_PAX_EMUTRAMP;
35980 + retval = -EINVAL;
35981 + }
35982 +
35983 + return retval;
35984 +}
35985 +
35986 +EXPORT_SYMBOL(pax_check_flags);
35987 +
35988 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35989 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
35990 +{
35991 + struct task_struct *tsk = current;
35992 + struct mm_struct *mm = current->mm;
35993 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
35994 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
35995 + char *path_exec = NULL;
35996 + char *path_fault = NULL;
35997 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
35998 +
35999 + if (buffer_exec && buffer_fault) {
36000 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
36001 +
36002 + down_read(&mm->mmap_sem);
36003 + vma = mm->mmap;
36004 + while (vma && (!vma_exec || !vma_fault)) {
36005 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
36006 + vma_exec = vma;
36007 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
36008 + vma_fault = vma;
36009 + vma = vma->vm_next;
36010 + }
36011 + if (vma_exec) {
36012 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36013 + if (IS_ERR(path_exec))
36014 + path_exec = "<path too long>";
36015 + else {
36016 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36017 + if (path_exec) {
36018 + *path_exec = 0;
36019 + path_exec = buffer_exec;
36020 + } else
36021 + path_exec = "<path too long>";
36022 + }
36023 + }
36024 + if (vma_fault) {
36025 + start = vma_fault->vm_start;
36026 + end = vma_fault->vm_end;
36027 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36028 + if (vma_fault->vm_file) {
36029 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36030 + if (IS_ERR(path_fault))
36031 + path_fault = "<path too long>";
36032 + else {
36033 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36034 + if (path_fault) {
36035 + *path_fault = 0;
36036 + path_fault = buffer_fault;
36037 + } else
36038 + path_fault = "<path too long>";
36039 + }
36040 + } else
36041 + path_fault = "<anonymous mapping>";
36042 + }
36043 + up_read(&mm->mmap_sem);
36044 + }
36045 + if (tsk->signal->curr_ip)
36046 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36047 + else
36048 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36049 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36050 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36051 + task_uid(tsk), task_euid(tsk), pc, sp);
36052 + free_page((unsigned long)buffer_exec);
36053 + free_page((unsigned long)buffer_fault);
36054 + pax_report_insns(pc, sp);
36055 + do_coredump(SIGKILL, SIGKILL, regs);
36056 +}
36057 +#endif
36058 +
36059 +#ifdef CONFIG_PAX_REFCOUNT
36060 +void pax_report_refcount_overflow(struct pt_regs *regs)
36061 +{
36062 + if (current->signal->curr_ip)
36063 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36064 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36065 + else
36066 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36067 + current->comm, task_pid_nr(current), current_uid(), current_euid());
36068 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36069 + show_regs(regs);
36070 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36071 +}
36072 +#endif
36073 +
36074 +#ifdef CONFIG_PAX_USERCOPY
36075 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36076 +int object_is_on_stack(const void *obj, unsigned long len)
36077 +{
36078 + const void * const stack = task_stack_page(current);
36079 + const void * const stackend = stack + THREAD_SIZE;
36080 +
36081 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36082 + const void *frame = NULL;
36083 + const void *oldframe;
36084 +#endif
36085 +
36086 + if (obj + len < obj)
36087 + return -1;
36088 +
36089 + if (obj + len <= stack || stackend <= obj)
36090 + return 0;
36091 +
36092 + if (obj < stack || stackend < obj + len)
36093 + return -1;
36094 +
36095 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36096 + oldframe = __builtin_frame_address(1);
36097 + if (oldframe)
36098 + frame = __builtin_frame_address(2);
36099 + /*
36100 + low ----------------------------------------------> high
36101 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
36102 + ^----------------^
36103 + allow copies only within here
36104 + */
36105 + while (stack <= frame && frame < stackend) {
36106 + /* if obj + len extends past the last frame, this
36107 + check won't pass and the next frame will be 0,
36108 + causing us to bail out and correctly report
36109 + the copy as invalid
36110 + */
36111 + if (obj + len <= frame)
36112 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36113 + oldframe = frame;
36114 + frame = *(const void * const *)frame;
36115 + }
36116 + return -1;
36117 +#else
36118 + return 1;
36119 +#endif
36120 +}
36121 +
36122 +
36123 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36124 +{
36125 + if (current->signal->curr_ip)
36126 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36127 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36128 + else
36129 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36130 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36131 + dump_stack();
36132 + gr_handle_kernel_exploit();
36133 + do_group_exit(SIGKILL);
36134 +}
36135 +#endif
36136 +
36137 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36138 +void pax_track_stack(void)
36139 +{
36140 + unsigned long sp = (unsigned long)&sp;
36141 + if (sp < current_thread_info()->lowest_stack &&
36142 + sp > (unsigned long)task_stack_page(current))
36143 + current_thread_info()->lowest_stack = sp;
36144 +}
36145 +EXPORT_SYMBOL(pax_track_stack);
36146 +#endif
36147 +
36148 static int zap_process(struct task_struct *start, int exit_code)
36149 {
36150 struct task_struct *t;
36151 @@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
36152 pipe = file->f_path.dentry->d_inode->i_pipe;
36153
36154 pipe_lock(pipe);
36155 - pipe->readers++;
36156 - pipe->writers--;
36157 + atomic_inc(&pipe->readers);
36158 + atomic_dec(&pipe->writers);
36159
36160 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36161 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36162 wake_up_interruptible_sync(&pipe->wait);
36163 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36164 pipe_wait(pipe);
36165 }
36166
36167 - pipe->readers--;
36168 - pipe->writers++;
36169 + atomic_dec(&pipe->readers);
36170 + atomic_inc(&pipe->writers);
36171 pipe_unlock(pipe);
36172
36173 }
36174 @@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
36175 int retval = 0;
36176 int flag = 0;
36177 int ispipe;
36178 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36179 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36180 struct coredump_params cprm = {
36181 .signr = signr,
36182 .regs = regs,
36183 @@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
36184
36185 audit_core_dumps(signr);
36186
36187 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36188 + gr_handle_brute_attach(current, cprm.mm_flags);
36189 +
36190 binfmt = mm->binfmt;
36191 if (!binfmt || !binfmt->core_dump)
36192 goto fail;
36193 @@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
36194 goto fail_corename;
36195 }
36196
36197 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36198 +
36199 if (ispipe) {
36200 int dump_count;
36201 char **helper_argv;
36202 @@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
36203 }
36204 cprm.limit = RLIM_INFINITY;
36205
36206 - dump_count = atomic_inc_return(&core_dump_count);
36207 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36208 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36209 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36210 task_tgid_vnr(current), current->comm);
36211 @@ -2192,7 +2500,7 @@ close_fail:
36212 filp_close(cprm.file, NULL);
36213 fail_dropcount:
36214 if (ispipe)
36215 - atomic_dec(&core_dump_count);
36216 + atomic_dec_unchecked(&core_dump_count);
36217 fail_unlock:
36218 kfree(cn.corename);
36219 fail_corename:
36220 diff -urNp linux-3.0.3/fs/ext2/balloc.c linux-3.0.3/fs/ext2/balloc.c
36221 --- linux-3.0.3/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
36222 +++ linux-3.0.3/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
36223 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36224
36225 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36226 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36227 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36228 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36229 sbi->s_resuid != current_fsuid() &&
36230 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36231 return 0;
36232 diff -urNp linux-3.0.3/fs/ext3/balloc.c linux-3.0.3/fs/ext3/balloc.c
36233 --- linux-3.0.3/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
36234 +++ linux-3.0.3/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
36235 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36236
36237 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36238 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36239 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36240 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36241 sbi->s_resuid != current_fsuid() &&
36242 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36243 return 0;
36244 diff -urNp linux-3.0.3/fs/ext4/balloc.c linux-3.0.3/fs/ext4/balloc.c
36245 --- linux-3.0.3/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
36246 +++ linux-3.0.3/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
36247 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
36248 /* Hm, nope. Are (enough) root reserved blocks available? */
36249 if (sbi->s_resuid == current_fsuid() ||
36250 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36251 - capable(CAP_SYS_RESOURCE) ||
36252 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
36253 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
36254 + capable_nolog(CAP_SYS_RESOURCE)) {
36255
36256 if (free_blocks >= (nblocks + dirty_blocks))
36257 return 1;
36258 diff -urNp linux-3.0.3/fs/ext4/ext4.h linux-3.0.3/fs/ext4/ext4.h
36259 --- linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
36260 +++ linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
36261 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
36262 unsigned long s_mb_last_start;
36263
36264 /* stats for buddy allocator */
36265 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36266 - atomic_t s_bal_success; /* we found long enough chunks */
36267 - atomic_t s_bal_allocated; /* in blocks */
36268 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36269 - atomic_t s_bal_goals; /* goal hits */
36270 - atomic_t s_bal_breaks; /* too long searches */
36271 - atomic_t s_bal_2orders; /* 2^order hits */
36272 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36273 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36274 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36275 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36276 + atomic_unchecked_t s_bal_goals; /* goal hits */
36277 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36278 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36279 spinlock_t s_bal_lock;
36280 unsigned long s_mb_buddies_generated;
36281 unsigned long long s_mb_generation_time;
36282 - atomic_t s_mb_lost_chunks;
36283 - atomic_t s_mb_preallocated;
36284 - atomic_t s_mb_discarded;
36285 + atomic_unchecked_t s_mb_lost_chunks;
36286 + atomic_unchecked_t s_mb_preallocated;
36287 + atomic_unchecked_t s_mb_discarded;
36288 atomic_t s_lock_busy;
36289
36290 /* locality groups */
36291 diff -urNp linux-3.0.3/fs/ext4/mballoc.c linux-3.0.3/fs/ext4/mballoc.c
36292 --- linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
36293 +++ linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
36294 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
36295 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36296
36297 if (EXT4_SB(sb)->s_mb_stats)
36298 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36299 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36300
36301 break;
36302 }
36303 @@ -2087,7 +2087,7 @@ repeat:
36304 ac->ac_status = AC_STATUS_CONTINUE;
36305 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36306 cr = 3;
36307 - atomic_inc(&sbi->s_mb_lost_chunks);
36308 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36309 goto repeat;
36310 }
36311 }
36312 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
36313 ext4_grpblk_t counters[16];
36314 } sg;
36315
36316 + pax_track_stack();
36317 +
36318 group--;
36319 if (group == 0)
36320 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36321 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
36322 if (sbi->s_mb_stats) {
36323 printk(KERN_INFO
36324 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36325 - atomic_read(&sbi->s_bal_allocated),
36326 - atomic_read(&sbi->s_bal_reqs),
36327 - atomic_read(&sbi->s_bal_success));
36328 + atomic_read_unchecked(&sbi->s_bal_allocated),
36329 + atomic_read_unchecked(&sbi->s_bal_reqs),
36330 + atomic_read_unchecked(&sbi->s_bal_success));
36331 printk(KERN_INFO
36332 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36333 "%u 2^N hits, %u breaks, %u lost\n",
36334 - atomic_read(&sbi->s_bal_ex_scanned),
36335 - atomic_read(&sbi->s_bal_goals),
36336 - atomic_read(&sbi->s_bal_2orders),
36337 - atomic_read(&sbi->s_bal_breaks),
36338 - atomic_read(&sbi->s_mb_lost_chunks));
36339 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36340 + atomic_read_unchecked(&sbi->s_bal_goals),
36341 + atomic_read_unchecked(&sbi->s_bal_2orders),
36342 + atomic_read_unchecked(&sbi->s_bal_breaks),
36343 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36344 printk(KERN_INFO
36345 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36346 sbi->s_mb_buddies_generated++,
36347 sbi->s_mb_generation_time);
36348 printk(KERN_INFO
36349 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36350 - atomic_read(&sbi->s_mb_preallocated),
36351 - atomic_read(&sbi->s_mb_discarded));
36352 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36353 + atomic_read_unchecked(&sbi->s_mb_discarded));
36354 }
36355
36356 free_percpu(sbi->s_locality_groups);
36357 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
36358 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36359
36360 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36361 - atomic_inc(&sbi->s_bal_reqs);
36362 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36363 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36364 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36365 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36366 - atomic_inc(&sbi->s_bal_success);
36367 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36368 + atomic_inc_unchecked(&sbi->s_bal_success);
36369 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36370 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36371 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36372 - atomic_inc(&sbi->s_bal_goals);
36373 + atomic_inc_unchecked(&sbi->s_bal_goals);
36374 if (ac->ac_found > sbi->s_mb_max_to_scan)
36375 - atomic_inc(&sbi->s_bal_breaks);
36376 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36377 }
36378
36379 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36380 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36381 trace_ext4_mb_new_inode_pa(ac, pa);
36382
36383 ext4_mb_use_inode_pa(ac, pa);
36384 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36385 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36386
36387 ei = EXT4_I(ac->ac_inode);
36388 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36389 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36390 trace_ext4_mb_new_group_pa(ac, pa);
36391
36392 ext4_mb_use_group_pa(ac, pa);
36393 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36394 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36395
36396 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36397 lg = ac->ac_lg;
36398 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36399 * from the bitmap and continue.
36400 */
36401 }
36402 - atomic_add(free, &sbi->s_mb_discarded);
36403 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36404
36405 return err;
36406 }
36407 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36408 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36409 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36410 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36411 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36412 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36413 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36414
36415 return 0;
36416 diff -urNp linux-3.0.3/fs/fcntl.c linux-3.0.3/fs/fcntl.c
36417 --- linux-3.0.3/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
36418 +++ linux-3.0.3/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
36419 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36420 if (err)
36421 return err;
36422
36423 + if (gr_handle_chroot_fowner(pid, type))
36424 + return -ENOENT;
36425 + if (gr_check_protected_task_fowner(pid, type))
36426 + return -EACCES;
36427 +
36428 f_modown(filp, pid, type, force);
36429 return 0;
36430 }
36431 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36432 switch (cmd) {
36433 case F_DUPFD:
36434 case F_DUPFD_CLOEXEC:
36435 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36436 if (arg >= rlimit(RLIMIT_NOFILE))
36437 break;
36438 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36439 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36440 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36441 * is defined as O_NONBLOCK on some platforms and not on others.
36442 */
36443 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36444 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36445 O_RDONLY | O_WRONLY | O_RDWR |
36446 O_CREAT | O_EXCL | O_NOCTTY |
36447 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36448 __O_SYNC | O_DSYNC | FASYNC |
36449 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36450 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36451 - __FMODE_EXEC | O_PATH
36452 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36453 ));
36454
36455 fasync_cache = kmem_cache_create("fasync_cache",
36456 diff -urNp linux-3.0.3/fs/fifo.c linux-3.0.3/fs/fifo.c
36457 --- linux-3.0.3/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
36458 +++ linux-3.0.3/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
36459 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36460 */
36461 filp->f_op = &read_pipefifo_fops;
36462 pipe->r_counter++;
36463 - if (pipe->readers++ == 0)
36464 + if (atomic_inc_return(&pipe->readers) == 1)
36465 wake_up_partner(inode);
36466
36467 - if (!pipe->writers) {
36468 + if (!atomic_read(&pipe->writers)) {
36469 if ((filp->f_flags & O_NONBLOCK)) {
36470 /* suppress POLLHUP until we have
36471 * seen a writer */
36472 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36473 * errno=ENXIO when there is no process reading the FIFO.
36474 */
36475 ret = -ENXIO;
36476 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36477 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36478 goto err;
36479
36480 filp->f_op = &write_pipefifo_fops;
36481 pipe->w_counter++;
36482 - if (!pipe->writers++)
36483 + if (atomic_inc_return(&pipe->writers) == 1)
36484 wake_up_partner(inode);
36485
36486 - if (!pipe->readers) {
36487 + if (!atomic_read(&pipe->readers)) {
36488 wait_for_partner(inode, &pipe->r_counter);
36489 if (signal_pending(current))
36490 goto err_wr;
36491 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36492 */
36493 filp->f_op = &rdwr_pipefifo_fops;
36494
36495 - pipe->readers++;
36496 - pipe->writers++;
36497 + atomic_inc(&pipe->readers);
36498 + atomic_inc(&pipe->writers);
36499 pipe->r_counter++;
36500 pipe->w_counter++;
36501 - if (pipe->readers == 1 || pipe->writers == 1)
36502 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36503 wake_up_partner(inode);
36504 break;
36505
36506 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36507 return 0;
36508
36509 err_rd:
36510 - if (!--pipe->readers)
36511 + if (atomic_dec_and_test(&pipe->readers))
36512 wake_up_interruptible(&pipe->wait);
36513 ret = -ERESTARTSYS;
36514 goto err;
36515
36516 err_wr:
36517 - if (!--pipe->writers)
36518 + if (atomic_dec_and_test(&pipe->writers))
36519 wake_up_interruptible(&pipe->wait);
36520 ret = -ERESTARTSYS;
36521 goto err;
36522
36523 err:
36524 - if (!pipe->readers && !pipe->writers)
36525 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36526 free_pipe_info(inode);
36527
36528 err_nocleanup:
36529 diff -urNp linux-3.0.3/fs/file.c linux-3.0.3/fs/file.c
36530 --- linux-3.0.3/fs/file.c 2011-07-21 22:17:23.000000000 -0400
36531 +++ linux-3.0.3/fs/file.c 2011-08-23 21:48:14.000000000 -0400
36532 @@ -15,6 +15,7 @@
36533 #include <linux/slab.h>
36534 #include <linux/vmalloc.h>
36535 #include <linux/file.h>
36536 +#include <linux/security.h>
36537 #include <linux/fdtable.h>
36538 #include <linux/bitops.h>
36539 #include <linux/interrupt.h>
36540 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36541 * N.B. For clone tasks sharing a files structure, this test
36542 * will limit the total number of files that can be opened.
36543 */
36544 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36545 if (nr >= rlimit(RLIMIT_NOFILE))
36546 return -EMFILE;
36547
36548 diff -urNp linux-3.0.3/fs/filesystems.c linux-3.0.3/fs/filesystems.c
36549 --- linux-3.0.3/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
36550 +++ linux-3.0.3/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
36551 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36552 int len = dot ? dot - name : strlen(name);
36553
36554 fs = __get_fs_type(name, len);
36555 +
36556 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36557 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36558 +#else
36559 if (!fs && (request_module("%.*s", len, name) == 0))
36560 +#endif
36561 fs = __get_fs_type(name, len);
36562
36563 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36564 diff -urNp linux-3.0.3/fs/fscache/cookie.c linux-3.0.3/fs/fscache/cookie.c
36565 --- linux-3.0.3/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
36566 +++ linux-3.0.3/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
36567 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36568 parent ? (char *) parent->def->name : "<no-parent>",
36569 def->name, netfs_data);
36570
36571 - fscache_stat(&fscache_n_acquires);
36572 + fscache_stat_unchecked(&fscache_n_acquires);
36573
36574 /* if there's no parent cookie, then we don't create one here either */
36575 if (!parent) {
36576 - fscache_stat(&fscache_n_acquires_null);
36577 + fscache_stat_unchecked(&fscache_n_acquires_null);
36578 _leave(" [no parent]");
36579 return NULL;
36580 }
36581 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36582 /* allocate and initialise a cookie */
36583 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36584 if (!cookie) {
36585 - fscache_stat(&fscache_n_acquires_oom);
36586 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36587 _leave(" [ENOMEM]");
36588 return NULL;
36589 }
36590 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36591
36592 switch (cookie->def->type) {
36593 case FSCACHE_COOKIE_TYPE_INDEX:
36594 - fscache_stat(&fscache_n_cookie_index);
36595 + fscache_stat_unchecked(&fscache_n_cookie_index);
36596 break;
36597 case FSCACHE_COOKIE_TYPE_DATAFILE:
36598 - fscache_stat(&fscache_n_cookie_data);
36599 + fscache_stat_unchecked(&fscache_n_cookie_data);
36600 break;
36601 default:
36602 - fscache_stat(&fscache_n_cookie_special);
36603 + fscache_stat_unchecked(&fscache_n_cookie_special);
36604 break;
36605 }
36606
36607 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36608 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36609 atomic_dec(&parent->n_children);
36610 __fscache_cookie_put(cookie);
36611 - fscache_stat(&fscache_n_acquires_nobufs);
36612 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36613 _leave(" = NULL");
36614 return NULL;
36615 }
36616 }
36617
36618 - fscache_stat(&fscache_n_acquires_ok);
36619 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36620 _leave(" = %p", cookie);
36621 return cookie;
36622 }
36623 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36624 cache = fscache_select_cache_for_object(cookie->parent);
36625 if (!cache) {
36626 up_read(&fscache_addremove_sem);
36627 - fscache_stat(&fscache_n_acquires_no_cache);
36628 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36629 _leave(" = -ENOMEDIUM [no cache]");
36630 return -ENOMEDIUM;
36631 }
36632 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36633 object = cache->ops->alloc_object(cache, cookie);
36634 fscache_stat_d(&fscache_n_cop_alloc_object);
36635 if (IS_ERR(object)) {
36636 - fscache_stat(&fscache_n_object_no_alloc);
36637 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36638 ret = PTR_ERR(object);
36639 goto error;
36640 }
36641
36642 - fscache_stat(&fscache_n_object_alloc);
36643 + fscache_stat_unchecked(&fscache_n_object_alloc);
36644
36645 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36646
36647 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36648 struct fscache_object *object;
36649 struct hlist_node *_p;
36650
36651 - fscache_stat(&fscache_n_updates);
36652 + fscache_stat_unchecked(&fscache_n_updates);
36653
36654 if (!cookie) {
36655 - fscache_stat(&fscache_n_updates_null);
36656 + fscache_stat_unchecked(&fscache_n_updates_null);
36657 _leave(" [no cookie]");
36658 return;
36659 }
36660 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36661 struct fscache_object *object;
36662 unsigned long event;
36663
36664 - fscache_stat(&fscache_n_relinquishes);
36665 + fscache_stat_unchecked(&fscache_n_relinquishes);
36666 if (retire)
36667 - fscache_stat(&fscache_n_relinquishes_retire);
36668 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36669
36670 if (!cookie) {
36671 - fscache_stat(&fscache_n_relinquishes_null);
36672 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36673 _leave(" [no cookie]");
36674 return;
36675 }
36676 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36677
36678 /* wait for the cookie to finish being instantiated (or to fail) */
36679 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36680 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36681 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36682 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36683 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36684 }
36685 diff -urNp linux-3.0.3/fs/fscache/internal.h linux-3.0.3/fs/fscache/internal.h
36686 --- linux-3.0.3/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
36687 +++ linux-3.0.3/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
36688 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36689 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36690 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36691
36692 -extern atomic_t fscache_n_op_pend;
36693 -extern atomic_t fscache_n_op_run;
36694 -extern atomic_t fscache_n_op_enqueue;
36695 -extern atomic_t fscache_n_op_deferred_release;
36696 -extern atomic_t fscache_n_op_release;
36697 -extern atomic_t fscache_n_op_gc;
36698 -extern atomic_t fscache_n_op_cancelled;
36699 -extern atomic_t fscache_n_op_rejected;
36700 -
36701 -extern atomic_t fscache_n_attr_changed;
36702 -extern atomic_t fscache_n_attr_changed_ok;
36703 -extern atomic_t fscache_n_attr_changed_nobufs;
36704 -extern atomic_t fscache_n_attr_changed_nomem;
36705 -extern atomic_t fscache_n_attr_changed_calls;
36706 -
36707 -extern atomic_t fscache_n_allocs;
36708 -extern atomic_t fscache_n_allocs_ok;
36709 -extern atomic_t fscache_n_allocs_wait;
36710 -extern atomic_t fscache_n_allocs_nobufs;
36711 -extern atomic_t fscache_n_allocs_intr;
36712 -extern atomic_t fscache_n_allocs_object_dead;
36713 -extern atomic_t fscache_n_alloc_ops;
36714 -extern atomic_t fscache_n_alloc_op_waits;
36715 -
36716 -extern atomic_t fscache_n_retrievals;
36717 -extern atomic_t fscache_n_retrievals_ok;
36718 -extern atomic_t fscache_n_retrievals_wait;
36719 -extern atomic_t fscache_n_retrievals_nodata;
36720 -extern atomic_t fscache_n_retrievals_nobufs;
36721 -extern atomic_t fscache_n_retrievals_intr;
36722 -extern atomic_t fscache_n_retrievals_nomem;
36723 -extern atomic_t fscache_n_retrievals_object_dead;
36724 -extern atomic_t fscache_n_retrieval_ops;
36725 -extern atomic_t fscache_n_retrieval_op_waits;
36726 -
36727 -extern atomic_t fscache_n_stores;
36728 -extern atomic_t fscache_n_stores_ok;
36729 -extern atomic_t fscache_n_stores_again;
36730 -extern atomic_t fscache_n_stores_nobufs;
36731 -extern atomic_t fscache_n_stores_oom;
36732 -extern atomic_t fscache_n_store_ops;
36733 -extern atomic_t fscache_n_store_calls;
36734 -extern atomic_t fscache_n_store_pages;
36735 -extern atomic_t fscache_n_store_radix_deletes;
36736 -extern atomic_t fscache_n_store_pages_over_limit;
36737 -
36738 -extern atomic_t fscache_n_store_vmscan_not_storing;
36739 -extern atomic_t fscache_n_store_vmscan_gone;
36740 -extern atomic_t fscache_n_store_vmscan_busy;
36741 -extern atomic_t fscache_n_store_vmscan_cancelled;
36742 -
36743 -extern atomic_t fscache_n_marks;
36744 -extern atomic_t fscache_n_uncaches;
36745 -
36746 -extern atomic_t fscache_n_acquires;
36747 -extern atomic_t fscache_n_acquires_null;
36748 -extern atomic_t fscache_n_acquires_no_cache;
36749 -extern atomic_t fscache_n_acquires_ok;
36750 -extern atomic_t fscache_n_acquires_nobufs;
36751 -extern atomic_t fscache_n_acquires_oom;
36752 -
36753 -extern atomic_t fscache_n_updates;
36754 -extern atomic_t fscache_n_updates_null;
36755 -extern atomic_t fscache_n_updates_run;
36756 -
36757 -extern atomic_t fscache_n_relinquishes;
36758 -extern atomic_t fscache_n_relinquishes_null;
36759 -extern atomic_t fscache_n_relinquishes_waitcrt;
36760 -extern atomic_t fscache_n_relinquishes_retire;
36761 -
36762 -extern atomic_t fscache_n_cookie_index;
36763 -extern atomic_t fscache_n_cookie_data;
36764 -extern atomic_t fscache_n_cookie_special;
36765 -
36766 -extern atomic_t fscache_n_object_alloc;
36767 -extern atomic_t fscache_n_object_no_alloc;
36768 -extern atomic_t fscache_n_object_lookups;
36769 -extern atomic_t fscache_n_object_lookups_negative;
36770 -extern atomic_t fscache_n_object_lookups_positive;
36771 -extern atomic_t fscache_n_object_lookups_timed_out;
36772 -extern atomic_t fscache_n_object_created;
36773 -extern atomic_t fscache_n_object_avail;
36774 -extern atomic_t fscache_n_object_dead;
36775 -
36776 -extern atomic_t fscache_n_checkaux_none;
36777 -extern atomic_t fscache_n_checkaux_okay;
36778 -extern atomic_t fscache_n_checkaux_update;
36779 -extern atomic_t fscache_n_checkaux_obsolete;
36780 +extern atomic_unchecked_t fscache_n_op_pend;
36781 +extern atomic_unchecked_t fscache_n_op_run;
36782 +extern atomic_unchecked_t fscache_n_op_enqueue;
36783 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36784 +extern atomic_unchecked_t fscache_n_op_release;
36785 +extern atomic_unchecked_t fscache_n_op_gc;
36786 +extern atomic_unchecked_t fscache_n_op_cancelled;
36787 +extern atomic_unchecked_t fscache_n_op_rejected;
36788 +
36789 +extern atomic_unchecked_t fscache_n_attr_changed;
36790 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36791 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36792 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36793 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36794 +
36795 +extern atomic_unchecked_t fscache_n_allocs;
36796 +extern atomic_unchecked_t fscache_n_allocs_ok;
36797 +extern atomic_unchecked_t fscache_n_allocs_wait;
36798 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36799 +extern atomic_unchecked_t fscache_n_allocs_intr;
36800 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36801 +extern atomic_unchecked_t fscache_n_alloc_ops;
36802 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36803 +
36804 +extern atomic_unchecked_t fscache_n_retrievals;
36805 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36806 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36807 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36808 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36809 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36810 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36811 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36812 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36813 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36814 +
36815 +extern atomic_unchecked_t fscache_n_stores;
36816 +extern atomic_unchecked_t fscache_n_stores_ok;
36817 +extern atomic_unchecked_t fscache_n_stores_again;
36818 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36819 +extern atomic_unchecked_t fscache_n_stores_oom;
36820 +extern atomic_unchecked_t fscache_n_store_ops;
36821 +extern atomic_unchecked_t fscache_n_store_calls;
36822 +extern atomic_unchecked_t fscache_n_store_pages;
36823 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36824 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36825 +
36826 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36827 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36828 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36829 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36830 +
36831 +extern atomic_unchecked_t fscache_n_marks;
36832 +extern atomic_unchecked_t fscache_n_uncaches;
36833 +
36834 +extern atomic_unchecked_t fscache_n_acquires;
36835 +extern atomic_unchecked_t fscache_n_acquires_null;
36836 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36837 +extern atomic_unchecked_t fscache_n_acquires_ok;
36838 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36839 +extern atomic_unchecked_t fscache_n_acquires_oom;
36840 +
36841 +extern atomic_unchecked_t fscache_n_updates;
36842 +extern atomic_unchecked_t fscache_n_updates_null;
36843 +extern atomic_unchecked_t fscache_n_updates_run;
36844 +
36845 +extern atomic_unchecked_t fscache_n_relinquishes;
36846 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36847 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36848 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36849 +
36850 +extern atomic_unchecked_t fscache_n_cookie_index;
36851 +extern atomic_unchecked_t fscache_n_cookie_data;
36852 +extern atomic_unchecked_t fscache_n_cookie_special;
36853 +
36854 +extern atomic_unchecked_t fscache_n_object_alloc;
36855 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36856 +extern atomic_unchecked_t fscache_n_object_lookups;
36857 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36858 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36859 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36860 +extern atomic_unchecked_t fscache_n_object_created;
36861 +extern atomic_unchecked_t fscache_n_object_avail;
36862 +extern atomic_unchecked_t fscache_n_object_dead;
36863 +
36864 +extern atomic_unchecked_t fscache_n_checkaux_none;
36865 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36866 +extern atomic_unchecked_t fscache_n_checkaux_update;
36867 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36868
36869 extern atomic_t fscache_n_cop_alloc_object;
36870 extern atomic_t fscache_n_cop_lookup_object;
36871 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36872 atomic_inc(stat);
36873 }
36874
36875 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36876 +{
36877 + atomic_inc_unchecked(stat);
36878 +}
36879 +
36880 static inline void fscache_stat_d(atomic_t *stat)
36881 {
36882 atomic_dec(stat);
36883 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36884
36885 #define __fscache_stat(stat) (NULL)
36886 #define fscache_stat(stat) do {} while (0)
36887 +#define fscache_stat_unchecked(stat) do {} while (0)
36888 #define fscache_stat_d(stat) do {} while (0)
36889 #endif
36890
36891 diff -urNp linux-3.0.3/fs/fscache/object.c linux-3.0.3/fs/fscache/object.c
36892 --- linux-3.0.3/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
36893 +++ linux-3.0.3/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
36894 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36895 /* update the object metadata on disk */
36896 case FSCACHE_OBJECT_UPDATING:
36897 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36898 - fscache_stat(&fscache_n_updates_run);
36899 + fscache_stat_unchecked(&fscache_n_updates_run);
36900 fscache_stat(&fscache_n_cop_update_object);
36901 object->cache->ops->update_object(object);
36902 fscache_stat_d(&fscache_n_cop_update_object);
36903 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36904 spin_lock(&object->lock);
36905 object->state = FSCACHE_OBJECT_DEAD;
36906 spin_unlock(&object->lock);
36907 - fscache_stat(&fscache_n_object_dead);
36908 + fscache_stat_unchecked(&fscache_n_object_dead);
36909 goto terminal_transit;
36910
36911 /* handle the parent cache of this object being withdrawn from
36912 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
36913 spin_lock(&object->lock);
36914 object->state = FSCACHE_OBJECT_DEAD;
36915 spin_unlock(&object->lock);
36916 - fscache_stat(&fscache_n_object_dead);
36917 + fscache_stat_unchecked(&fscache_n_object_dead);
36918 goto terminal_transit;
36919
36920 /* complain about the object being woken up once it is
36921 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36922 parent->cookie->def->name, cookie->def->name,
36923 object->cache->tag->name);
36924
36925 - fscache_stat(&fscache_n_object_lookups);
36926 + fscache_stat_unchecked(&fscache_n_object_lookups);
36927 fscache_stat(&fscache_n_cop_lookup_object);
36928 ret = object->cache->ops->lookup_object(object);
36929 fscache_stat_d(&fscache_n_cop_lookup_object);
36930 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36931 if (ret == -ETIMEDOUT) {
36932 /* probably stuck behind another object, so move this one to
36933 * the back of the queue */
36934 - fscache_stat(&fscache_n_object_lookups_timed_out);
36935 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36936 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36937 }
36938
36939 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36940
36941 spin_lock(&object->lock);
36942 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36943 - fscache_stat(&fscache_n_object_lookups_negative);
36944 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36945
36946 /* transit here to allow write requests to begin stacking up
36947 * and read requests to begin returning ENODATA */
36948 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36949 * result, in which case there may be data available */
36950 spin_lock(&object->lock);
36951 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36952 - fscache_stat(&fscache_n_object_lookups_positive);
36953 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36954
36955 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36956
36957 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36958 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36959 } else {
36960 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36961 - fscache_stat(&fscache_n_object_created);
36962 + fscache_stat_unchecked(&fscache_n_object_created);
36963
36964 object->state = FSCACHE_OBJECT_AVAILABLE;
36965 spin_unlock(&object->lock);
36966 @@ -602,7 +602,7 @@ static void fscache_object_available(str
36967 fscache_enqueue_dependents(object);
36968
36969 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36970 - fscache_stat(&fscache_n_object_avail);
36971 + fscache_stat_unchecked(&fscache_n_object_avail);
36972
36973 _leave("");
36974 }
36975 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36976 enum fscache_checkaux result;
36977
36978 if (!object->cookie->def->check_aux) {
36979 - fscache_stat(&fscache_n_checkaux_none);
36980 + fscache_stat_unchecked(&fscache_n_checkaux_none);
36981 return FSCACHE_CHECKAUX_OKAY;
36982 }
36983
36984 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
36985 switch (result) {
36986 /* entry okay as is */
36987 case FSCACHE_CHECKAUX_OKAY:
36988 - fscache_stat(&fscache_n_checkaux_okay);
36989 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
36990 break;
36991
36992 /* entry requires update */
36993 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
36994 - fscache_stat(&fscache_n_checkaux_update);
36995 + fscache_stat_unchecked(&fscache_n_checkaux_update);
36996 break;
36997
36998 /* entry requires deletion */
36999 case FSCACHE_CHECKAUX_OBSOLETE:
37000 - fscache_stat(&fscache_n_checkaux_obsolete);
37001 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
37002 break;
37003
37004 default:
37005 diff -urNp linux-3.0.3/fs/fscache/operation.c linux-3.0.3/fs/fscache/operation.c
37006 --- linux-3.0.3/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
37007 +++ linux-3.0.3/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
37008 @@ -17,7 +17,7 @@
37009 #include <linux/slab.h>
37010 #include "internal.h"
37011
37012 -atomic_t fscache_op_debug_id;
37013 +atomic_unchecked_t fscache_op_debug_id;
37014 EXPORT_SYMBOL(fscache_op_debug_id);
37015
37016 /**
37017 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
37018 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37019 ASSERTCMP(atomic_read(&op->usage), >, 0);
37020
37021 - fscache_stat(&fscache_n_op_enqueue);
37022 + fscache_stat_unchecked(&fscache_n_op_enqueue);
37023 switch (op->flags & FSCACHE_OP_TYPE) {
37024 case FSCACHE_OP_ASYNC:
37025 _debug("queue async");
37026 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
37027 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37028 if (op->processor)
37029 fscache_enqueue_operation(op);
37030 - fscache_stat(&fscache_n_op_run);
37031 + fscache_stat_unchecked(&fscache_n_op_run);
37032 }
37033
37034 /*
37035 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
37036 if (object->n_ops > 1) {
37037 atomic_inc(&op->usage);
37038 list_add_tail(&op->pend_link, &object->pending_ops);
37039 - fscache_stat(&fscache_n_op_pend);
37040 + fscache_stat_unchecked(&fscache_n_op_pend);
37041 } else if (!list_empty(&object->pending_ops)) {
37042 atomic_inc(&op->usage);
37043 list_add_tail(&op->pend_link, &object->pending_ops);
37044 - fscache_stat(&fscache_n_op_pend);
37045 + fscache_stat_unchecked(&fscache_n_op_pend);
37046 fscache_start_operations(object);
37047 } else {
37048 ASSERTCMP(object->n_in_progress, ==, 0);
37049 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
37050 object->n_exclusive++; /* reads and writes must wait */
37051 atomic_inc(&op->usage);
37052 list_add_tail(&op->pend_link, &object->pending_ops);
37053 - fscache_stat(&fscache_n_op_pend);
37054 + fscache_stat_unchecked(&fscache_n_op_pend);
37055 ret = 0;
37056 } else {
37057 /* not allowed to submit ops in any other state */
37058 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
37059 if (object->n_exclusive > 0) {
37060 atomic_inc(&op->usage);
37061 list_add_tail(&op->pend_link, &object->pending_ops);
37062 - fscache_stat(&fscache_n_op_pend);
37063 + fscache_stat_unchecked(&fscache_n_op_pend);
37064 } else if (!list_empty(&object->pending_ops)) {
37065 atomic_inc(&op->usage);
37066 list_add_tail(&op->pend_link, &object->pending_ops);
37067 - fscache_stat(&fscache_n_op_pend);
37068 + fscache_stat_unchecked(&fscache_n_op_pend);
37069 fscache_start_operations(object);
37070 } else {
37071 ASSERTCMP(object->n_exclusive, ==, 0);
37072 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
37073 object->n_ops++;
37074 atomic_inc(&op->usage);
37075 list_add_tail(&op->pend_link, &object->pending_ops);
37076 - fscache_stat(&fscache_n_op_pend);
37077 + fscache_stat_unchecked(&fscache_n_op_pend);
37078 ret = 0;
37079 } else if (object->state == FSCACHE_OBJECT_DYING ||
37080 object->state == FSCACHE_OBJECT_LC_DYING ||
37081 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37082 - fscache_stat(&fscache_n_op_rejected);
37083 + fscache_stat_unchecked(&fscache_n_op_rejected);
37084 ret = -ENOBUFS;
37085 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37086 fscache_report_unexpected_submission(object, op, ostate);
37087 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
37088
37089 ret = -EBUSY;
37090 if (!list_empty(&op->pend_link)) {
37091 - fscache_stat(&fscache_n_op_cancelled);
37092 + fscache_stat_unchecked(&fscache_n_op_cancelled);
37093 list_del_init(&op->pend_link);
37094 object->n_ops--;
37095 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37096 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
37097 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37098 BUG();
37099
37100 - fscache_stat(&fscache_n_op_release);
37101 + fscache_stat_unchecked(&fscache_n_op_release);
37102
37103 if (op->release) {
37104 op->release(op);
37105 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
37106 * lock, and defer it otherwise */
37107 if (!spin_trylock(&object->lock)) {
37108 _debug("defer put");
37109 - fscache_stat(&fscache_n_op_deferred_release);
37110 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
37111
37112 cache = object->cache;
37113 spin_lock(&cache->op_gc_list_lock);
37114 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
37115
37116 _debug("GC DEFERRED REL OBJ%x OP%x",
37117 object->debug_id, op->debug_id);
37118 - fscache_stat(&fscache_n_op_gc);
37119 + fscache_stat_unchecked(&fscache_n_op_gc);
37120
37121 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37122
37123 diff -urNp linux-3.0.3/fs/fscache/page.c linux-3.0.3/fs/fscache/page.c
37124 --- linux-3.0.3/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
37125 +++ linux-3.0.3/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
37126 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37127 val = radix_tree_lookup(&cookie->stores, page->index);
37128 if (!val) {
37129 rcu_read_unlock();
37130 - fscache_stat(&fscache_n_store_vmscan_not_storing);
37131 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37132 __fscache_uncache_page(cookie, page);
37133 return true;
37134 }
37135 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37136 spin_unlock(&cookie->stores_lock);
37137
37138 if (xpage) {
37139 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37140 - fscache_stat(&fscache_n_store_radix_deletes);
37141 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37142 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37143 ASSERTCMP(xpage, ==, page);
37144 } else {
37145 - fscache_stat(&fscache_n_store_vmscan_gone);
37146 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37147 }
37148
37149 wake_up_bit(&cookie->flags, 0);
37150 @@ -107,7 +107,7 @@ page_busy:
37151 /* we might want to wait here, but that could deadlock the allocator as
37152 * the work threads writing to the cache may all end up sleeping
37153 * on memory allocation */
37154 - fscache_stat(&fscache_n_store_vmscan_busy);
37155 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37156 return false;
37157 }
37158 EXPORT_SYMBOL(__fscache_maybe_release_page);
37159 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37160 FSCACHE_COOKIE_STORING_TAG);
37161 if (!radix_tree_tag_get(&cookie->stores, page->index,
37162 FSCACHE_COOKIE_PENDING_TAG)) {
37163 - fscache_stat(&fscache_n_store_radix_deletes);
37164 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37165 xpage = radix_tree_delete(&cookie->stores, page->index);
37166 }
37167 spin_unlock(&cookie->stores_lock);
37168 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37169
37170 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37171
37172 - fscache_stat(&fscache_n_attr_changed_calls);
37173 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37174
37175 if (fscache_object_is_active(object)) {
37176 fscache_stat(&fscache_n_cop_attr_changed);
37177 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
37178
37179 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37180
37181 - fscache_stat(&fscache_n_attr_changed);
37182 + fscache_stat_unchecked(&fscache_n_attr_changed);
37183
37184 op = kzalloc(sizeof(*op), GFP_KERNEL);
37185 if (!op) {
37186 - fscache_stat(&fscache_n_attr_changed_nomem);
37187 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37188 _leave(" = -ENOMEM");
37189 return -ENOMEM;
37190 }
37191 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
37192 if (fscache_submit_exclusive_op(object, op) < 0)
37193 goto nobufs;
37194 spin_unlock(&cookie->lock);
37195 - fscache_stat(&fscache_n_attr_changed_ok);
37196 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37197 fscache_put_operation(op);
37198 _leave(" = 0");
37199 return 0;
37200 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
37201 nobufs:
37202 spin_unlock(&cookie->lock);
37203 kfree(op);
37204 - fscache_stat(&fscache_n_attr_changed_nobufs);
37205 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37206 _leave(" = %d", -ENOBUFS);
37207 return -ENOBUFS;
37208 }
37209 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
37210 /* allocate a retrieval operation and attempt to submit it */
37211 op = kzalloc(sizeof(*op), GFP_NOIO);
37212 if (!op) {
37213 - fscache_stat(&fscache_n_retrievals_nomem);
37214 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37215 return NULL;
37216 }
37217
37218 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
37219 return 0;
37220 }
37221
37222 - fscache_stat(&fscache_n_retrievals_wait);
37223 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37224
37225 jif = jiffies;
37226 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37227 fscache_wait_bit_interruptible,
37228 TASK_INTERRUPTIBLE) != 0) {
37229 - fscache_stat(&fscache_n_retrievals_intr);
37230 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37231 _leave(" = -ERESTARTSYS");
37232 return -ERESTARTSYS;
37233 }
37234 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
37235 */
37236 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37237 struct fscache_retrieval *op,
37238 - atomic_t *stat_op_waits,
37239 - atomic_t *stat_object_dead)
37240 + atomic_unchecked_t *stat_op_waits,
37241 + atomic_unchecked_t *stat_object_dead)
37242 {
37243 int ret;
37244
37245 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
37246 goto check_if_dead;
37247
37248 _debug(">>> WT");
37249 - fscache_stat(stat_op_waits);
37250 + fscache_stat_unchecked(stat_op_waits);
37251 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37252 fscache_wait_bit_interruptible,
37253 TASK_INTERRUPTIBLE) < 0) {
37254 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
37255
37256 check_if_dead:
37257 if (unlikely(fscache_object_is_dead(object))) {
37258 - fscache_stat(stat_object_dead);
37259 + fscache_stat_unchecked(stat_object_dead);
37260 return -ENOBUFS;
37261 }
37262 return 0;
37263 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
37264
37265 _enter("%p,%p,,,", cookie, page);
37266
37267 - fscache_stat(&fscache_n_retrievals);
37268 + fscache_stat_unchecked(&fscache_n_retrievals);
37269
37270 if (hlist_empty(&cookie->backing_objects))
37271 goto nobufs;
37272 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
37273 goto nobufs_unlock;
37274 spin_unlock(&cookie->lock);
37275
37276 - fscache_stat(&fscache_n_retrieval_ops);
37277 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37278
37279 /* pin the netfs read context in case we need to do the actual netfs
37280 * read because we've encountered a cache read failure */
37281 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
37282
37283 error:
37284 if (ret == -ENOMEM)
37285 - fscache_stat(&fscache_n_retrievals_nomem);
37286 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37287 else if (ret == -ERESTARTSYS)
37288 - fscache_stat(&fscache_n_retrievals_intr);
37289 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37290 else if (ret == -ENODATA)
37291 - fscache_stat(&fscache_n_retrievals_nodata);
37292 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37293 else if (ret < 0)
37294 - fscache_stat(&fscache_n_retrievals_nobufs);
37295 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37296 else
37297 - fscache_stat(&fscache_n_retrievals_ok);
37298 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37299
37300 fscache_put_retrieval(op);
37301 _leave(" = %d", ret);
37302 @@ -429,7 +429,7 @@ nobufs_unlock:
37303 spin_unlock(&cookie->lock);
37304 kfree(op);
37305 nobufs:
37306 - fscache_stat(&fscache_n_retrievals_nobufs);
37307 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37308 _leave(" = -ENOBUFS");
37309 return -ENOBUFS;
37310 }
37311 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
37312
37313 _enter("%p,,%d,,,", cookie, *nr_pages);
37314
37315 - fscache_stat(&fscache_n_retrievals);
37316 + fscache_stat_unchecked(&fscache_n_retrievals);
37317
37318 if (hlist_empty(&cookie->backing_objects))
37319 goto nobufs;
37320 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
37321 goto nobufs_unlock;
37322 spin_unlock(&cookie->lock);
37323
37324 - fscache_stat(&fscache_n_retrieval_ops);
37325 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37326
37327 /* pin the netfs read context in case we need to do the actual netfs
37328 * read because we've encountered a cache read failure */
37329 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
37330
37331 error:
37332 if (ret == -ENOMEM)
37333 - fscache_stat(&fscache_n_retrievals_nomem);
37334 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37335 else if (ret == -ERESTARTSYS)
37336 - fscache_stat(&fscache_n_retrievals_intr);
37337 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37338 else if (ret == -ENODATA)
37339 - fscache_stat(&fscache_n_retrievals_nodata);
37340 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37341 else if (ret < 0)
37342 - fscache_stat(&fscache_n_retrievals_nobufs);
37343 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37344 else
37345 - fscache_stat(&fscache_n_retrievals_ok);
37346 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37347
37348 fscache_put_retrieval(op);
37349 _leave(" = %d", ret);
37350 @@ -545,7 +545,7 @@ nobufs_unlock:
37351 spin_unlock(&cookie->lock);
37352 kfree(op);
37353 nobufs:
37354 - fscache_stat(&fscache_n_retrievals_nobufs);
37355 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37356 _leave(" = -ENOBUFS");
37357 return -ENOBUFS;
37358 }
37359 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
37360
37361 _enter("%p,%p,,,", cookie, page);
37362
37363 - fscache_stat(&fscache_n_allocs);
37364 + fscache_stat_unchecked(&fscache_n_allocs);
37365
37366 if (hlist_empty(&cookie->backing_objects))
37367 goto nobufs;
37368 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
37369 goto nobufs_unlock;
37370 spin_unlock(&cookie->lock);
37371
37372 - fscache_stat(&fscache_n_alloc_ops);
37373 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37374
37375 ret = fscache_wait_for_retrieval_activation(
37376 object, op,
37377 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
37378
37379 error:
37380 if (ret == -ERESTARTSYS)
37381 - fscache_stat(&fscache_n_allocs_intr);
37382 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37383 else if (ret < 0)
37384 - fscache_stat(&fscache_n_allocs_nobufs);
37385 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37386 else
37387 - fscache_stat(&fscache_n_allocs_ok);
37388 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37389
37390 fscache_put_retrieval(op);
37391 _leave(" = %d", ret);
37392 @@ -625,7 +625,7 @@ nobufs_unlock:
37393 spin_unlock(&cookie->lock);
37394 kfree(op);
37395 nobufs:
37396 - fscache_stat(&fscache_n_allocs_nobufs);
37397 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37398 _leave(" = -ENOBUFS");
37399 return -ENOBUFS;
37400 }
37401 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
37402
37403 spin_lock(&cookie->stores_lock);
37404
37405 - fscache_stat(&fscache_n_store_calls);
37406 + fscache_stat_unchecked(&fscache_n_store_calls);
37407
37408 /* find a page to store */
37409 page = NULL;
37410 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
37411 page = results[0];
37412 _debug("gang %d [%lx]", n, page->index);
37413 if (page->index > op->store_limit) {
37414 - fscache_stat(&fscache_n_store_pages_over_limit);
37415 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37416 goto superseded;
37417 }
37418
37419 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
37420 spin_unlock(&cookie->stores_lock);
37421 spin_unlock(&object->lock);
37422
37423 - fscache_stat(&fscache_n_store_pages);
37424 + fscache_stat_unchecked(&fscache_n_store_pages);
37425 fscache_stat(&fscache_n_cop_write_page);
37426 ret = object->cache->ops->write_page(op, page);
37427 fscache_stat_d(&fscache_n_cop_write_page);
37428 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
37429 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37430 ASSERT(PageFsCache(page));
37431
37432 - fscache_stat(&fscache_n_stores);
37433 + fscache_stat_unchecked(&fscache_n_stores);
37434
37435 op = kzalloc(sizeof(*op), GFP_NOIO);
37436 if (!op)
37437 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
37438 spin_unlock(&cookie->stores_lock);
37439 spin_unlock(&object->lock);
37440
37441 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37442 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37443 op->store_limit = object->store_limit;
37444
37445 if (fscache_submit_op(object, &op->op) < 0)
37446 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
37447
37448 spin_unlock(&cookie->lock);
37449 radix_tree_preload_end();
37450 - fscache_stat(&fscache_n_store_ops);
37451 - fscache_stat(&fscache_n_stores_ok);
37452 + fscache_stat_unchecked(&fscache_n_store_ops);
37453 + fscache_stat_unchecked(&fscache_n_stores_ok);
37454
37455 /* the work queue now carries its own ref on the object */
37456 fscache_put_operation(&op->op);
37457 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
37458 return 0;
37459
37460 already_queued:
37461 - fscache_stat(&fscache_n_stores_again);
37462 + fscache_stat_unchecked(&fscache_n_stores_again);
37463 already_pending:
37464 spin_unlock(&cookie->stores_lock);
37465 spin_unlock(&object->lock);
37466 spin_unlock(&cookie->lock);
37467 radix_tree_preload_end();
37468 kfree(op);
37469 - fscache_stat(&fscache_n_stores_ok);
37470 + fscache_stat_unchecked(&fscache_n_stores_ok);
37471 _leave(" = 0");
37472 return 0;
37473
37474 @@ -851,14 +851,14 @@ nobufs:
37475 spin_unlock(&cookie->lock);
37476 radix_tree_preload_end();
37477 kfree(op);
37478 - fscache_stat(&fscache_n_stores_nobufs);
37479 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37480 _leave(" = -ENOBUFS");
37481 return -ENOBUFS;
37482
37483 nomem_free:
37484 kfree(op);
37485 nomem:
37486 - fscache_stat(&fscache_n_stores_oom);
37487 + fscache_stat_unchecked(&fscache_n_stores_oom);
37488 _leave(" = -ENOMEM");
37489 return -ENOMEM;
37490 }
37491 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
37492 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37493 ASSERTCMP(page, !=, NULL);
37494
37495 - fscache_stat(&fscache_n_uncaches);
37496 + fscache_stat_unchecked(&fscache_n_uncaches);
37497
37498 /* cache withdrawal may beat us to it */
37499 if (!PageFsCache(page))
37500 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
37501 unsigned long loop;
37502
37503 #ifdef CONFIG_FSCACHE_STATS
37504 - atomic_add(pagevec->nr, &fscache_n_marks);
37505 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37506 #endif
37507
37508 for (loop = 0; loop < pagevec->nr; loop++) {
37509 diff -urNp linux-3.0.3/fs/fscache/stats.c linux-3.0.3/fs/fscache/stats.c
37510 --- linux-3.0.3/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
37511 +++ linux-3.0.3/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
37512 @@ -18,95 +18,95 @@
37513 /*
37514 * operation counters
37515 */
37516 -atomic_t fscache_n_op_pend;
37517 -atomic_t fscache_n_op_run;
37518 -atomic_t fscache_n_op_enqueue;
37519 -atomic_t fscache_n_op_requeue;
37520 -atomic_t fscache_n_op_deferred_release;
37521 -atomic_t fscache_n_op_release;
37522 -atomic_t fscache_n_op_gc;
37523 -atomic_t fscache_n_op_cancelled;
37524 -atomic_t fscache_n_op_rejected;
37525 -
37526 -atomic_t fscache_n_attr_changed;
37527 -atomic_t fscache_n_attr_changed_ok;
37528 -atomic_t fscache_n_attr_changed_nobufs;
37529 -atomic_t fscache_n_attr_changed_nomem;
37530 -atomic_t fscache_n_attr_changed_calls;
37531 -
37532 -atomic_t fscache_n_allocs;
37533 -atomic_t fscache_n_allocs_ok;
37534 -atomic_t fscache_n_allocs_wait;
37535 -atomic_t fscache_n_allocs_nobufs;
37536 -atomic_t fscache_n_allocs_intr;
37537 -atomic_t fscache_n_allocs_object_dead;
37538 -atomic_t fscache_n_alloc_ops;
37539 -atomic_t fscache_n_alloc_op_waits;
37540 -
37541 -atomic_t fscache_n_retrievals;
37542 -atomic_t fscache_n_retrievals_ok;
37543 -atomic_t fscache_n_retrievals_wait;
37544 -atomic_t fscache_n_retrievals_nodata;
37545 -atomic_t fscache_n_retrievals_nobufs;
37546 -atomic_t fscache_n_retrievals_intr;
37547 -atomic_t fscache_n_retrievals_nomem;
37548 -atomic_t fscache_n_retrievals_object_dead;
37549 -atomic_t fscache_n_retrieval_ops;
37550 -atomic_t fscache_n_retrieval_op_waits;
37551 -
37552 -atomic_t fscache_n_stores;
37553 -atomic_t fscache_n_stores_ok;
37554 -atomic_t fscache_n_stores_again;
37555 -atomic_t fscache_n_stores_nobufs;
37556 -atomic_t fscache_n_stores_oom;
37557 -atomic_t fscache_n_store_ops;
37558 -atomic_t fscache_n_store_calls;
37559 -atomic_t fscache_n_store_pages;
37560 -atomic_t fscache_n_store_radix_deletes;
37561 -atomic_t fscache_n_store_pages_over_limit;
37562 -
37563 -atomic_t fscache_n_store_vmscan_not_storing;
37564 -atomic_t fscache_n_store_vmscan_gone;
37565 -atomic_t fscache_n_store_vmscan_busy;
37566 -atomic_t fscache_n_store_vmscan_cancelled;
37567 -
37568 -atomic_t fscache_n_marks;
37569 -atomic_t fscache_n_uncaches;
37570 -
37571 -atomic_t fscache_n_acquires;
37572 -atomic_t fscache_n_acquires_null;
37573 -atomic_t fscache_n_acquires_no_cache;
37574 -atomic_t fscache_n_acquires_ok;
37575 -atomic_t fscache_n_acquires_nobufs;
37576 -atomic_t fscache_n_acquires_oom;
37577 -
37578 -atomic_t fscache_n_updates;
37579 -atomic_t fscache_n_updates_null;
37580 -atomic_t fscache_n_updates_run;
37581 -
37582 -atomic_t fscache_n_relinquishes;
37583 -atomic_t fscache_n_relinquishes_null;
37584 -atomic_t fscache_n_relinquishes_waitcrt;
37585 -atomic_t fscache_n_relinquishes_retire;
37586 -
37587 -atomic_t fscache_n_cookie_index;
37588 -atomic_t fscache_n_cookie_data;
37589 -atomic_t fscache_n_cookie_special;
37590 -
37591 -atomic_t fscache_n_object_alloc;
37592 -atomic_t fscache_n_object_no_alloc;
37593 -atomic_t fscache_n_object_lookups;
37594 -atomic_t fscache_n_object_lookups_negative;
37595 -atomic_t fscache_n_object_lookups_positive;
37596 -atomic_t fscache_n_object_lookups_timed_out;
37597 -atomic_t fscache_n_object_created;
37598 -atomic_t fscache_n_object_avail;
37599 -atomic_t fscache_n_object_dead;
37600 -
37601 -atomic_t fscache_n_checkaux_none;
37602 -atomic_t fscache_n_checkaux_okay;
37603 -atomic_t fscache_n_checkaux_update;
37604 -atomic_t fscache_n_checkaux_obsolete;
37605 +atomic_unchecked_t fscache_n_op_pend;
37606 +atomic_unchecked_t fscache_n_op_run;
37607 +atomic_unchecked_t fscache_n_op_enqueue;
37608 +atomic_unchecked_t fscache_n_op_requeue;
37609 +atomic_unchecked_t fscache_n_op_deferred_release;
37610 +atomic_unchecked_t fscache_n_op_release;
37611 +atomic_unchecked_t fscache_n_op_gc;
37612 +atomic_unchecked_t fscache_n_op_cancelled;
37613 +atomic_unchecked_t fscache_n_op_rejected;
37614 +
37615 +atomic_unchecked_t fscache_n_attr_changed;
37616 +atomic_unchecked_t fscache_n_attr_changed_ok;
37617 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37618 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37619 +atomic_unchecked_t fscache_n_attr_changed_calls;
37620 +
37621 +atomic_unchecked_t fscache_n_allocs;
37622 +atomic_unchecked_t fscache_n_allocs_ok;
37623 +atomic_unchecked_t fscache_n_allocs_wait;
37624 +atomic_unchecked_t fscache_n_allocs_nobufs;
37625 +atomic_unchecked_t fscache_n_allocs_intr;
37626 +atomic_unchecked_t fscache_n_allocs_object_dead;
37627 +atomic_unchecked_t fscache_n_alloc_ops;
37628 +atomic_unchecked_t fscache_n_alloc_op_waits;
37629 +
37630 +atomic_unchecked_t fscache_n_retrievals;
37631 +atomic_unchecked_t fscache_n_retrievals_ok;
37632 +atomic_unchecked_t fscache_n_retrievals_wait;
37633 +atomic_unchecked_t fscache_n_retrievals_nodata;
37634 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37635 +atomic_unchecked_t fscache_n_retrievals_intr;
37636 +atomic_unchecked_t fscache_n_retrievals_nomem;
37637 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37638 +atomic_unchecked_t fscache_n_retrieval_ops;
37639 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37640 +
37641 +atomic_unchecked_t fscache_n_stores;
37642 +atomic_unchecked_t fscache_n_stores_ok;
37643 +atomic_unchecked_t fscache_n_stores_again;
37644 +atomic_unchecked_t fscache_n_stores_nobufs;
37645 +atomic_unchecked_t fscache_n_stores_oom;
37646 +atomic_unchecked_t fscache_n_store_ops;
37647 +atomic_unchecked_t fscache_n_store_calls;
37648 +atomic_unchecked_t fscache_n_store_pages;
37649 +atomic_unchecked_t fscache_n_store_radix_deletes;
37650 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37651 +
37652 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37653 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37654 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37655 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37656 +
37657 +atomic_unchecked_t fscache_n_marks;
37658 +atomic_unchecked_t fscache_n_uncaches;
37659 +
37660 +atomic_unchecked_t fscache_n_acquires;
37661 +atomic_unchecked_t fscache_n_acquires_null;
37662 +atomic_unchecked_t fscache_n_acquires_no_cache;
37663 +atomic_unchecked_t fscache_n_acquires_ok;
37664 +atomic_unchecked_t fscache_n_acquires_nobufs;
37665 +atomic_unchecked_t fscache_n_acquires_oom;
37666 +
37667 +atomic_unchecked_t fscache_n_updates;
37668 +atomic_unchecked_t fscache_n_updates_null;
37669 +atomic_unchecked_t fscache_n_updates_run;
37670 +
37671 +atomic_unchecked_t fscache_n_relinquishes;
37672 +atomic_unchecked_t fscache_n_relinquishes_null;
37673 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37674 +atomic_unchecked_t fscache_n_relinquishes_retire;
37675 +
37676 +atomic_unchecked_t fscache_n_cookie_index;
37677 +atomic_unchecked_t fscache_n_cookie_data;
37678 +atomic_unchecked_t fscache_n_cookie_special;
37679 +
37680 +atomic_unchecked_t fscache_n_object_alloc;
37681 +atomic_unchecked_t fscache_n_object_no_alloc;
37682 +atomic_unchecked_t fscache_n_object_lookups;
37683 +atomic_unchecked_t fscache_n_object_lookups_negative;
37684 +atomic_unchecked_t fscache_n_object_lookups_positive;
37685 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37686 +atomic_unchecked_t fscache_n_object_created;
37687 +atomic_unchecked_t fscache_n_object_avail;
37688 +atomic_unchecked_t fscache_n_object_dead;
37689 +
37690 +atomic_unchecked_t fscache_n_checkaux_none;
37691 +atomic_unchecked_t fscache_n_checkaux_okay;
37692 +atomic_unchecked_t fscache_n_checkaux_update;
37693 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37694
37695 atomic_t fscache_n_cop_alloc_object;
37696 atomic_t fscache_n_cop_lookup_object;
37697 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37698 seq_puts(m, "FS-Cache statistics\n");
37699
37700 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37701 - atomic_read(&fscache_n_cookie_index),
37702 - atomic_read(&fscache_n_cookie_data),
37703 - atomic_read(&fscache_n_cookie_special));
37704 + atomic_read_unchecked(&fscache_n_cookie_index),
37705 + atomic_read_unchecked(&fscache_n_cookie_data),
37706 + atomic_read_unchecked(&fscache_n_cookie_special));
37707
37708 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37709 - atomic_read(&fscache_n_object_alloc),
37710 - atomic_read(&fscache_n_object_no_alloc),
37711 - atomic_read(&fscache_n_object_avail),
37712 - atomic_read(&fscache_n_object_dead));
37713 + atomic_read_unchecked(&fscache_n_object_alloc),
37714 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37715 + atomic_read_unchecked(&fscache_n_object_avail),
37716 + atomic_read_unchecked(&fscache_n_object_dead));
37717 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37718 - atomic_read(&fscache_n_checkaux_none),
37719 - atomic_read(&fscache_n_checkaux_okay),
37720 - atomic_read(&fscache_n_checkaux_update),
37721 - atomic_read(&fscache_n_checkaux_obsolete));
37722 + atomic_read_unchecked(&fscache_n_checkaux_none),
37723 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37724 + atomic_read_unchecked(&fscache_n_checkaux_update),
37725 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37726
37727 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37728 - atomic_read(&fscache_n_marks),
37729 - atomic_read(&fscache_n_uncaches));
37730 + atomic_read_unchecked(&fscache_n_marks),
37731 + atomic_read_unchecked(&fscache_n_uncaches));
37732
37733 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37734 " oom=%u\n",
37735 - atomic_read(&fscache_n_acquires),
37736 - atomic_read(&fscache_n_acquires_null),
37737 - atomic_read(&fscache_n_acquires_no_cache),
37738 - atomic_read(&fscache_n_acquires_ok),
37739 - atomic_read(&fscache_n_acquires_nobufs),
37740 - atomic_read(&fscache_n_acquires_oom));
37741 + atomic_read_unchecked(&fscache_n_acquires),
37742 + atomic_read_unchecked(&fscache_n_acquires_null),
37743 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37744 + atomic_read_unchecked(&fscache_n_acquires_ok),
37745 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37746 + atomic_read_unchecked(&fscache_n_acquires_oom));
37747
37748 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37749 - atomic_read(&fscache_n_object_lookups),
37750 - atomic_read(&fscache_n_object_lookups_negative),
37751 - atomic_read(&fscache_n_object_lookups_positive),
37752 - atomic_read(&fscache_n_object_created),
37753 - atomic_read(&fscache_n_object_lookups_timed_out));
37754 + atomic_read_unchecked(&fscache_n_object_lookups),
37755 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37756 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37757 + atomic_read_unchecked(&fscache_n_object_created),
37758 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37759
37760 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37761 - atomic_read(&fscache_n_updates),
37762 - atomic_read(&fscache_n_updates_null),
37763 - atomic_read(&fscache_n_updates_run));
37764 + atomic_read_unchecked(&fscache_n_updates),
37765 + atomic_read_unchecked(&fscache_n_updates_null),
37766 + atomic_read_unchecked(&fscache_n_updates_run));
37767
37768 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37769 - atomic_read(&fscache_n_relinquishes),
37770 - atomic_read(&fscache_n_relinquishes_null),
37771 - atomic_read(&fscache_n_relinquishes_waitcrt),
37772 - atomic_read(&fscache_n_relinquishes_retire));
37773 + atomic_read_unchecked(&fscache_n_relinquishes),
37774 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37775 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37776 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37777
37778 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37779 - atomic_read(&fscache_n_attr_changed),
37780 - atomic_read(&fscache_n_attr_changed_ok),
37781 - atomic_read(&fscache_n_attr_changed_nobufs),
37782 - atomic_read(&fscache_n_attr_changed_nomem),
37783 - atomic_read(&fscache_n_attr_changed_calls));
37784 + atomic_read_unchecked(&fscache_n_attr_changed),
37785 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37786 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37787 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37788 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37789
37790 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37791 - atomic_read(&fscache_n_allocs),
37792 - atomic_read(&fscache_n_allocs_ok),
37793 - atomic_read(&fscache_n_allocs_wait),
37794 - atomic_read(&fscache_n_allocs_nobufs),
37795 - atomic_read(&fscache_n_allocs_intr));
37796 + atomic_read_unchecked(&fscache_n_allocs),
37797 + atomic_read_unchecked(&fscache_n_allocs_ok),
37798 + atomic_read_unchecked(&fscache_n_allocs_wait),
37799 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37800 + atomic_read_unchecked(&fscache_n_allocs_intr));
37801 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37802 - atomic_read(&fscache_n_alloc_ops),
37803 - atomic_read(&fscache_n_alloc_op_waits),
37804 - atomic_read(&fscache_n_allocs_object_dead));
37805 + atomic_read_unchecked(&fscache_n_alloc_ops),
37806 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37807 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37808
37809 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37810 " int=%u oom=%u\n",
37811 - atomic_read(&fscache_n_retrievals),
37812 - atomic_read(&fscache_n_retrievals_ok),
37813 - atomic_read(&fscache_n_retrievals_wait),
37814 - atomic_read(&fscache_n_retrievals_nodata),
37815 - atomic_read(&fscache_n_retrievals_nobufs),
37816 - atomic_read(&fscache_n_retrievals_intr),
37817 - atomic_read(&fscache_n_retrievals_nomem));
37818 + atomic_read_unchecked(&fscache_n_retrievals),
37819 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37820 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37821 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37822 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37823 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37824 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37825 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37826 - atomic_read(&fscache_n_retrieval_ops),
37827 - atomic_read(&fscache_n_retrieval_op_waits),
37828 - atomic_read(&fscache_n_retrievals_object_dead));
37829 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37830 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37831 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37832
37833 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37834 - atomic_read(&fscache_n_stores),
37835 - atomic_read(&fscache_n_stores_ok),
37836 - atomic_read(&fscache_n_stores_again),
37837 - atomic_read(&fscache_n_stores_nobufs),
37838 - atomic_read(&fscache_n_stores_oom));
37839 + atomic_read_unchecked(&fscache_n_stores),
37840 + atomic_read_unchecked(&fscache_n_stores_ok),
37841 + atomic_read_unchecked(&fscache_n_stores_again),
37842 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37843 + atomic_read_unchecked(&fscache_n_stores_oom));
37844 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37845 - atomic_read(&fscache_n_store_ops),
37846 - atomic_read(&fscache_n_store_calls),
37847 - atomic_read(&fscache_n_store_pages),
37848 - atomic_read(&fscache_n_store_radix_deletes),
37849 - atomic_read(&fscache_n_store_pages_over_limit));
37850 + atomic_read_unchecked(&fscache_n_store_ops),
37851 + atomic_read_unchecked(&fscache_n_store_calls),
37852 + atomic_read_unchecked(&fscache_n_store_pages),
37853 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37854 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37855
37856 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37857 - atomic_read(&fscache_n_store_vmscan_not_storing),
37858 - atomic_read(&fscache_n_store_vmscan_gone),
37859 - atomic_read(&fscache_n_store_vmscan_busy),
37860 - atomic_read(&fscache_n_store_vmscan_cancelled));
37861 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37862 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37863 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37864 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37865
37866 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37867 - atomic_read(&fscache_n_op_pend),
37868 - atomic_read(&fscache_n_op_run),
37869 - atomic_read(&fscache_n_op_enqueue),
37870 - atomic_read(&fscache_n_op_cancelled),
37871 - atomic_read(&fscache_n_op_rejected));
37872 + atomic_read_unchecked(&fscache_n_op_pend),
37873 + atomic_read_unchecked(&fscache_n_op_run),
37874 + atomic_read_unchecked(&fscache_n_op_enqueue),
37875 + atomic_read_unchecked(&fscache_n_op_cancelled),
37876 + atomic_read_unchecked(&fscache_n_op_rejected));
37877 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37878 - atomic_read(&fscache_n_op_deferred_release),
37879 - atomic_read(&fscache_n_op_release),
37880 - atomic_read(&fscache_n_op_gc));
37881 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37882 + atomic_read_unchecked(&fscache_n_op_release),
37883 + atomic_read_unchecked(&fscache_n_op_gc));
37884
37885 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37886 atomic_read(&fscache_n_cop_alloc_object),
37887 diff -urNp linux-3.0.3/fs/fs_struct.c linux-3.0.3/fs/fs_struct.c
37888 --- linux-3.0.3/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
37889 +++ linux-3.0.3/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
37890 @@ -4,6 +4,7 @@
37891 #include <linux/path.h>
37892 #include <linux/slab.h>
37893 #include <linux/fs_struct.h>
37894 +#include <linux/grsecurity.h>
37895 #include "internal.h"
37896
37897 static inline void path_get_longterm(struct path *path)
37898 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37899 old_root = fs->root;
37900 fs->root = *path;
37901 path_get_longterm(path);
37902 + gr_set_chroot_entries(current, path);
37903 write_seqcount_end(&fs->seq);
37904 spin_unlock(&fs->lock);
37905 if (old_root.dentry)
37906 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37907 && fs->root.mnt == old_root->mnt) {
37908 path_get_longterm(new_root);
37909 fs->root = *new_root;
37910 + gr_set_chroot_entries(p, new_root);
37911 count++;
37912 }
37913 if (fs->pwd.dentry == old_root->dentry
37914 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37915 spin_lock(&fs->lock);
37916 write_seqcount_begin(&fs->seq);
37917 tsk->fs = NULL;
37918 - kill = !--fs->users;
37919 + gr_clear_chroot_entries(tsk);
37920 + kill = !atomic_dec_return(&fs->users);
37921 write_seqcount_end(&fs->seq);
37922 spin_unlock(&fs->lock);
37923 task_unlock(tsk);
37924 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37925 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37926 /* We don't need to lock fs - think why ;-) */
37927 if (fs) {
37928 - fs->users = 1;
37929 + atomic_set(&fs->users, 1);
37930 fs->in_exec = 0;
37931 spin_lock_init(&fs->lock);
37932 seqcount_init(&fs->seq);
37933 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37934 spin_lock(&old->lock);
37935 fs->root = old->root;
37936 path_get_longterm(&fs->root);
37937 + /* instead of calling gr_set_chroot_entries here,
37938 + we call it from every caller of this function
37939 + */
37940 fs->pwd = old->pwd;
37941 path_get_longterm(&fs->pwd);
37942 spin_unlock(&old->lock);
37943 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37944
37945 task_lock(current);
37946 spin_lock(&fs->lock);
37947 - kill = !--fs->users;
37948 + kill = !atomic_dec_return(&fs->users);
37949 current->fs = new_fs;
37950 + gr_set_chroot_entries(current, &new_fs->root);
37951 spin_unlock(&fs->lock);
37952 task_unlock(current);
37953
37954 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37955
37956 /* to be mentioned only in INIT_TASK */
37957 struct fs_struct init_fs = {
37958 - .users = 1,
37959 + .users = ATOMIC_INIT(1),
37960 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37961 .seq = SEQCNT_ZERO,
37962 .umask = 0022,
37963 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37964 task_lock(current);
37965
37966 spin_lock(&init_fs.lock);
37967 - init_fs.users++;
37968 + atomic_inc(&init_fs.users);
37969 spin_unlock(&init_fs.lock);
37970
37971 spin_lock(&fs->lock);
37972 current->fs = &init_fs;
37973 - kill = !--fs->users;
37974 + gr_set_chroot_entries(current, &current->fs->root);
37975 + kill = !atomic_dec_return(&fs->users);
37976 spin_unlock(&fs->lock);
37977
37978 task_unlock(current);
37979 diff -urNp linux-3.0.3/fs/fuse/cuse.c linux-3.0.3/fs/fuse/cuse.c
37980 --- linux-3.0.3/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
37981 +++ linux-3.0.3/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
37982 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
37983 INIT_LIST_HEAD(&cuse_conntbl[i]);
37984
37985 /* inherit and extend fuse_dev_operations */
37986 - cuse_channel_fops = fuse_dev_operations;
37987 - cuse_channel_fops.owner = THIS_MODULE;
37988 - cuse_channel_fops.open = cuse_channel_open;
37989 - cuse_channel_fops.release = cuse_channel_release;
37990 + pax_open_kernel();
37991 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
37992 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
37993 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
37994 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
37995 + pax_close_kernel();
37996
37997 cuse_class = class_create(THIS_MODULE, "cuse");
37998 if (IS_ERR(cuse_class))
37999 diff -urNp linux-3.0.3/fs/fuse/dev.c linux-3.0.3/fs/fuse/dev.c
38000 --- linux-3.0.3/fs/fuse/dev.c 2011-07-21 22:17:23.000000000 -0400
38001 +++ linux-3.0.3/fs/fuse/dev.c 2011-08-23 21:47:56.000000000 -0400
38002 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
38003 ret = 0;
38004 pipe_lock(pipe);
38005
38006 - if (!pipe->readers) {
38007 + if (!atomic_read(&pipe->readers)) {
38008 send_sig(SIGPIPE, current, 0);
38009 if (!ret)
38010 ret = -EPIPE;
38011 diff -urNp linux-3.0.3/fs/fuse/dir.c linux-3.0.3/fs/fuse/dir.c
38012 --- linux-3.0.3/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
38013 +++ linux-3.0.3/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
38014 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
38015 return link;
38016 }
38017
38018 -static void free_link(char *link)
38019 +static void free_link(const char *link)
38020 {
38021 if (!IS_ERR(link))
38022 free_page((unsigned long) link);
38023 diff -urNp linux-3.0.3/fs/gfs2/inode.c linux-3.0.3/fs/gfs2/inode.c
38024 --- linux-3.0.3/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
38025 +++ linux-3.0.3/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
38026 @@ -1525,7 +1525,7 @@ out:
38027
38028 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38029 {
38030 - char *s = nd_get_link(nd);
38031 + const char *s = nd_get_link(nd);
38032 if (!IS_ERR(s))
38033 kfree(s);
38034 }
38035 diff -urNp linux-3.0.3/fs/hfsplus/catalog.c linux-3.0.3/fs/hfsplus/catalog.c
38036 --- linux-3.0.3/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
38037 +++ linux-3.0.3/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
38038 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38039 int err;
38040 u16 type;
38041
38042 + pax_track_stack();
38043 +
38044 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38045 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38046 if (err)
38047 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38048 int entry_size;
38049 int err;
38050
38051 + pax_track_stack();
38052 +
38053 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38054 str->name, cnid, inode->i_nlink);
38055 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38056 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38057 int entry_size, type;
38058 int err = 0;
38059
38060 + pax_track_stack();
38061 +
38062 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38063 cnid, src_dir->i_ino, src_name->name,
38064 dst_dir->i_ino, dst_name->name);
38065 diff -urNp linux-3.0.3/fs/hfsplus/dir.c linux-3.0.3/fs/hfsplus/dir.c
38066 --- linux-3.0.3/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
38067 +++ linux-3.0.3/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
38068 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38069 struct hfsplus_readdir_data *rd;
38070 u16 type;
38071
38072 + pax_track_stack();
38073 +
38074 if (filp->f_pos >= inode->i_size)
38075 return 0;
38076
38077 diff -urNp linux-3.0.3/fs/hfsplus/inode.c linux-3.0.3/fs/hfsplus/inode.c
38078 --- linux-3.0.3/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
38079 +++ linux-3.0.3/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
38080 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38081 int res = 0;
38082 u16 type;
38083
38084 + pax_track_stack();
38085 +
38086 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38087
38088 HFSPLUS_I(inode)->linkid = 0;
38089 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38090 struct hfs_find_data fd;
38091 hfsplus_cat_entry entry;
38092
38093 + pax_track_stack();
38094 +
38095 if (HFSPLUS_IS_RSRC(inode))
38096 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38097
38098 diff -urNp linux-3.0.3/fs/hfsplus/ioctl.c linux-3.0.3/fs/hfsplus/ioctl.c
38099 --- linux-3.0.3/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38100 +++ linux-3.0.3/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
38101 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38102 struct hfsplus_cat_file *file;
38103 int res;
38104
38105 + pax_track_stack();
38106 +
38107 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38108 return -EOPNOTSUPP;
38109
38110 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38111 struct hfsplus_cat_file *file;
38112 ssize_t res = 0;
38113
38114 + pax_track_stack();
38115 +
38116 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38117 return -EOPNOTSUPP;
38118
38119 diff -urNp linux-3.0.3/fs/hfsplus/super.c linux-3.0.3/fs/hfsplus/super.c
38120 --- linux-3.0.3/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
38121 +++ linux-3.0.3/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
38122 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38123 struct nls_table *nls = NULL;
38124 int err;
38125
38126 + pax_track_stack();
38127 +
38128 err = -EINVAL;
38129 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38130 if (!sbi)
38131 diff -urNp linux-3.0.3/fs/hugetlbfs/inode.c linux-3.0.3/fs/hugetlbfs/inode.c
38132 --- linux-3.0.3/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38133 +++ linux-3.0.3/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38134 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38135 .kill_sb = kill_litter_super,
38136 };
38137
38138 -static struct vfsmount *hugetlbfs_vfsmount;
38139 +struct vfsmount *hugetlbfs_vfsmount;
38140
38141 static int can_do_hugetlb_shm(void)
38142 {
38143 diff -urNp linux-3.0.3/fs/inode.c linux-3.0.3/fs/inode.c
38144 --- linux-3.0.3/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
38145 +++ linux-3.0.3/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
38146 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
38147
38148 #ifdef CONFIG_SMP
38149 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38150 - static atomic_t shared_last_ino;
38151 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38152 + static atomic_unchecked_t shared_last_ino;
38153 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38154
38155 res = next - LAST_INO_BATCH;
38156 }
38157 diff -urNp linux-3.0.3/fs/jbd/checkpoint.c linux-3.0.3/fs/jbd/checkpoint.c
38158 --- linux-3.0.3/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
38159 +++ linux-3.0.3/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
38160 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38161 tid_t this_tid;
38162 int result;
38163
38164 + pax_track_stack();
38165 +
38166 jbd_debug(1, "Start checkpoint\n");
38167
38168 /*
38169 diff -urNp linux-3.0.3/fs/jffs2/compr_rtime.c linux-3.0.3/fs/jffs2/compr_rtime.c
38170 --- linux-3.0.3/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
38171 +++ linux-3.0.3/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
38172 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38173 int outpos = 0;
38174 int pos=0;
38175
38176 + pax_track_stack();
38177 +
38178 memset(positions,0,sizeof(positions));
38179
38180 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38181 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38182 int outpos = 0;
38183 int pos=0;
38184
38185 + pax_track_stack();
38186 +
38187 memset(positions,0,sizeof(positions));
38188
38189 while (outpos<destlen) {
38190 diff -urNp linux-3.0.3/fs/jffs2/compr_rubin.c linux-3.0.3/fs/jffs2/compr_rubin.c
38191 --- linux-3.0.3/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
38192 +++ linux-3.0.3/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
38193 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38194 int ret;
38195 uint32_t mysrclen, mydstlen;
38196
38197 + pax_track_stack();
38198 +
38199 mysrclen = *sourcelen;
38200 mydstlen = *dstlen - 8;
38201
38202 diff -urNp linux-3.0.3/fs/jffs2/erase.c linux-3.0.3/fs/jffs2/erase.c
38203 --- linux-3.0.3/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
38204 +++ linux-3.0.3/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
38205 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38206 struct jffs2_unknown_node marker = {
38207 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38208 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38209 - .totlen = cpu_to_je32(c->cleanmarker_size)
38210 + .totlen = cpu_to_je32(c->cleanmarker_size),
38211 + .hdr_crc = cpu_to_je32(0)
38212 };
38213
38214 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38215 diff -urNp linux-3.0.3/fs/jffs2/wbuf.c linux-3.0.3/fs/jffs2/wbuf.c
38216 --- linux-3.0.3/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
38217 +++ linux-3.0.3/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
38218 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38219 {
38220 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38221 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38222 - .totlen = constant_cpu_to_je32(8)
38223 + .totlen = constant_cpu_to_je32(8),
38224 + .hdr_crc = constant_cpu_to_je32(0)
38225 };
38226
38227 /*
38228 diff -urNp linux-3.0.3/fs/jffs2/xattr.c linux-3.0.3/fs/jffs2/xattr.c
38229 --- linux-3.0.3/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
38230 +++ linux-3.0.3/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
38231 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38232
38233 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38234
38235 + pax_track_stack();
38236 +
38237 /* Phase.1 : Merge same xref */
38238 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38239 xref_tmphash[i] = NULL;
38240 diff -urNp linux-3.0.3/fs/jfs/super.c linux-3.0.3/fs/jfs/super.c
38241 --- linux-3.0.3/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
38242 +++ linux-3.0.3/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
38243 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38244
38245 jfs_inode_cachep =
38246 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38247 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38248 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38249 init_once);
38250 if (jfs_inode_cachep == NULL)
38251 return -ENOMEM;
38252 diff -urNp linux-3.0.3/fs/Kconfig.binfmt linux-3.0.3/fs/Kconfig.binfmt
38253 --- linux-3.0.3/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
38254 +++ linux-3.0.3/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
38255 @@ -86,7 +86,7 @@ config HAVE_AOUT
38256
38257 config BINFMT_AOUT
38258 tristate "Kernel support for a.out and ECOFF binaries"
38259 - depends on HAVE_AOUT
38260 + depends on HAVE_AOUT && BROKEN
38261 ---help---
38262 A.out (Assembler.OUTput) is a set of formats for libraries and
38263 executables used in the earliest versions of UNIX. Linux used
38264 diff -urNp linux-3.0.3/fs/libfs.c linux-3.0.3/fs/libfs.c
38265 --- linux-3.0.3/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
38266 +++ linux-3.0.3/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
38267 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38268
38269 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38270 struct dentry *next;
38271 + char d_name[sizeof(next->d_iname)];
38272 + const unsigned char *name;
38273 +
38274 next = list_entry(p, struct dentry, d_u.d_child);
38275 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38276 if (!simple_positive(next)) {
38277 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38278
38279 spin_unlock(&next->d_lock);
38280 spin_unlock(&dentry->d_lock);
38281 - if (filldir(dirent, next->d_name.name,
38282 + name = next->d_name.name;
38283 + if (name == next->d_iname) {
38284 + memcpy(d_name, name, next->d_name.len);
38285 + name = d_name;
38286 + }
38287 + if (filldir(dirent, name,
38288 next->d_name.len, filp->f_pos,
38289 next->d_inode->i_ino,
38290 dt_type(next->d_inode)) < 0)
38291 diff -urNp linux-3.0.3/fs/lockd/clntproc.c linux-3.0.3/fs/lockd/clntproc.c
38292 --- linux-3.0.3/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
38293 +++ linux-3.0.3/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
38294 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38295 /*
38296 * Cookie counter for NLM requests
38297 */
38298 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38299 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38300
38301 void nlmclnt_next_cookie(struct nlm_cookie *c)
38302 {
38303 - u32 cookie = atomic_inc_return(&nlm_cookie);
38304 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38305
38306 memcpy(c->data, &cookie, 4);
38307 c->len=4;
38308 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38309 struct nlm_rqst reqst, *req;
38310 int status;
38311
38312 + pax_track_stack();
38313 +
38314 req = &reqst;
38315 memset(req, 0, sizeof(*req));
38316 locks_init_lock(&req->a_args.lock.fl);
38317 diff -urNp linux-3.0.3/fs/locks.c linux-3.0.3/fs/locks.c
38318 --- linux-3.0.3/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
38319 +++ linux-3.0.3/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
38320 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38321 return;
38322
38323 if (filp->f_op && filp->f_op->flock) {
38324 - struct file_lock fl = {
38325 + struct file_lock flock = {
38326 .fl_pid = current->tgid,
38327 .fl_file = filp,
38328 .fl_flags = FL_FLOCK,
38329 .fl_type = F_UNLCK,
38330 .fl_end = OFFSET_MAX,
38331 };
38332 - filp->f_op->flock(filp, F_SETLKW, &fl);
38333 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38334 - fl.fl_ops->fl_release_private(&fl);
38335 + filp->f_op->flock(filp, F_SETLKW, &flock);
38336 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38337 + flock.fl_ops->fl_release_private(&flock);
38338 }
38339
38340 lock_flocks();
38341 diff -urNp linux-3.0.3/fs/logfs/super.c linux-3.0.3/fs/logfs/super.c
38342 --- linux-3.0.3/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
38343 +++ linux-3.0.3/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
38344 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38345 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38346 int err, valid0, valid1;
38347
38348 + pax_track_stack();
38349 +
38350 /* read first superblock */
38351 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38352 if (err)
38353 diff -urNp linux-3.0.3/fs/namei.c linux-3.0.3/fs/namei.c
38354 --- linux-3.0.3/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
38355 +++ linux-3.0.3/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
38356 @@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
38357 return ret;
38358
38359 /*
38360 - * Read/write DACs are always overridable.
38361 - * Executable DACs are overridable for all directories and
38362 - * for non-directories that have least one exec bit set.
38363 + * Searching includes executable on directories, else just read.
38364 */
38365 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38366 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38367 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38368 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38369 +#ifdef CONFIG_GRKERNSEC
38370 + if (flags & IPERM_FLAG_RCU)
38371 + return -ECHILD;
38372 +#endif
38373 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38374 return 0;
38375 + }
38376
38377 /*
38378 - * Searching includes executable on directories, else just read.
38379 + * Read/write DACs are always overridable.
38380 + * Executable DACs are overridable for all directories and
38381 + * for non-directories that have least one exec bit set.
38382 */
38383 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38384 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38385 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38386 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38387 +#ifdef CONFIG_GRKERNSEC
38388 + if (flags & IPERM_FLAG_RCU)
38389 + return -ECHILD;
38390 +#endif
38391 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38392 return 0;
38393 + }
38394
38395 return -EACCES;
38396 }
38397 @@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
38398 br_read_unlock(vfsmount_lock);
38399 }
38400
38401 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38402 + return -ENOENT;
38403 +
38404 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38405 return 0;
38406
38407 @@ -593,9 +606,16 @@ static inline int exec_permission(struct
38408 if (ret == -ECHILD)
38409 return ret;
38410
38411 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38412 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38413 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38414 goto ok;
38415 + else {
38416 +#ifdef CONFIG_GRKERNSEC
38417 + if (flags & IPERM_FLAG_RCU)
38418 + return -ECHILD;
38419 +#endif
38420 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38421 + goto ok;
38422 + }
38423
38424 return ret;
38425 ok:
38426 @@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
38427 return error;
38428 }
38429
38430 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38431 + dentry->d_inode, dentry, nd->path.mnt)) {
38432 + error = -EACCES;
38433 + *p = ERR_PTR(error); /* no ->put_link(), please */
38434 + path_put(&nd->path);
38435 + return error;
38436 + }
38437 +
38438 nd->last_type = LAST_BIND;
38439 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38440 error = PTR_ERR(*p);
38441 if (!IS_ERR(*p)) {
38442 - char *s = nd_get_link(nd);
38443 + const char *s = nd_get_link(nd);
38444 error = 0;
38445 if (s)
38446 error = __vfs_follow_link(nd, s);
38447 @@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
38448 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38449
38450 if (likely(!retval)) {
38451 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38452 + return -ENOENT;
38453 +
38454 if (unlikely(!audit_dummy_context())) {
38455 if (nd->path.dentry && nd->inode)
38456 audit_inode(name, nd->path.dentry);
38457 @@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
38458 return error;
38459 }
38460
38461 +/*
38462 + * Note that while the flag value (low two bits) for sys_open means:
38463 + * 00 - read-only
38464 + * 01 - write-only
38465 + * 10 - read-write
38466 + * 11 - special
38467 + * it is changed into
38468 + * 00 - no permissions needed
38469 + * 01 - read-permission
38470 + * 10 - write-permission
38471 + * 11 - read-write
38472 + * for the internal routines (ie open_namei()/follow_link() etc)
38473 + * This is more logical, and also allows the 00 "no perm needed"
38474 + * to be used for symlinks (where the permissions are checked
38475 + * later).
38476 + *
38477 +*/
38478 +static inline int open_to_namei_flags(int flag)
38479 +{
38480 + if ((flag+1) & O_ACCMODE)
38481 + flag++;
38482 + return flag;
38483 +}
38484 +
38485 static int may_open(struct path *path, int acc_mode, int flag)
38486 {
38487 struct dentry *dentry = path->dentry;
38488 @@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
38489 /*
38490 * Ensure there are no outstanding leases on the file.
38491 */
38492 - return break_lease(inode, flag);
38493 + error = break_lease(inode, flag);
38494 +
38495 + if (error)
38496 + return error;
38497 +
38498 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38499 + error = -EPERM;
38500 + goto exit;
38501 + }
38502 +
38503 + if (gr_handle_rawio(inode)) {
38504 + error = -EPERM;
38505 + goto exit;
38506 + }
38507 +
38508 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38509 + error = -EACCES;
38510 + goto exit;
38511 + }
38512 +exit:
38513 + return error;
38514 }
38515
38516 static int handle_truncate(struct file *filp)
38517 @@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
38518 }
38519
38520 /*
38521 - * Note that while the flag value (low two bits) for sys_open means:
38522 - * 00 - read-only
38523 - * 01 - write-only
38524 - * 10 - read-write
38525 - * 11 - special
38526 - * it is changed into
38527 - * 00 - no permissions needed
38528 - * 01 - read-permission
38529 - * 10 - write-permission
38530 - * 11 - read-write
38531 - * for the internal routines (ie open_namei()/follow_link() etc)
38532 - * This is more logical, and also allows the 00 "no perm needed"
38533 - * to be used for symlinks (where the permissions are checked
38534 - * later).
38535 - *
38536 -*/
38537 -static inline int open_to_namei_flags(int flag)
38538 -{
38539 - if ((flag+1) & O_ACCMODE)
38540 - flag++;
38541 - return flag;
38542 -}
38543 -
38544 -/*
38545 * Handle the last step of open()
38546 */
38547 static struct file *do_last(struct nameidata *nd, struct path *path,
38548 @@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
38549 struct dentry *dir = nd->path.dentry;
38550 struct dentry *dentry;
38551 int open_flag = op->open_flag;
38552 + int flag = open_to_namei_flags(open_flag);
38553 int will_truncate = open_flag & O_TRUNC;
38554 int want_write = 0;
38555 int acc_mode = op->acc_mode;
38556 @@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
38557 /* Negative dentry, just create the file */
38558 if (!dentry->d_inode) {
38559 int mode = op->mode;
38560 +
38561 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38562 + error = -EACCES;
38563 + goto exit_mutex_unlock;
38564 + }
38565 +
38566 if (!IS_POSIXACL(dir->d_inode))
38567 mode &= ~current_umask();
38568 /*
38569 @@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
38570 error = vfs_create(dir->d_inode, dentry, mode, nd);
38571 if (error)
38572 goto exit_mutex_unlock;
38573 + else
38574 + gr_handle_create(path->dentry, path->mnt);
38575 mutex_unlock(&dir->d_inode->i_mutex);
38576 dput(nd->path.dentry);
38577 nd->path.dentry = dentry;
38578 @@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
38579 /*
38580 * It already exists.
38581 */
38582 +
38583 + /* only check if O_CREAT is specified, all other checks need to go
38584 + into may_open */
38585 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38586 + error = -EACCES;
38587 + goto exit_mutex_unlock;
38588 + }
38589 +
38590 mutex_unlock(&dir->d_inode->i_mutex);
38591 audit_inode(pathname, path->dentry);
38592
38593 @@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38594 error = may_mknod(mode);
38595 if (error)
38596 goto out_dput;
38597 +
38598 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38599 + error = -EPERM;
38600 + goto out_dput;
38601 + }
38602 +
38603 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38604 + error = -EACCES;
38605 + goto out_dput;
38606 + }
38607 +
38608 error = mnt_want_write(nd.path.mnt);
38609 if (error)
38610 goto out_dput;
38611 @@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38612 }
38613 out_drop_write:
38614 mnt_drop_write(nd.path.mnt);
38615 +
38616 + if (!error)
38617 + gr_handle_create(dentry, nd.path.mnt);
38618 out_dput:
38619 dput(dentry);
38620 out_unlock:
38621 @@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38622 if (IS_ERR(dentry))
38623 goto out_unlock;
38624
38625 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38626 + error = -EACCES;
38627 + goto out_dput;
38628 + }
38629 +
38630 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38631 mode &= ~current_umask();
38632 error = mnt_want_write(nd.path.mnt);
38633 @@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38634 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38635 out_drop_write:
38636 mnt_drop_write(nd.path.mnt);
38637 +
38638 + if (!error)
38639 + gr_handle_create(dentry, nd.path.mnt);
38640 +
38641 out_dput:
38642 dput(dentry);
38643 out_unlock:
38644 @@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
38645 char * name;
38646 struct dentry *dentry;
38647 struct nameidata nd;
38648 + ino_t saved_ino = 0;
38649 + dev_t saved_dev = 0;
38650
38651 error = user_path_parent(dfd, pathname, &nd, &name);
38652 if (error)
38653 @@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
38654 error = -ENOENT;
38655 goto exit3;
38656 }
38657 +
38658 + if (dentry->d_inode->i_nlink <= 1) {
38659 + saved_ino = dentry->d_inode->i_ino;
38660 + saved_dev = gr_get_dev_from_dentry(dentry);
38661 + }
38662 +
38663 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38664 + error = -EACCES;
38665 + goto exit3;
38666 + }
38667 +
38668 error = mnt_want_write(nd.path.mnt);
38669 if (error)
38670 goto exit3;
38671 @@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
38672 if (error)
38673 goto exit4;
38674 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38675 + if (!error && (saved_dev || saved_ino))
38676 + gr_handle_delete(saved_ino, saved_dev);
38677 exit4:
38678 mnt_drop_write(nd.path.mnt);
38679 exit3:
38680 @@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
38681 struct dentry *dentry;
38682 struct nameidata nd;
38683 struct inode *inode = NULL;
38684 + ino_t saved_ino = 0;
38685 + dev_t saved_dev = 0;
38686
38687 error = user_path_parent(dfd, pathname, &nd, &name);
38688 if (error)
38689 @@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
38690 if (!inode)
38691 goto slashes;
38692 ihold(inode);
38693 +
38694 + if (inode->i_nlink <= 1) {
38695 + saved_ino = inode->i_ino;
38696 + saved_dev = gr_get_dev_from_dentry(dentry);
38697 + }
38698 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38699 + error = -EACCES;
38700 + goto exit2;
38701 + }
38702 +
38703 error = mnt_want_write(nd.path.mnt);
38704 if (error)
38705 goto exit2;
38706 @@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
38707 if (error)
38708 goto exit3;
38709 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38710 + if (!error && (saved_ino || saved_dev))
38711 + gr_handle_delete(saved_ino, saved_dev);
38712 exit3:
38713 mnt_drop_write(nd.path.mnt);
38714 exit2:
38715 @@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38716 if (IS_ERR(dentry))
38717 goto out_unlock;
38718
38719 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38720 + error = -EACCES;
38721 + goto out_dput;
38722 + }
38723 +
38724 error = mnt_want_write(nd.path.mnt);
38725 if (error)
38726 goto out_dput;
38727 @@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38728 if (error)
38729 goto out_drop_write;
38730 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38731 + if (!error)
38732 + gr_handle_create(dentry, nd.path.mnt);
38733 out_drop_write:
38734 mnt_drop_write(nd.path.mnt);
38735 out_dput:
38736 @@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38737 error = PTR_ERR(new_dentry);
38738 if (IS_ERR(new_dentry))
38739 goto out_unlock;
38740 +
38741 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38742 + old_path.dentry->d_inode,
38743 + old_path.dentry->d_inode->i_mode, to)) {
38744 + error = -EACCES;
38745 + goto out_dput;
38746 + }
38747 +
38748 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38749 + old_path.dentry, old_path.mnt, to)) {
38750 + error = -EACCES;
38751 + goto out_dput;
38752 + }
38753 +
38754 error = mnt_want_write(nd.path.mnt);
38755 if (error)
38756 goto out_dput;
38757 @@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38758 if (error)
38759 goto out_drop_write;
38760 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38761 + if (!error)
38762 + gr_handle_create(new_dentry, nd.path.mnt);
38763 out_drop_write:
38764 mnt_drop_write(nd.path.mnt);
38765 out_dput:
38766 @@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38767 char *to;
38768 int error;
38769
38770 + pax_track_stack();
38771 +
38772 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38773 if (error)
38774 goto exit;
38775 @@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38776 if (new_dentry == trap)
38777 goto exit5;
38778
38779 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38780 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38781 + to);
38782 + if (error)
38783 + goto exit5;
38784 +
38785 error = mnt_want_write(oldnd.path.mnt);
38786 if (error)
38787 goto exit5;
38788 @@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38789 goto exit6;
38790 error = vfs_rename(old_dir->d_inode, old_dentry,
38791 new_dir->d_inode, new_dentry);
38792 + if (!error)
38793 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38794 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38795 exit6:
38796 mnt_drop_write(oldnd.path.mnt);
38797 exit5:
38798 @@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
38799
38800 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38801 {
38802 + char tmpbuf[64];
38803 + const char *newlink;
38804 int len;
38805
38806 len = PTR_ERR(link);
38807 @@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
38808 len = strlen(link);
38809 if (len > (unsigned) buflen)
38810 len = buflen;
38811 - if (copy_to_user(buffer, link, len))
38812 +
38813 + if (len < sizeof(tmpbuf)) {
38814 + memcpy(tmpbuf, link, len);
38815 + newlink = tmpbuf;
38816 + } else
38817 + newlink = link;
38818 +
38819 + if (copy_to_user(buffer, newlink, len))
38820 len = -EFAULT;
38821 out:
38822 return len;
38823 diff -urNp linux-3.0.3/fs/namespace.c linux-3.0.3/fs/namespace.c
38824 --- linux-3.0.3/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
38825 +++ linux-3.0.3/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
38826 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38827 if (!(sb->s_flags & MS_RDONLY))
38828 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38829 up_write(&sb->s_umount);
38830 +
38831 + gr_log_remount(mnt->mnt_devname, retval);
38832 +
38833 return retval;
38834 }
38835
38836 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38837 br_write_unlock(vfsmount_lock);
38838 up_write(&namespace_sem);
38839 release_mounts(&umount_list);
38840 +
38841 + gr_log_unmount(mnt->mnt_devname, retval);
38842 +
38843 return retval;
38844 }
38845
38846 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38847 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38848 MS_STRICTATIME);
38849
38850 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38851 + retval = -EPERM;
38852 + goto dput_out;
38853 + }
38854 +
38855 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38856 + retval = -EPERM;
38857 + goto dput_out;
38858 + }
38859 +
38860 if (flags & MS_REMOUNT)
38861 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38862 data_page);
38863 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38864 dev_name, data_page);
38865 dput_out:
38866 path_put(&path);
38867 +
38868 + gr_log_mount(dev_name, dir_name, retval);
38869 +
38870 return retval;
38871 }
38872
38873 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38874 if (error)
38875 goto out2;
38876
38877 + if (gr_handle_chroot_pivot()) {
38878 + error = -EPERM;
38879 + goto out2;
38880 + }
38881 +
38882 get_fs_root(current->fs, &root);
38883 error = lock_mount(&old);
38884 if (error)
38885 diff -urNp linux-3.0.3/fs/ncpfs/dir.c linux-3.0.3/fs/ncpfs/dir.c
38886 --- linux-3.0.3/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38887 +++ linux-3.0.3/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
38888 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38889 int res, val = 0, len;
38890 __u8 __name[NCP_MAXPATHLEN + 1];
38891
38892 + pax_track_stack();
38893 +
38894 if (dentry == dentry->d_sb->s_root)
38895 return 1;
38896
38897 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38898 int error, res, len;
38899 __u8 __name[NCP_MAXPATHLEN + 1];
38900
38901 + pax_track_stack();
38902 +
38903 error = -EIO;
38904 if (!ncp_conn_valid(server))
38905 goto finished;
38906 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38907 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38908 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38909
38910 + pax_track_stack();
38911 +
38912 ncp_age_dentry(server, dentry);
38913 len = sizeof(__name);
38914 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38915 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38916 int error, len;
38917 __u8 __name[NCP_MAXPATHLEN + 1];
38918
38919 + pax_track_stack();
38920 +
38921 DPRINTK("ncp_mkdir: making %s/%s\n",
38922 dentry->d_parent->d_name.name, dentry->d_name.name);
38923
38924 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
38925 int old_len, new_len;
38926 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38927
38928 + pax_track_stack();
38929 +
38930 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38931 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38932 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38933 diff -urNp linux-3.0.3/fs/ncpfs/inode.c linux-3.0.3/fs/ncpfs/inode.c
38934 --- linux-3.0.3/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38935 +++ linux-3.0.3/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38936 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38937 #endif
38938 struct ncp_entry_info finfo;
38939
38940 + pax_track_stack();
38941 +
38942 memset(&data, 0, sizeof(data));
38943 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38944 if (!server)
38945 diff -urNp linux-3.0.3/fs/nfs/inode.c linux-3.0.3/fs/nfs/inode.c
38946 --- linux-3.0.3/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38947 +++ linux-3.0.3/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38948 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38949 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38950 nfsi->attrtimeo_timestamp = jiffies;
38951
38952 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38953 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38954 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38955 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38956 else
38957 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38958 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38959 }
38960
38961 -static atomic_long_t nfs_attr_generation_counter;
38962 +static atomic_long_unchecked_t nfs_attr_generation_counter;
38963
38964 static unsigned long nfs_read_attr_generation_counter(void)
38965 {
38966 - return atomic_long_read(&nfs_attr_generation_counter);
38967 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
38968 }
38969
38970 unsigned long nfs_inc_attr_generation_counter(void)
38971 {
38972 - return atomic_long_inc_return(&nfs_attr_generation_counter);
38973 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
38974 }
38975
38976 void nfs_fattr_init(struct nfs_fattr *fattr)
38977 diff -urNp linux-3.0.3/fs/nfsd/nfs4state.c linux-3.0.3/fs/nfsd/nfs4state.c
38978 --- linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
38979 +++ linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
38980 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
38981 unsigned int strhashval;
38982 int err;
38983
38984 + pax_track_stack();
38985 +
38986 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
38987 (long long) lock->lk_offset,
38988 (long long) lock->lk_length);
38989 diff -urNp linux-3.0.3/fs/nfsd/nfs4xdr.c linux-3.0.3/fs/nfsd/nfs4xdr.c
38990 --- linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
38991 +++ linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
38992 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
38993 .dentry = dentry,
38994 };
38995
38996 + pax_track_stack();
38997 +
38998 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
38999 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
39000 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
39001 diff -urNp linux-3.0.3/fs/nfsd/vfs.c linux-3.0.3/fs/nfsd/vfs.c
39002 --- linux-3.0.3/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
39003 +++ linux-3.0.3/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
39004 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
39005 } else {
39006 oldfs = get_fs();
39007 set_fs(KERNEL_DS);
39008 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
39009 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
39010 set_fs(oldfs);
39011 }
39012
39013 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39014
39015 /* Write the data. */
39016 oldfs = get_fs(); set_fs(KERNEL_DS);
39017 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39018 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39019 set_fs(oldfs);
39020 if (host_err < 0)
39021 goto out_nfserr;
39022 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39023 */
39024
39025 oldfs = get_fs(); set_fs(KERNEL_DS);
39026 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
39027 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39028 set_fs(oldfs);
39029
39030 if (host_err < 0)
39031 diff -urNp linux-3.0.3/fs/notify/fanotify/fanotify_user.c linux-3.0.3/fs/notify/fanotify/fanotify_user.c
39032 --- linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
39033 +++ linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
39034 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39035 goto out_close_fd;
39036
39037 ret = -EFAULT;
39038 - if (copy_to_user(buf, &fanotify_event_metadata,
39039 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39040 + copy_to_user(buf, &fanotify_event_metadata,
39041 fanotify_event_metadata.event_len))
39042 goto out_kill_access_response;
39043
39044 diff -urNp linux-3.0.3/fs/notify/notification.c linux-3.0.3/fs/notify/notification.c
39045 --- linux-3.0.3/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
39046 +++ linux-3.0.3/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
39047 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39048 * get set to 0 so it will never get 'freed'
39049 */
39050 static struct fsnotify_event *q_overflow_event;
39051 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39052 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39053
39054 /**
39055 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39056 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39057 */
39058 u32 fsnotify_get_cookie(void)
39059 {
39060 - return atomic_inc_return(&fsnotify_sync_cookie);
39061 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39062 }
39063 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39064
39065 diff -urNp linux-3.0.3/fs/ntfs/dir.c linux-3.0.3/fs/ntfs/dir.c
39066 --- linux-3.0.3/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
39067 +++ linux-3.0.3/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
39068 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
39069 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39070 ~(s64)(ndir->itype.index.block_size - 1)));
39071 /* Bounds checks. */
39072 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39073 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39074 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39075 "inode 0x%lx or driver bug.", vdir->i_ino);
39076 goto err_out;
39077 diff -urNp linux-3.0.3/fs/ntfs/file.c linux-3.0.3/fs/ntfs/file.c
39078 --- linux-3.0.3/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
39079 +++ linux-3.0.3/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
39080 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39081 #endif /* NTFS_RW */
39082 };
39083
39084 -const struct file_operations ntfs_empty_file_ops = {};
39085 +const struct file_operations ntfs_empty_file_ops __read_only;
39086
39087 -const struct inode_operations ntfs_empty_inode_ops = {};
39088 +const struct inode_operations ntfs_empty_inode_ops __read_only;
39089 diff -urNp linux-3.0.3/fs/ocfs2/localalloc.c linux-3.0.3/fs/ocfs2/localalloc.c
39090 --- linux-3.0.3/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
39091 +++ linux-3.0.3/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
39092 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39093 goto bail;
39094 }
39095
39096 - atomic_inc(&osb->alloc_stats.moves);
39097 + atomic_inc_unchecked(&osb->alloc_stats.moves);
39098
39099 bail:
39100 if (handle)
39101 diff -urNp linux-3.0.3/fs/ocfs2/namei.c linux-3.0.3/fs/ocfs2/namei.c
39102 --- linux-3.0.3/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
39103 +++ linux-3.0.3/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
39104 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39105 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39106 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39107
39108 + pax_track_stack();
39109 +
39110 /* At some point it might be nice to break this function up a
39111 * bit. */
39112
39113 diff -urNp linux-3.0.3/fs/ocfs2/ocfs2.h linux-3.0.3/fs/ocfs2/ocfs2.h
39114 --- linux-3.0.3/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
39115 +++ linux-3.0.3/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
39116 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
39117
39118 struct ocfs2_alloc_stats
39119 {
39120 - atomic_t moves;
39121 - atomic_t local_data;
39122 - atomic_t bitmap_data;
39123 - atomic_t bg_allocs;
39124 - atomic_t bg_extends;
39125 + atomic_unchecked_t moves;
39126 + atomic_unchecked_t local_data;
39127 + atomic_unchecked_t bitmap_data;
39128 + atomic_unchecked_t bg_allocs;
39129 + atomic_unchecked_t bg_extends;
39130 };
39131
39132 enum ocfs2_local_alloc_state
39133 diff -urNp linux-3.0.3/fs/ocfs2/suballoc.c linux-3.0.3/fs/ocfs2/suballoc.c
39134 --- linux-3.0.3/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
39135 +++ linux-3.0.3/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
39136 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39137 mlog_errno(status);
39138 goto bail;
39139 }
39140 - atomic_inc(&osb->alloc_stats.bg_extends);
39141 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39142
39143 /* You should never ask for this much metadata */
39144 BUG_ON(bits_wanted >
39145 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39146 mlog_errno(status);
39147 goto bail;
39148 }
39149 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39150 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39151
39152 *suballoc_loc = res.sr_bg_blkno;
39153 *suballoc_bit_start = res.sr_bit_offset;
39154 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39155 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39156 res->sr_bits);
39157
39158 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39159 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39160
39161 BUG_ON(res->sr_bits != 1);
39162
39163 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39164 mlog_errno(status);
39165 goto bail;
39166 }
39167 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39168 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39169
39170 BUG_ON(res.sr_bits != 1);
39171
39172 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39173 cluster_start,
39174 num_clusters);
39175 if (!status)
39176 - atomic_inc(&osb->alloc_stats.local_data);
39177 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39178 } else {
39179 if (min_clusters > (osb->bitmap_cpg - 1)) {
39180 /* The only paths asking for contiguousness
39181 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39182 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39183 res.sr_bg_blkno,
39184 res.sr_bit_offset);
39185 - atomic_inc(&osb->alloc_stats.bitmap_data);
39186 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39187 *num_clusters = res.sr_bits;
39188 }
39189 }
39190 diff -urNp linux-3.0.3/fs/ocfs2/super.c linux-3.0.3/fs/ocfs2/super.c
39191 --- linux-3.0.3/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
39192 +++ linux-3.0.3/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
39193 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39194 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39195 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39196 "Stats",
39197 - atomic_read(&osb->alloc_stats.bitmap_data),
39198 - atomic_read(&osb->alloc_stats.local_data),
39199 - atomic_read(&osb->alloc_stats.bg_allocs),
39200 - atomic_read(&osb->alloc_stats.moves),
39201 - atomic_read(&osb->alloc_stats.bg_extends));
39202 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39203 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39204 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39205 + atomic_read_unchecked(&osb->alloc_stats.moves),
39206 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39207
39208 out += snprintf(buf + out, len - out,
39209 "%10s => State: %u Descriptor: %llu Size: %u bits "
39210 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
39211 spin_lock_init(&osb->osb_xattr_lock);
39212 ocfs2_init_steal_slots(osb);
39213
39214 - atomic_set(&osb->alloc_stats.moves, 0);
39215 - atomic_set(&osb->alloc_stats.local_data, 0);
39216 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39217 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39218 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39219 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39220 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39221 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39222 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39223 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39224
39225 /* Copy the blockcheck stats from the superblock probe */
39226 osb->osb_ecc_stats = *stats;
39227 diff -urNp linux-3.0.3/fs/ocfs2/symlink.c linux-3.0.3/fs/ocfs2/symlink.c
39228 --- linux-3.0.3/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
39229 +++ linux-3.0.3/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
39230 @@ -142,7 +142,7 @@ bail:
39231
39232 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39233 {
39234 - char *link = nd_get_link(nd);
39235 + const char *link = nd_get_link(nd);
39236 if (!IS_ERR(link))
39237 kfree(link);
39238 }
39239 diff -urNp linux-3.0.3/fs/open.c linux-3.0.3/fs/open.c
39240 --- linux-3.0.3/fs/open.c 2011-07-21 22:17:23.000000000 -0400
39241 +++ linux-3.0.3/fs/open.c 2011-08-23 21:48:14.000000000 -0400
39242 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39243 error = locks_verify_truncate(inode, NULL, length);
39244 if (!error)
39245 error = security_path_truncate(&path);
39246 +
39247 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39248 + error = -EACCES;
39249 +
39250 if (!error)
39251 error = do_truncate(path.dentry, length, 0, NULL);
39252
39253 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39254 if (__mnt_is_readonly(path.mnt))
39255 res = -EROFS;
39256
39257 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39258 + res = -EACCES;
39259 +
39260 out_path_release:
39261 path_put(&path);
39262 out:
39263 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39264 if (error)
39265 goto dput_and_out;
39266
39267 + gr_log_chdir(path.dentry, path.mnt);
39268 +
39269 set_fs_pwd(current->fs, &path);
39270
39271 dput_and_out:
39272 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39273 goto out_putf;
39274
39275 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39276 +
39277 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39278 + error = -EPERM;
39279 +
39280 + if (!error)
39281 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39282 +
39283 if (!error)
39284 set_fs_pwd(current->fs, &file->f_path);
39285 out_putf:
39286 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39287 if (error)
39288 goto dput_and_out;
39289
39290 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39291 + goto dput_and_out;
39292 +
39293 + if (gr_handle_chroot_caps(&path)) {
39294 + error = -ENOMEM;
39295 + goto dput_and_out;
39296 + }
39297 +
39298 set_fs_root(current->fs, &path);
39299 +
39300 + gr_handle_chroot_chdir(&path);
39301 +
39302 error = 0;
39303 dput_and_out:
39304 path_put(&path);
39305 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39306 err = mnt_want_write_file(file);
39307 if (err)
39308 goto out_putf;
39309 +
39310 mutex_lock(&inode->i_mutex);
39311 +
39312 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39313 + err = -EACCES;
39314 + goto out_unlock;
39315 + }
39316 +
39317 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39318 if (err)
39319 goto out_unlock;
39320 if (mode == (mode_t) -1)
39321 mode = inode->i_mode;
39322 +
39323 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39324 + err = -EACCES;
39325 + goto out_unlock;
39326 + }
39327 +
39328 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39329 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39330 err = notify_change(dentry, &newattrs);
39331 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39332 error = mnt_want_write(path.mnt);
39333 if (error)
39334 goto dput_and_out;
39335 +
39336 mutex_lock(&inode->i_mutex);
39337 +
39338 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39339 + error = -EACCES;
39340 + goto out_unlock;
39341 + }
39342 +
39343 error = security_path_chmod(path.dentry, path.mnt, mode);
39344 if (error)
39345 goto out_unlock;
39346 if (mode == (mode_t) -1)
39347 mode = inode->i_mode;
39348 +
39349 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39350 + error = -EACCES;
39351 + goto out_unlock;
39352 + }
39353 +
39354 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39355 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39356 error = notify_change(path.dentry, &newattrs);
39357 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39358 int error;
39359 struct iattr newattrs;
39360
39361 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39362 + return -EACCES;
39363 +
39364 newattrs.ia_valid = ATTR_CTIME;
39365 if (user != (uid_t) -1) {
39366 newattrs.ia_valid |= ATTR_UID;
39367 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39368 if (!IS_ERR(tmp)) {
39369 fd = get_unused_fd_flags(flags);
39370 if (fd >= 0) {
39371 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39372 + struct file *f;
39373 + /* don't allow to be set by userland */
39374 + flags &= ~FMODE_GREXEC;
39375 + f = do_filp_open(dfd, tmp, &op, lookup);
39376 if (IS_ERR(f)) {
39377 put_unused_fd(fd);
39378 fd = PTR_ERR(f);
39379 diff -urNp linux-3.0.3/fs/partitions/ldm.c linux-3.0.3/fs/partitions/ldm.c
39380 --- linux-3.0.3/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
39381 +++ linux-3.0.3/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
39382 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39383 ldm_error ("A VBLK claims to have %d parts.", num);
39384 return false;
39385 }
39386 +
39387 if (rec >= num) {
39388 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39389 return false;
39390 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39391 goto found;
39392 }
39393
39394 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39395 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39396 if (!f) {
39397 ldm_crit ("Out of memory.");
39398 return false;
39399 diff -urNp linux-3.0.3/fs/pipe.c linux-3.0.3/fs/pipe.c
39400 --- linux-3.0.3/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
39401 +++ linux-3.0.3/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
39402 @@ -420,9 +420,9 @@ redo:
39403 }
39404 if (bufs) /* More to do? */
39405 continue;
39406 - if (!pipe->writers)
39407 + if (!atomic_read(&pipe->writers))
39408 break;
39409 - if (!pipe->waiting_writers) {
39410 + if (!atomic_read(&pipe->waiting_writers)) {
39411 /* syscall merging: Usually we must not sleep
39412 * if O_NONBLOCK is set, or if we got some data.
39413 * But if a writer sleeps in kernel space, then
39414 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39415 mutex_lock(&inode->i_mutex);
39416 pipe = inode->i_pipe;
39417
39418 - if (!pipe->readers) {
39419 + if (!atomic_read(&pipe->readers)) {
39420 send_sig(SIGPIPE, current, 0);
39421 ret = -EPIPE;
39422 goto out;
39423 @@ -530,7 +530,7 @@ redo1:
39424 for (;;) {
39425 int bufs;
39426
39427 - if (!pipe->readers) {
39428 + if (!atomic_read(&pipe->readers)) {
39429 send_sig(SIGPIPE, current, 0);
39430 if (!ret)
39431 ret = -EPIPE;
39432 @@ -616,9 +616,9 @@ redo2:
39433 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39434 do_wakeup = 0;
39435 }
39436 - pipe->waiting_writers++;
39437 + atomic_inc(&pipe->waiting_writers);
39438 pipe_wait(pipe);
39439 - pipe->waiting_writers--;
39440 + atomic_dec(&pipe->waiting_writers);
39441 }
39442 out:
39443 mutex_unlock(&inode->i_mutex);
39444 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39445 mask = 0;
39446 if (filp->f_mode & FMODE_READ) {
39447 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39448 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39449 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39450 mask |= POLLHUP;
39451 }
39452
39453 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39454 * Most Unices do not set POLLERR for FIFOs but on Linux they
39455 * behave exactly like pipes for poll().
39456 */
39457 - if (!pipe->readers)
39458 + if (!atomic_read(&pipe->readers))
39459 mask |= POLLERR;
39460 }
39461
39462 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39463
39464 mutex_lock(&inode->i_mutex);
39465 pipe = inode->i_pipe;
39466 - pipe->readers -= decr;
39467 - pipe->writers -= decw;
39468 + atomic_sub(decr, &pipe->readers);
39469 + atomic_sub(decw, &pipe->writers);
39470
39471 - if (!pipe->readers && !pipe->writers) {
39472 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39473 free_pipe_info(inode);
39474 } else {
39475 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39476 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39477
39478 if (inode->i_pipe) {
39479 ret = 0;
39480 - inode->i_pipe->readers++;
39481 + atomic_inc(&inode->i_pipe->readers);
39482 }
39483
39484 mutex_unlock(&inode->i_mutex);
39485 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39486
39487 if (inode->i_pipe) {
39488 ret = 0;
39489 - inode->i_pipe->writers++;
39490 + atomic_inc(&inode->i_pipe->writers);
39491 }
39492
39493 mutex_unlock(&inode->i_mutex);
39494 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39495 if (inode->i_pipe) {
39496 ret = 0;
39497 if (filp->f_mode & FMODE_READ)
39498 - inode->i_pipe->readers++;
39499 + atomic_inc(&inode->i_pipe->readers);
39500 if (filp->f_mode & FMODE_WRITE)
39501 - inode->i_pipe->writers++;
39502 + atomic_inc(&inode->i_pipe->writers);
39503 }
39504
39505 mutex_unlock(&inode->i_mutex);
39506 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39507 inode->i_pipe = NULL;
39508 }
39509
39510 -static struct vfsmount *pipe_mnt __read_mostly;
39511 +struct vfsmount *pipe_mnt __read_mostly;
39512
39513 /*
39514 * pipefs_dname() is called from d_path().
39515 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39516 goto fail_iput;
39517 inode->i_pipe = pipe;
39518
39519 - pipe->readers = pipe->writers = 1;
39520 + atomic_set(&pipe->readers, 1);
39521 + atomic_set(&pipe->writers, 1);
39522 inode->i_fop = &rdwr_pipefifo_fops;
39523
39524 /*
39525 diff -urNp linux-3.0.3/fs/proc/array.c linux-3.0.3/fs/proc/array.c
39526 --- linux-3.0.3/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
39527 +++ linux-3.0.3/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
39528 @@ -60,6 +60,7 @@
39529 #include <linux/tty.h>
39530 #include <linux/string.h>
39531 #include <linux/mman.h>
39532 +#include <linux/grsecurity.h>
39533 #include <linux/proc_fs.h>
39534 #include <linux/ioport.h>
39535 #include <linux/uaccess.h>
39536 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39537 seq_putc(m, '\n');
39538 }
39539
39540 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39541 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39542 +{
39543 + if (p->mm)
39544 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39545 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39546 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39547 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39548 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39549 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39550 + else
39551 + seq_printf(m, "PaX:\t-----\n");
39552 +}
39553 +#endif
39554 +
39555 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39556 struct pid *pid, struct task_struct *task)
39557 {
39558 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39559 task_cpus_allowed(m, task);
39560 cpuset_task_status_allowed(m, task);
39561 task_context_switch_counts(m, task);
39562 +
39563 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39564 + task_pax(m, task);
39565 +#endif
39566 +
39567 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39568 + task_grsec_rbac(m, task);
39569 +#endif
39570 +
39571 return 0;
39572 }
39573
39574 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39575 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39576 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39577 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39578 +#endif
39579 +
39580 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39581 struct pid *pid, struct task_struct *task, int whole)
39582 {
39583 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39584 cputime_t cutime, cstime, utime, stime;
39585 cputime_t cgtime, gtime;
39586 unsigned long rsslim = 0;
39587 - char tcomm[sizeof(task->comm)];
39588 + char tcomm[sizeof(task->comm)] = { 0 };
39589 unsigned long flags;
39590
39591 + pax_track_stack();
39592 +
39593 state = *get_task_state(task);
39594 vsize = eip = esp = 0;
39595 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39596 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39597 gtime = task->gtime;
39598 }
39599
39600 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39601 + if (PAX_RAND_FLAGS(mm)) {
39602 + eip = 0;
39603 + esp = 0;
39604 + wchan = 0;
39605 + }
39606 +#endif
39607 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39608 + wchan = 0;
39609 + eip =0;
39610 + esp =0;
39611 +#endif
39612 +
39613 /* scale priority and nice values from timeslices to -20..20 */
39614 /* to make it look like a "normal" Unix priority/nice value */
39615 priority = task_prio(task);
39616 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39617 vsize,
39618 mm ? get_mm_rss(mm) : 0,
39619 rsslim,
39620 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39621 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39622 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39623 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39624 +#else
39625 mm ? (permitted ? mm->start_code : 1) : 0,
39626 mm ? (permitted ? mm->end_code : 1) : 0,
39627 (permitted && mm) ? mm->start_stack : 0,
39628 +#endif
39629 esp,
39630 eip,
39631 /* The signal information here is obsolete.
39632 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39633
39634 return 0;
39635 }
39636 +
39637 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39638 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39639 +{
39640 + u32 curr_ip = 0;
39641 + unsigned long flags;
39642 +
39643 + if (lock_task_sighand(task, &flags)) {
39644 + curr_ip = task->signal->curr_ip;
39645 + unlock_task_sighand(task, &flags);
39646 + }
39647 +
39648 + return sprintf(buffer, "%pI4\n", &curr_ip);
39649 +}
39650 +#endif
39651 diff -urNp linux-3.0.3/fs/proc/base.c linux-3.0.3/fs/proc/base.c
39652 --- linux-3.0.3/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
39653 +++ linux-3.0.3/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
39654 @@ -107,6 +107,22 @@ struct pid_entry {
39655 union proc_op op;
39656 };
39657
39658 +struct getdents_callback {
39659 + struct linux_dirent __user * current_dir;
39660 + struct linux_dirent __user * previous;
39661 + struct file * file;
39662 + int count;
39663 + int error;
39664 +};
39665 +
39666 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39667 + loff_t offset, u64 ino, unsigned int d_type)
39668 +{
39669 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39670 + buf->error = -EINVAL;
39671 + return 0;
39672 +}
39673 +
39674 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39675 .name = (NAME), \
39676 .len = sizeof(NAME) - 1, \
39677 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
39678 if (task == current)
39679 return mm;
39680
39681 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39682 + return ERR_PTR(-EPERM);
39683 +
39684 /*
39685 * If current is actively ptrace'ing, and would also be
39686 * permitted to freshly attach with ptrace now, permit it.
39687 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
39688 if (!mm->arg_end)
39689 goto out_mm; /* Shh! No looking before we're done */
39690
39691 + if (gr_acl_handle_procpidmem(task))
39692 + goto out_mm;
39693 +
39694 len = mm->arg_end - mm->arg_start;
39695
39696 if (len > PAGE_SIZE)
39697 @@ -309,12 +331,28 @@ out:
39698 return res;
39699 }
39700
39701 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39702 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39703 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39704 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39705 +#endif
39706 +
39707 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39708 {
39709 struct mm_struct *mm = mm_for_maps(task);
39710 int res = PTR_ERR(mm);
39711 if (mm && !IS_ERR(mm)) {
39712 unsigned int nwords = 0;
39713 +
39714 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39715 + /* allow if we're currently ptracing this task */
39716 + if (PAX_RAND_FLAGS(mm) &&
39717 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39718 + mmput(mm);
39719 + return res;
39720 + }
39721 +#endif
39722 +
39723 do {
39724 nwords += 2;
39725 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39726 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
39727 }
39728
39729
39730 -#ifdef CONFIG_KALLSYMS
39731 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39732 /*
39733 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39734 * Returns the resolved symbol. If that fails, simply return the address.
39735 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
39736 mutex_unlock(&task->signal->cred_guard_mutex);
39737 }
39738
39739 -#ifdef CONFIG_STACKTRACE
39740 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39741
39742 #define MAX_STACK_TRACE_DEPTH 64
39743
39744 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
39745 return count;
39746 }
39747
39748 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39749 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39750 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39751 {
39752 long nr;
39753 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
39754 /************************************************************************/
39755
39756 /* permission checks */
39757 -static int proc_fd_access_allowed(struct inode *inode)
39758 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39759 {
39760 struct task_struct *task;
39761 int allowed = 0;
39762 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
39763 */
39764 task = get_proc_task(inode);
39765 if (task) {
39766 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39767 + if (log)
39768 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39769 + else
39770 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39771 put_task_struct(task);
39772 }
39773 return allowed;
39774 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
39775 if (!task)
39776 goto out_no_task;
39777
39778 + if (gr_acl_handle_procpidmem(task))
39779 + goto out;
39780 +
39781 ret = -ENOMEM;
39782 page = (char *)__get_free_page(GFP_TEMPORARY);
39783 if (!page)
39784 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
39785 path_put(&nd->path);
39786
39787 /* Are we allowed to snoop on the tasks file descriptors? */
39788 - if (!proc_fd_access_allowed(inode))
39789 + if (!proc_fd_access_allowed(inode,0))
39790 goto out;
39791
39792 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39793 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
39794 struct path path;
39795
39796 /* Are we allowed to snoop on the tasks file descriptors? */
39797 - if (!proc_fd_access_allowed(inode))
39798 - goto out;
39799 + /* logging this is needed for learning on chromium to work properly,
39800 + but we don't want to flood the logs from 'ps' which does a readlink
39801 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39802 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39803 + */
39804 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39805 + if (!proc_fd_access_allowed(inode,0))
39806 + goto out;
39807 + } else {
39808 + if (!proc_fd_access_allowed(inode,1))
39809 + goto out;
39810 + }
39811
39812 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39813 if (error)
39814 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
39815 rcu_read_lock();
39816 cred = __task_cred(task);
39817 inode->i_uid = cred->euid;
39818 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39819 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39820 +#else
39821 inode->i_gid = cred->egid;
39822 +#endif
39823 rcu_read_unlock();
39824 }
39825 security_task_to_inode(task, inode);
39826 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
39827 struct inode *inode = dentry->d_inode;
39828 struct task_struct *task;
39829 const struct cred *cred;
39830 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39831 + const struct cred *tmpcred = current_cred();
39832 +#endif
39833
39834 generic_fillattr(inode, stat);
39835
39836 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
39837 stat->uid = 0;
39838 stat->gid = 0;
39839 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39840 +
39841 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39842 + rcu_read_unlock();
39843 + return -ENOENT;
39844 + }
39845 +
39846 if (task) {
39847 + cred = __task_cred(task);
39848 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39849 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39850 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39851 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39852 +#endif
39853 + ) {
39854 +#endif
39855 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39856 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39857 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39858 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39859 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39860 +#endif
39861 task_dumpable(task)) {
39862 - cred = __task_cred(task);
39863 stat->uid = cred->euid;
39864 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39865 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39866 +#else
39867 stat->gid = cred->egid;
39868 +#endif
39869 }
39870 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39871 + } else {
39872 + rcu_read_unlock();
39873 + return -ENOENT;
39874 + }
39875 +#endif
39876 }
39877 rcu_read_unlock();
39878 return 0;
39879 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
39880
39881 if (task) {
39882 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39883 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39884 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39885 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39886 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39887 +#endif
39888 task_dumpable(task)) {
39889 rcu_read_lock();
39890 cred = __task_cred(task);
39891 inode->i_uid = cred->euid;
39892 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39893 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39894 +#else
39895 inode->i_gid = cred->egid;
39896 +#endif
39897 rcu_read_unlock();
39898 } else {
39899 inode->i_uid = 0;
39900 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
39901 int fd = proc_fd(inode);
39902
39903 if (task) {
39904 - files = get_files_struct(task);
39905 + if (!gr_acl_handle_procpidmem(task))
39906 + files = get_files_struct(task);
39907 put_task_struct(task);
39908 }
39909 if (files) {
39910 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
39911 */
39912 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39913 {
39914 + struct task_struct *task;
39915 int rv = generic_permission(inode, mask, flags, NULL);
39916 - if (rv == 0)
39917 - return 0;
39918 +
39919 if (task_pid(current) == proc_pid(inode))
39920 rv = 0;
39921 +
39922 + task = get_proc_task(inode);
39923 + if (task == NULL)
39924 + return rv;
39925 +
39926 + if (gr_acl_handle_procpidmem(task))
39927 + rv = -EACCES;
39928 +
39929 + put_task_struct(task);
39930 +
39931 return rv;
39932 }
39933
39934 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
39935 if (!task)
39936 goto out_no_task;
39937
39938 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39939 + goto out;
39940 +
39941 /*
39942 * Yes, it does not scale. And it should not. Don't add
39943 * new entries into /proc/<tgid>/ without very good reasons.
39944 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
39945 if (!task)
39946 goto out_no_task;
39947
39948 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39949 + goto out;
39950 +
39951 ret = 0;
39952 i = filp->f_pos;
39953 switch (i) {
39954 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
39955 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39956 void *cookie)
39957 {
39958 - char *s = nd_get_link(nd);
39959 + const char *s = nd_get_link(nd);
39960 if (!IS_ERR(s))
39961 __putname(s);
39962 }
39963 @@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
39964 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39965 #endif
39966 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39967 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39968 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39969 INF("syscall", S_IRUGO, proc_pid_syscall),
39970 #endif
39971 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39972 @@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
39973 #ifdef CONFIG_SECURITY
39974 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39975 #endif
39976 -#ifdef CONFIG_KALLSYMS
39977 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39978 INF("wchan", S_IRUGO, proc_pid_wchan),
39979 #endif
39980 -#ifdef CONFIG_STACKTRACE
39981 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39982 ONE("stack", S_IRUGO, proc_pid_stack),
39983 #endif
39984 #ifdef CONFIG_SCHEDSTATS
39985 @@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
39986 #ifdef CONFIG_HARDWALL
39987 INF("hardwall", S_IRUGO, proc_pid_hardwall),
39988 #endif
39989 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39990 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
39991 +#endif
39992 };
39993
39994 static int proc_tgid_base_readdir(struct file * filp,
39995 @@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
39996 if (!inode)
39997 goto out;
39998
39999 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40000 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
40001 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40002 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40003 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
40004 +#else
40005 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
40006 +#endif
40007 inode->i_op = &proc_tgid_base_inode_operations;
40008 inode->i_fop = &proc_tgid_base_operations;
40009 inode->i_flags|=S_IMMUTABLE;
40010 @@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
40011 if (!task)
40012 goto out;
40013
40014 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40015 + goto out_put_task;
40016 +
40017 result = proc_pid_instantiate(dir, dentry, task, NULL);
40018 +out_put_task:
40019 put_task_struct(task);
40020 out:
40021 return result;
40022 @@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
40023 {
40024 unsigned int nr;
40025 struct task_struct *reaper;
40026 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40027 + const struct cred *tmpcred = current_cred();
40028 + const struct cred *itercred;
40029 +#endif
40030 + filldir_t __filldir = filldir;
40031 struct tgid_iter iter;
40032 struct pid_namespace *ns;
40033
40034 @@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
40035 for (iter = next_tgid(ns, iter);
40036 iter.task;
40037 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40038 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40039 + rcu_read_lock();
40040 + itercred = __task_cred(iter.task);
40041 +#endif
40042 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40043 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40044 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40045 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40046 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40047 +#endif
40048 + )
40049 +#endif
40050 + )
40051 + __filldir = &gr_fake_filldir;
40052 + else
40053 + __filldir = filldir;
40054 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40055 + rcu_read_unlock();
40056 +#endif
40057 filp->f_pos = iter.tgid + TGID_OFFSET;
40058 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40059 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40060 put_task_struct(iter.task);
40061 goto out;
40062 }
40063 @@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
40064 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40065 #endif
40066 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40067 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40068 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40069 INF("syscall", S_IRUGO, proc_pid_syscall),
40070 #endif
40071 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40072 @@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
40073 #ifdef CONFIG_SECURITY
40074 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40075 #endif
40076 -#ifdef CONFIG_KALLSYMS
40077 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40078 INF("wchan", S_IRUGO, proc_pid_wchan),
40079 #endif
40080 -#ifdef CONFIG_STACKTRACE
40081 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40082 ONE("stack", S_IRUGO, proc_pid_stack),
40083 #endif
40084 #ifdef CONFIG_SCHEDSTATS
40085 diff -urNp linux-3.0.3/fs/proc/cmdline.c linux-3.0.3/fs/proc/cmdline.c
40086 --- linux-3.0.3/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
40087 +++ linux-3.0.3/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
40088 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
40089
40090 static int __init proc_cmdline_init(void)
40091 {
40092 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40093 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40094 +#else
40095 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40096 +#endif
40097 return 0;
40098 }
40099 module_init(proc_cmdline_init);
40100 diff -urNp linux-3.0.3/fs/proc/devices.c linux-3.0.3/fs/proc/devices.c
40101 --- linux-3.0.3/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
40102 +++ linux-3.0.3/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
40103 @@ -64,7 +64,11 @@ static const struct file_operations proc
40104
40105 static int __init proc_devices_init(void)
40106 {
40107 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40108 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40109 +#else
40110 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40111 +#endif
40112 return 0;
40113 }
40114 module_init(proc_devices_init);
40115 diff -urNp linux-3.0.3/fs/proc/inode.c linux-3.0.3/fs/proc/inode.c
40116 --- linux-3.0.3/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
40117 +++ linux-3.0.3/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
40118 @@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
40119 if (de->mode) {
40120 inode->i_mode = de->mode;
40121 inode->i_uid = de->uid;
40122 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40123 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40124 +#else
40125 inode->i_gid = de->gid;
40126 +#endif
40127 }
40128 if (de->size)
40129 inode->i_size = de->size;
40130 diff -urNp linux-3.0.3/fs/proc/internal.h linux-3.0.3/fs/proc/internal.h
40131 --- linux-3.0.3/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
40132 +++ linux-3.0.3/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
40133 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40134 struct pid *pid, struct task_struct *task);
40135 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40136 struct pid *pid, struct task_struct *task);
40137 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40138 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40139 +#endif
40140 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40141
40142 extern const struct file_operations proc_maps_operations;
40143 diff -urNp linux-3.0.3/fs/proc/Kconfig linux-3.0.3/fs/proc/Kconfig
40144 --- linux-3.0.3/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
40145 +++ linux-3.0.3/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
40146 @@ -30,12 +30,12 @@ config PROC_FS
40147
40148 config PROC_KCORE
40149 bool "/proc/kcore support" if !ARM
40150 - depends on PROC_FS && MMU
40151 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40152
40153 config PROC_VMCORE
40154 bool "/proc/vmcore support"
40155 - depends on PROC_FS && CRASH_DUMP
40156 - default y
40157 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40158 + default n
40159 help
40160 Exports the dump image of crashed kernel in ELF format.
40161
40162 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40163 limited in memory.
40164
40165 config PROC_PAGE_MONITOR
40166 - default y
40167 - depends on PROC_FS && MMU
40168 + default n
40169 + depends on PROC_FS && MMU && !GRKERNSEC
40170 bool "Enable /proc page monitoring" if EXPERT
40171 help
40172 Various /proc files exist to monitor process memory utilization:
40173 diff -urNp linux-3.0.3/fs/proc/kcore.c linux-3.0.3/fs/proc/kcore.c
40174 --- linux-3.0.3/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
40175 +++ linux-3.0.3/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
40176 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40177 off_t offset = 0;
40178 struct kcore_list *m;
40179
40180 + pax_track_stack();
40181 +
40182 /* setup ELF header */
40183 elf = (struct elfhdr *) bufp;
40184 bufp += sizeof(struct elfhdr);
40185 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40186 * the addresses in the elf_phdr on our list.
40187 */
40188 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40189 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40190 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40191 + if (tsz > buflen)
40192 tsz = buflen;
40193 -
40194 +
40195 while (buflen) {
40196 struct kcore_list *m;
40197
40198 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40199 kfree(elf_buf);
40200 } else {
40201 if (kern_addr_valid(start)) {
40202 - unsigned long n;
40203 + char *elf_buf;
40204 + mm_segment_t oldfs;
40205
40206 - n = copy_to_user(buffer, (char *)start, tsz);
40207 - /*
40208 - * We cannot distingush between fault on source
40209 - * and fault on destination. When this happens
40210 - * we clear too and hope it will trigger the
40211 - * EFAULT again.
40212 - */
40213 - if (n) {
40214 - if (clear_user(buffer + tsz - n,
40215 - n))
40216 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40217 + if (!elf_buf)
40218 + return -ENOMEM;
40219 + oldfs = get_fs();
40220 + set_fs(KERNEL_DS);
40221 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40222 + set_fs(oldfs);
40223 + if (copy_to_user(buffer, elf_buf, tsz)) {
40224 + kfree(elf_buf);
40225 return -EFAULT;
40226 + }
40227 }
40228 + set_fs(oldfs);
40229 + kfree(elf_buf);
40230 } else {
40231 if (clear_user(buffer, tsz))
40232 return -EFAULT;
40233 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40234
40235 static int open_kcore(struct inode *inode, struct file *filp)
40236 {
40237 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40238 + return -EPERM;
40239 +#endif
40240 if (!capable(CAP_SYS_RAWIO))
40241 return -EPERM;
40242 if (kcore_need_update)
40243 diff -urNp linux-3.0.3/fs/proc/meminfo.c linux-3.0.3/fs/proc/meminfo.c
40244 --- linux-3.0.3/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
40245 +++ linux-3.0.3/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
40246 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40247 unsigned long pages[NR_LRU_LISTS];
40248 int lru;
40249
40250 + pax_track_stack();
40251 +
40252 /*
40253 * display in kilobytes.
40254 */
40255 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40256 vmi.used >> 10,
40257 vmi.largest_chunk >> 10
40258 #ifdef CONFIG_MEMORY_FAILURE
40259 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40260 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40261 #endif
40262 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40263 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40264 diff -urNp linux-3.0.3/fs/proc/nommu.c linux-3.0.3/fs/proc/nommu.c
40265 --- linux-3.0.3/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
40266 +++ linux-3.0.3/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
40267 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40268 if (len < 1)
40269 len = 1;
40270 seq_printf(m, "%*c", len, ' ');
40271 - seq_path(m, &file->f_path, "");
40272 + seq_path(m, &file->f_path, "\n\\");
40273 }
40274
40275 seq_putc(m, '\n');
40276 diff -urNp linux-3.0.3/fs/proc/proc_net.c linux-3.0.3/fs/proc/proc_net.c
40277 --- linux-3.0.3/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
40278 +++ linux-3.0.3/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
40279 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40280 struct task_struct *task;
40281 struct nsproxy *ns;
40282 struct net *net = NULL;
40283 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40284 + const struct cred *cred = current_cred();
40285 +#endif
40286 +
40287 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40288 + if (cred->fsuid)
40289 + return net;
40290 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40291 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40292 + return net;
40293 +#endif
40294
40295 rcu_read_lock();
40296 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40297 diff -urNp linux-3.0.3/fs/proc/proc_sysctl.c linux-3.0.3/fs/proc/proc_sysctl.c
40298 --- linux-3.0.3/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
40299 +++ linux-3.0.3/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
40300 @@ -8,6 +8,8 @@
40301 #include <linux/namei.h>
40302 #include "internal.h"
40303
40304 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40305 +
40306 static const struct dentry_operations proc_sys_dentry_operations;
40307 static const struct file_operations proc_sys_file_operations;
40308 static const struct inode_operations proc_sys_inode_operations;
40309 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40310 if (!p)
40311 goto out;
40312
40313 + if (gr_handle_sysctl(p, MAY_EXEC))
40314 + goto out;
40315 +
40316 err = ERR_PTR(-ENOMEM);
40317 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40318 if (h)
40319 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40320 if (*pos < file->f_pos)
40321 continue;
40322
40323 + if (gr_handle_sysctl(table, 0))
40324 + continue;
40325 +
40326 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40327 if (res)
40328 return res;
40329 @@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
40330 if (IS_ERR(head))
40331 return PTR_ERR(head);
40332
40333 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40334 + return -ENOENT;
40335 +
40336 generic_fillattr(inode, stat);
40337 if (table)
40338 stat->mode = (stat->mode & S_IFMT) | table->mode;
40339 diff -urNp linux-3.0.3/fs/proc/root.c linux-3.0.3/fs/proc/root.c
40340 --- linux-3.0.3/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
40341 +++ linux-3.0.3/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
40342 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
40343 #ifdef CONFIG_PROC_DEVICETREE
40344 proc_device_tree_init();
40345 #endif
40346 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40347 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40348 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40349 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40350 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40351 +#endif
40352 +#else
40353 proc_mkdir("bus", NULL);
40354 +#endif
40355 proc_sys_init();
40356 }
40357
40358 diff -urNp linux-3.0.3/fs/proc/task_mmu.c linux-3.0.3/fs/proc/task_mmu.c
40359 --- linux-3.0.3/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
40360 +++ linux-3.0.3/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
40361 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40362 "VmExe:\t%8lu kB\n"
40363 "VmLib:\t%8lu kB\n"
40364 "VmPTE:\t%8lu kB\n"
40365 - "VmSwap:\t%8lu kB\n",
40366 - hiwater_vm << (PAGE_SHIFT-10),
40367 + "VmSwap:\t%8lu kB\n"
40368 +
40369 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40370 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40371 +#endif
40372 +
40373 + ,hiwater_vm << (PAGE_SHIFT-10),
40374 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40375 mm->locked_vm << (PAGE_SHIFT-10),
40376 hiwater_rss << (PAGE_SHIFT-10),
40377 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40378 data << (PAGE_SHIFT-10),
40379 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40380 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40381 - swap << (PAGE_SHIFT-10));
40382 + swap << (PAGE_SHIFT-10)
40383 +
40384 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40385 + , mm->context.user_cs_base, mm->context.user_cs_limit
40386 +#endif
40387 +
40388 + );
40389 }
40390
40391 unsigned long task_vsize(struct mm_struct *mm)
40392 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40393 return ret;
40394 }
40395
40396 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40397 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40398 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40399 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40400 +#endif
40401 +
40402 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40403 {
40404 struct mm_struct *mm = vma->vm_mm;
40405 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40406 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40407 }
40408
40409 - /* We don't show the stack guard page in /proc/maps */
40410 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40411 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40412 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40413 +#else
40414 start = vma->vm_start;
40415 - if (stack_guard_page_start(vma, start))
40416 - start += PAGE_SIZE;
40417 end = vma->vm_end;
40418 - if (stack_guard_page_end(vma, end))
40419 - end -= PAGE_SIZE;
40420 +#endif
40421
40422 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40423 start,
40424 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40425 flags & VM_WRITE ? 'w' : '-',
40426 flags & VM_EXEC ? 'x' : '-',
40427 flags & VM_MAYSHARE ? 's' : 'p',
40428 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40429 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40430 +#else
40431 pgoff,
40432 +#endif
40433 MAJOR(dev), MINOR(dev), ino, &len);
40434
40435 /*
40436 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40437 */
40438 if (file) {
40439 pad_len_spaces(m, len);
40440 - seq_path(m, &file->f_path, "\n");
40441 + seq_path(m, &file->f_path, "\n\\");
40442 } else {
40443 const char *name = arch_vma_name(vma);
40444 if (!name) {
40445 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40446 if (vma->vm_start <= mm->brk &&
40447 vma->vm_end >= mm->start_brk) {
40448 name = "[heap]";
40449 - } else if (vma->vm_start <= mm->start_stack &&
40450 - vma->vm_end >= mm->start_stack) {
40451 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40452 + (vma->vm_start <= mm->start_stack &&
40453 + vma->vm_end >= mm->start_stack)) {
40454 name = "[stack]";
40455 }
40456 } else {
40457 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40458 };
40459
40460 memset(&mss, 0, sizeof mss);
40461 - mss.vma = vma;
40462 - /* mmap_sem is held in m_start */
40463 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40464 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40465 -
40466 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40467 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40468 +#endif
40469 + mss.vma = vma;
40470 + /* mmap_sem is held in m_start */
40471 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40472 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40473 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40474 + }
40475 +#endif
40476 show_map_vma(m, vma);
40477
40478 seq_printf(m,
40479 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40480 "KernelPageSize: %8lu kB\n"
40481 "MMUPageSize: %8lu kB\n"
40482 "Locked: %8lu kB\n",
40483 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40484 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40485 +#else
40486 (vma->vm_end - vma->vm_start) >> 10,
40487 +#endif
40488 mss.resident >> 10,
40489 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40490 mss.shared_clean >> 10,
40491 @@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
40492
40493 if (file) {
40494 seq_printf(m, " file=");
40495 - seq_path(m, &file->f_path, "\n\t= ");
40496 + seq_path(m, &file->f_path, "\n\t\\= ");
40497 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
40498 seq_printf(m, " heap");
40499 } else if (vma->vm_start <= mm->start_stack &&
40500 diff -urNp linux-3.0.3/fs/proc/task_nommu.c linux-3.0.3/fs/proc/task_nommu.c
40501 --- linux-3.0.3/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
40502 +++ linux-3.0.3/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
40503 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40504 else
40505 bytes += kobjsize(mm);
40506
40507 - if (current->fs && current->fs->users > 1)
40508 + if (current->fs && atomic_read(&current->fs->users) > 1)
40509 sbytes += kobjsize(current->fs);
40510 else
40511 bytes += kobjsize(current->fs);
40512 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40513
40514 if (file) {
40515 pad_len_spaces(m, len);
40516 - seq_path(m, &file->f_path, "");
40517 + seq_path(m, &file->f_path, "\n\\");
40518 } else if (mm) {
40519 if (vma->vm_start <= mm->start_stack &&
40520 vma->vm_end >= mm->start_stack) {
40521 diff -urNp linux-3.0.3/fs/quota/netlink.c linux-3.0.3/fs/quota/netlink.c
40522 --- linux-3.0.3/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
40523 +++ linux-3.0.3/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
40524 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40525 void quota_send_warning(short type, unsigned int id, dev_t dev,
40526 const char warntype)
40527 {
40528 - static atomic_t seq;
40529 + static atomic_unchecked_t seq;
40530 struct sk_buff *skb;
40531 void *msg_head;
40532 int ret;
40533 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40534 "VFS: Not enough memory to send quota warning.\n");
40535 return;
40536 }
40537 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40538 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40539 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40540 if (!msg_head) {
40541 printk(KERN_ERR
40542 diff -urNp linux-3.0.3/fs/readdir.c linux-3.0.3/fs/readdir.c
40543 --- linux-3.0.3/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
40544 +++ linux-3.0.3/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
40545 @@ -17,6 +17,7 @@
40546 #include <linux/security.h>
40547 #include <linux/syscalls.h>
40548 #include <linux/unistd.h>
40549 +#include <linux/namei.h>
40550
40551 #include <asm/uaccess.h>
40552
40553 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40554
40555 struct readdir_callback {
40556 struct old_linux_dirent __user * dirent;
40557 + struct file * file;
40558 int result;
40559 };
40560
40561 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40562 buf->result = -EOVERFLOW;
40563 return -EOVERFLOW;
40564 }
40565 +
40566 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40567 + return 0;
40568 +
40569 buf->result++;
40570 dirent = buf->dirent;
40571 if (!access_ok(VERIFY_WRITE, dirent,
40572 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40573
40574 buf.result = 0;
40575 buf.dirent = dirent;
40576 + buf.file = file;
40577
40578 error = vfs_readdir(file, fillonedir, &buf);
40579 if (buf.result)
40580 @@ -142,6 +149,7 @@ struct linux_dirent {
40581 struct getdents_callback {
40582 struct linux_dirent __user * current_dir;
40583 struct linux_dirent __user * previous;
40584 + struct file * file;
40585 int count;
40586 int error;
40587 };
40588 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40589 buf->error = -EOVERFLOW;
40590 return -EOVERFLOW;
40591 }
40592 +
40593 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40594 + return 0;
40595 +
40596 dirent = buf->previous;
40597 if (dirent) {
40598 if (__put_user(offset, &dirent->d_off))
40599 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40600 buf.previous = NULL;
40601 buf.count = count;
40602 buf.error = 0;
40603 + buf.file = file;
40604
40605 error = vfs_readdir(file, filldir, &buf);
40606 if (error >= 0)
40607 @@ -229,6 +242,7 @@ out:
40608 struct getdents_callback64 {
40609 struct linux_dirent64 __user * current_dir;
40610 struct linux_dirent64 __user * previous;
40611 + struct file *file;
40612 int count;
40613 int error;
40614 };
40615 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40616 buf->error = -EINVAL; /* only used if we fail.. */
40617 if (reclen > buf->count)
40618 return -EINVAL;
40619 +
40620 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40621 + return 0;
40622 +
40623 dirent = buf->previous;
40624 if (dirent) {
40625 if (__put_user(offset, &dirent->d_off))
40626 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40627
40628 buf.current_dir = dirent;
40629 buf.previous = NULL;
40630 + buf.file = file;
40631 buf.count = count;
40632 buf.error = 0;
40633
40634 diff -urNp linux-3.0.3/fs/reiserfs/dir.c linux-3.0.3/fs/reiserfs/dir.c
40635 --- linux-3.0.3/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40636 +++ linux-3.0.3/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
40637 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40638 struct reiserfs_dir_entry de;
40639 int ret = 0;
40640
40641 + pax_track_stack();
40642 +
40643 reiserfs_write_lock(inode->i_sb);
40644
40645 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40646 diff -urNp linux-3.0.3/fs/reiserfs/do_balan.c linux-3.0.3/fs/reiserfs/do_balan.c
40647 --- linux-3.0.3/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
40648 +++ linux-3.0.3/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
40649 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40650 return;
40651 }
40652
40653 - atomic_inc(&(fs_generation(tb->tb_sb)));
40654 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40655 do_balance_starts(tb);
40656
40657 /* balance leaf returns 0 except if combining L R and S into
40658 diff -urNp linux-3.0.3/fs/reiserfs/journal.c linux-3.0.3/fs/reiserfs/journal.c
40659 --- linux-3.0.3/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
40660 +++ linux-3.0.3/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
40661 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40662 struct buffer_head *bh;
40663 int i, j;
40664
40665 + pax_track_stack();
40666 +
40667 bh = __getblk(dev, block, bufsize);
40668 if (buffer_uptodate(bh))
40669 return (bh);
40670 diff -urNp linux-3.0.3/fs/reiserfs/namei.c linux-3.0.3/fs/reiserfs/namei.c
40671 --- linux-3.0.3/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
40672 +++ linux-3.0.3/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
40673 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40674 unsigned long savelink = 1;
40675 struct timespec ctime;
40676
40677 + pax_track_stack();
40678 +
40679 /* three balancings: (1) old name removal, (2) new name insertion
40680 and (3) maybe "save" link insertion
40681 stat data updates: (1) old directory,
40682 diff -urNp linux-3.0.3/fs/reiserfs/procfs.c linux-3.0.3/fs/reiserfs/procfs.c
40683 --- linux-3.0.3/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
40684 +++ linux-3.0.3/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
40685 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40686 "SMALL_TAILS " : "NO_TAILS ",
40687 replay_only(sb) ? "REPLAY_ONLY " : "",
40688 convert_reiserfs(sb) ? "CONV " : "",
40689 - atomic_read(&r->s_generation_counter),
40690 + atomic_read_unchecked(&r->s_generation_counter),
40691 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40692 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40693 SF(s_good_search_by_key_reada), SF(s_bmaps),
40694 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40695 struct journal_params *jp = &rs->s_v1.s_journal;
40696 char b[BDEVNAME_SIZE];
40697
40698 + pax_track_stack();
40699 +
40700 seq_printf(m, /* on-disk fields */
40701 "jp_journal_1st_block: \t%i\n"
40702 "jp_journal_dev: \t%s[%x]\n"
40703 diff -urNp linux-3.0.3/fs/reiserfs/stree.c linux-3.0.3/fs/reiserfs/stree.c
40704 --- linux-3.0.3/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
40705 +++ linux-3.0.3/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
40706 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40707 int iter = 0;
40708 #endif
40709
40710 + pax_track_stack();
40711 +
40712 BUG_ON(!th->t_trans_id);
40713
40714 init_tb_struct(th, &s_del_balance, sb, path,
40715 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40716 int retval;
40717 int quota_cut_bytes = 0;
40718
40719 + pax_track_stack();
40720 +
40721 BUG_ON(!th->t_trans_id);
40722
40723 le_key2cpu_key(&cpu_key, key);
40724 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40725 int quota_cut_bytes;
40726 loff_t tail_pos = 0;
40727
40728 + pax_track_stack();
40729 +
40730 BUG_ON(!th->t_trans_id);
40731
40732 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40733 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40734 int retval;
40735 int fs_gen;
40736
40737 + pax_track_stack();
40738 +
40739 BUG_ON(!th->t_trans_id);
40740
40741 fs_gen = get_generation(inode->i_sb);
40742 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40743 int fs_gen = 0;
40744 int quota_bytes = 0;
40745
40746 + pax_track_stack();
40747 +
40748 BUG_ON(!th->t_trans_id);
40749
40750 if (inode) { /* Do we count quotas for item? */
40751 diff -urNp linux-3.0.3/fs/reiserfs/super.c linux-3.0.3/fs/reiserfs/super.c
40752 --- linux-3.0.3/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
40753 +++ linux-3.0.3/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
40754 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40755 {.option_name = NULL}
40756 };
40757
40758 + pax_track_stack();
40759 +
40760 *blocks = 0;
40761 if (!options || !*options)
40762 /* use default configuration: create tails, journaling on, no
40763 diff -urNp linux-3.0.3/fs/select.c linux-3.0.3/fs/select.c
40764 --- linux-3.0.3/fs/select.c 2011-07-21 22:17:23.000000000 -0400
40765 +++ linux-3.0.3/fs/select.c 2011-08-23 21:48:14.000000000 -0400
40766 @@ -20,6 +20,7 @@
40767 #include <linux/module.h>
40768 #include <linux/slab.h>
40769 #include <linux/poll.h>
40770 +#include <linux/security.h>
40771 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40772 #include <linux/file.h>
40773 #include <linux/fdtable.h>
40774 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40775 int retval, i, timed_out = 0;
40776 unsigned long slack = 0;
40777
40778 + pax_track_stack();
40779 +
40780 rcu_read_lock();
40781 retval = max_select_fd(n, fds);
40782 rcu_read_unlock();
40783 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40784 /* Allocate small arguments on the stack to save memory and be faster */
40785 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40786
40787 + pax_track_stack();
40788 +
40789 ret = -EINVAL;
40790 if (n < 0)
40791 goto out_nofds;
40792 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40793 struct poll_list *walk = head;
40794 unsigned long todo = nfds;
40795
40796 + pax_track_stack();
40797 +
40798 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40799 if (nfds > rlimit(RLIMIT_NOFILE))
40800 return -EINVAL;
40801
40802 diff -urNp linux-3.0.3/fs/seq_file.c linux-3.0.3/fs/seq_file.c
40803 --- linux-3.0.3/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
40804 +++ linux-3.0.3/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
40805 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40806 return 0;
40807 }
40808 if (!m->buf) {
40809 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40810 + m->size = PAGE_SIZE;
40811 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40812 if (!m->buf)
40813 return -ENOMEM;
40814 }
40815 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40816 Eoverflow:
40817 m->op->stop(m, p);
40818 kfree(m->buf);
40819 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40820 + m->size <<= 1;
40821 + m->buf = kmalloc(m->size, GFP_KERNEL);
40822 return !m->buf ? -ENOMEM : -EAGAIN;
40823 }
40824
40825 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40826 m->version = file->f_version;
40827 /* grab buffer if we didn't have one */
40828 if (!m->buf) {
40829 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40830 + m->size = PAGE_SIZE;
40831 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40832 if (!m->buf)
40833 goto Enomem;
40834 }
40835 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40836 goto Fill;
40837 m->op->stop(m, p);
40838 kfree(m->buf);
40839 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40840 + m->size <<= 1;
40841 + m->buf = kmalloc(m->size, GFP_KERNEL);
40842 if (!m->buf)
40843 goto Enomem;
40844 m->count = 0;
40845 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40846 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40847 void *data)
40848 {
40849 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40850 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40851 int res = -ENOMEM;
40852
40853 if (op) {
40854 diff -urNp linux-3.0.3/fs/splice.c linux-3.0.3/fs/splice.c
40855 --- linux-3.0.3/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
40856 +++ linux-3.0.3/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
40857 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40858 pipe_lock(pipe);
40859
40860 for (;;) {
40861 - if (!pipe->readers) {
40862 + if (!atomic_read(&pipe->readers)) {
40863 send_sig(SIGPIPE, current, 0);
40864 if (!ret)
40865 ret = -EPIPE;
40866 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40867 do_wakeup = 0;
40868 }
40869
40870 - pipe->waiting_writers++;
40871 + atomic_inc(&pipe->waiting_writers);
40872 pipe_wait(pipe);
40873 - pipe->waiting_writers--;
40874 + atomic_dec(&pipe->waiting_writers);
40875 }
40876
40877 pipe_unlock(pipe);
40878 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
40879 .spd_release = spd_release_page,
40880 };
40881
40882 + pax_track_stack();
40883 +
40884 if (splice_grow_spd(pipe, &spd))
40885 return -ENOMEM;
40886
40887 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
40888 old_fs = get_fs();
40889 set_fs(get_ds());
40890 /* The cast to a user pointer is valid due to the set_fs() */
40891 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40892 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40893 set_fs(old_fs);
40894
40895 return res;
40896 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
40897 old_fs = get_fs();
40898 set_fs(get_ds());
40899 /* The cast to a user pointer is valid due to the set_fs() */
40900 - res = vfs_write(file, (const char __user *)buf, count, &pos);
40901 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40902 set_fs(old_fs);
40903
40904 return res;
40905 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
40906 .spd_release = spd_release_page,
40907 };
40908
40909 + pax_track_stack();
40910 +
40911 if (splice_grow_spd(pipe, &spd))
40912 return -ENOMEM;
40913
40914 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
40915 goto err;
40916
40917 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40918 - vec[i].iov_base = (void __user *) page_address(page);
40919 + vec[i].iov_base = (__force void __user *) page_address(page);
40920 vec[i].iov_len = this_len;
40921 spd.pages[i] = page;
40922 spd.nr_pages++;
40923 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40924 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40925 {
40926 while (!pipe->nrbufs) {
40927 - if (!pipe->writers)
40928 + if (!atomic_read(&pipe->writers))
40929 return 0;
40930
40931 - if (!pipe->waiting_writers && sd->num_spliced)
40932 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40933 return 0;
40934
40935 if (sd->flags & SPLICE_F_NONBLOCK)
40936 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
40937 * out of the pipe right after the splice_to_pipe(). So set
40938 * PIPE_READERS appropriately.
40939 */
40940 - pipe->readers = 1;
40941 + atomic_set(&pipe->readers, 1);
40942
40943 current->splice_pipe = pipe;
40944 }
40945 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
40946 };
40947 long ret;
40948
40949 + pax_track_stack();
40950 +
40951 pipe = get_pipe_info(file);
40952 if (!pipe)
40953 return -EBADF;
40954 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
40955 ret = -ERESTARTSYS;
40956 break;
40957 }
40958 - if (!pipe->writers)
40959 + if (!atomic_read(&pipe->writers))
40960 break;
40961 - if (!pipe->waiting_writers) {
40962 + if (!atomic_read(&pipe->waiting_writers)) {
40963 if (flags & SPLICE_F_NONBLOCK) {
40964 ret = -EAGAIN;
40965 break;
40966 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
40967 pipe_lock(pipe);
40968
40969 while (pipe->nrbufs >= pipe->buffers) {
40970 - if (!pipe->readers) {
40971 + if (!atomic_read(&pipe->readers)) {
40972 send_sig(SIGPIPE, current, 0);
40973 ret = -EPIPE;
40974 break;
40975 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
40976 ret = -ERESTARTSYS;
40977 break;
40978 }
40979 - pipe->waiting_writers++;
40980 + atomic_inc(&pipe->waiting_writers);
40981 pipe_wait(pipe);
40982 - pipe->waiting_writers--;
40983 + atomic_dec(&pipe->waiting_writers);
40984 }
40985
40986 pipe_unlock(pipe);
40987 @@ -1819,14 +1825,14 @@ retry:
40988 pipe_double_lock(ipipe, opipe);
40989
40990 do {
40991 - if (!opipe->readers) {
40992 + if (!atomic_read(&opipe->readers)) {
40993 send_sig(SIGPIPE, current, 0);
40994 if (!ret)
40995 ret = -EPIPE;
40996 break;
40997 }
40998
40999 - if (!ipipe->nrbufs && !ipipe->writers)
41000 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
41001 break;
41002
41003 /*
41004 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
41005 pipe_double_lock(ipipe, opipe);
41006
41007 do {
41008 - if (!opipe->readers) {
41009 + if (!atomic_read(&opipe->readers)) {
41010 send_sig(SIGPIPE, current, 0);
41011 if (!ret)
41012 ret = -EPIPE;
41013 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
41014 * return EAGAIN if we have the potential of some data in the
41015 * future, otherwise just return 0
41016 */
41017 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41018 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41019 ret = -EAGAIN;
41020
41021 pipe_unlock(ipipe);
41022 diff -urNp linux-3.0.3/fs/sysfs/file.c linux-3.0.3/fs/sysfs/file.c
41023 --- linux-3.0.3/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
41024 +++ linux-3.0.3/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
41025 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41026
41027 struct sysfs_open_dirent {
41028 atomic_t refcnt;
41029 - atomic_t event;
41030 + atomic_unchecked_t event;
41031 wait_queue_head_t poll;
41032 struct list_head buffers; /* goes through sysfs_buffer.list */
41033 };
41034 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
41035 if (!sysfs_get_active(attr_sd))
41036 return -ENODEV;
41037
41038 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41039 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41040 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41041
41042 sysfs_put_active(attr_sd);
41043 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
41044 return -ENOMEM;
41045
41046 atomic_set(&new_od->refcnt, 0);
41047 - atomic_set(&new_od->event, 1);
41048 + atomic_set_unchecked(&new_od->event, 1);
41049 init_waitqueue_head(&new_od->poll);
41050 INIT_LIST_HEAD(&new_od->buffers);
41051 goto retry;
41052 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
41053
41054 sysfs_put_active(attr_sd);
41055
41056 - if (buffer->event != atomic_read(&od->event))
41057 + if (buffer->event != atomic_read_unchecked(&od->event))
41058 goto trigger;
41059
41060 return DEFAULT_POLLMASK;
41061 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
41062
41063 od = sd->s_attr.open;
41064 if (od) {
41065 - atomic_inc(&od->event);
41066 + atomic_inc_unchecked(&od->event);
41067 wake_up_interruptible(&od->poll);
41068 }
41069
41070 diff -urNp linux-3.0.3/fs/sysfs/mount.c linux-3.0.3/fs/sysfs/mount.c
41071 --- linux-3.0.3/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
41072 +++ linux-3.0.3/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
41073 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41074 .s_name = "",
41075 .s_count = ATOMIC_INIT(1),
41076 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41077 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41078 + .s_mode = S_IFDIR | S_IRWXU,
41079 +#else
41080 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41081 +#endif
41082 .s_ino = 1,
41083 };
41084
41085 diff -urNp linux-3.0.3/fs/sysfs/symlink.c linux-3.0.3/fs/sysfs/symlink.c
41086 --- linux-3.0.3/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
41087 +++ linux-3.0.3/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
41088 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41089
41090 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41091 {
41092 - char *page = nd_get_link(nd);
41093 + const char *page = nd_get_link(nd);
41094 if (!IS_ERR(page))
41095 free_page((unsigned long)page);
41096 }
41097 diff -urNp linux-3.0.3/fs/udf/inode.c linux-3.0.3/fs/udf/inode.c
41098 --- linux-3.0.3/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
41099 +++ linux-3.0.3/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
41100 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41101 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41102 int lastblock = 0;
41103
41104 + pax_track_stack();
41105 +
41106 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41107 prev_epos.block = iinfo->i_location;
41108 prev_epos.bh = NULL;
41109 diff -urNp linux-3.0.3/fs/udf/misc.c linux-3.0.3/fs/udf/misc.c
41110 --- linux-3.0.3/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
41111 +++ linux-3.0.3/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
41112 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41113
41114 u8 udf_tag_checksum(const struct tag *t)
41115 {
41116 - u8 *data = (u8 *)t;
41117 + const u8 *data = (const u8 *)t;
41118 u8 checksum = 0;
41119 int i;
41120 for (i = 0; i < sizeof(struct tag); ++i)
41121 diff -urNp linux-3.0.3/fs/utimes.c linux-3.0.3/fs/utimes.c
41122 --- linux-3.0.3/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
41123 +++ linux-3.0.3/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
41124 @@ -1,6 +1,7 @@
41125 #include <linux/compiler.h>
41126 #include <linux/file.h>
41127 #include <linux/fs.h>
41128 +#include <linux/security.h>
41129 #include <linux/linkage.h>
41130 #include <linux/mount.h>
41131 #include <linux/namei.h>
41132 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41133 goto mnt_drop_write_and_out;
41134 }
41135 }
41136 +
41137 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41138 + error = -EACCES;
41139 + goto mnt_drop_write_and_out;
41140 + }
41141 +
41142 mutex_lock(&inode->i_mutex);
41143 error = notify_change(path->dentry, &newattrs);
41144 mutex_unlock(&inode->i_mutex);
41145 diff -urNp linux-3.0.3/fs/xattr_acl.c linux-3.0.3/fs/xattr_acl.c
41146 --- linux-3.0.3/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
41147 +++ linux-3.0.3/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
41148 @@ -17,8 +17,8 @@
41149 struct posix_acl *
41150 posix_acl_from_xattr(const void *value, size_t size)
41151 {
41152 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41153 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41154 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41155 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41156 int count;
41157 struct posix_acl *acl;
41158 struct posix_acl_entry *acl_e;
41159 diff -urNp linux-3.0.3/fs/xattr.c linux-3.0.3/fs/xattr.c
41160 --- linux-3.0.3/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
41161 +++ linux-3.0.3/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
41162 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41163 * Extended attribute SET operations
41164 */
41165 static long
41166 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41167 +setxattr(struct path *path, const char __user *name, const void __user *value,
41168 size_t size, int flags)
41169 {
41170 int error;
41171 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
41172 return PTR_ERR(kvalue);
41173 }
41174
41175 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41176 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41177 + error = -EACCES;
41178 + goto out;
41179 + }
41180 +
41181 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41182 +out:
41183 kfree(kvalue);
41184 return error;
41185 }
41186 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41187 return error;
41188 error = mnt_want_write(path.mnt);
41189 if (!error) {
41190 - error = setxattr(path.dentry, name, value, size, flags);
41191 + error = setxattr(&path, name, value, size, flags);
41192 mnt_drop_write(path.mnt);
41193 }
41194 path_put(&path);
41195 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41196 return error;
41197 error = mnt_want_write(path.mnt);
41198 if (!error) {
41199 - error = setxattr(path.dentry, name, value, size, flags);
41200 + error = setxattr(&path, name, value, size, flags);
41201 mnt_drop_write(path.mnt);
41202 }
41203 path_put(&path);
41204 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41205 const void __user *,value, size_t, size, int, flags)
41206 {
41207 struct file *f;
41208 - struct dentry *dentry;
41209 int error = -EBADF;
41210
41211 f = fget(fd);
41212 if (!f)
41213 return error;
41214 - dentry = f->f_path.dentry;
41215 - audit_inode(NULL, dentry);
41216 + audit_inode(NULL, f->f_path.dentry);
41217 error = mnt_want_write_file(f);
41218 if (!error) {
41219 - error = setxattr(dentry, name, value, size, flags);
41220 + error = setxattr(&f->f_path, name, value, size, flags);
41221 mnt_drop_write(f->f_path.mnt);
41222 }
41223 fput(f);
41224 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c
41225 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
41226 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
41227 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41228 xfs_fsop_geom_t fsgeo;
41229 int error;
41230
41231 + memset(&fsgeo, 0, sizeof(fsgeo));
41232 error = xfs_fs_geometry(mp, &fsgeo, 3);
41233 if (error)
41234 return -error;
41235 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c
41236 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
41237 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
41238 @@ -128,7 +128,7 @@ xfs_find_handle(
41239 }
41240
41241 error = -EFAULT;
41242 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41243 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41244 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41245 goto out_put;
41246
41247 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c
41248 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
41249 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
41250 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41251 struct nameidata *nd,
41252 void *p)
41253 {
41254 - char *s = nd_get_link(nd);
41255 + const char *s = nd_get_link(nd);
41256
41257 if (!IS_ERR(s))
41258 kfree(s);
41259 diff -urNp linux-3.0.3/fs/xfs/xfs_bmap.c linux-3.0.3/fs/xfs/xfs_bmap.c
41260 --- linux-3.0.3/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
41261 +++ linux-3.0.3/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
41262 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
41263 int nmap,
41264 int ret_nmap);
41265 #else
41266 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41267 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41268 #endif /* DEBUG */
41269
41270 STATIC int
41271 diff -urNp linux-3.0.3/fs/xfs/xfs_dir2_sf.c linux-3.0.3/fs/xfs/xfs_dir2_sf.c
41272 --- linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
41273 +++ linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
41274 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41275 }
41276
41277 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41278 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41279 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41280 + char name[sfep->namelen];
41281 + memcpy(name, sfep->name, sfep->namelen);
41282 + if (filldir(dirent, name, sfep->namelen,
41283 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41284 + *offset = off & 0x7fffffff;
41285 + return 0;
41286 + }
41287 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41288 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41289 *offset = off & 0x7fffffff;
41290 return 0;
41291 diff -urNp linux-3.0.3/grsecurity/gracl_alloc.c linux-3.0.3/grsecurity/gracl_alloc.c
41292 --- linux-3.0.3/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41293 +++ linux-3.0.3/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
41294 @@ -0,0 +1,105 @@
41295 +#include <linux/kernel.h>
41296 +#include <linux/mm.h>
41297 +#include <linux/slab.h>
41298 +#include <linux/vmalloc.h>
41299 +#include <linux/gracl.h>
41300 +#include <linux/grsecurity.h>
41301 +
41302 +static unsigned long alloc_stack_next = 1;
41303 +static unsigned long alloc_stack_size = 1;
41304 +static void **alloc_stack;
41305 +
41306 +static __inline__ int
41307 +alloc_pop(void)
41308 +{
41309 + if (alloc_stack_next == 1)
41310 + return 0;
41311 +
41312 + kfree(alloc_stack[alloc_stack_next - 2]);
41313 +
41314 + alloc_stack_next--;
41315 +
41316 + return 1;
41317 +}
41318 +
41319 +static __inline__ int
41320 +alloc_push(void *buf)
41321 +{
41322 + if (alloc_stack_next >= alloc_stack_size)
41323 + return 1;
41324 +
41325 + alloc_stack[alloc_stack_next - 1] = buf;
41326 +
41327 + alloc_stack_next++;
41328 +
41329 + return 0;
41330 +}
41331 +
41332 +void *
41333 +acl_alloc(unsigned long len)
41334 +{
41335 + void *ret = NULL;
41336 +
41337 + if (!len || len > PAGE_SIZE)
41338 + goto out;
41339 +
41340 + ret = kmalloc(len, GFP_KERNEL);
41341 +
41342 + if (ret) {
41343 + if (alloc_push(ret)) {
41344 + kfree(ret);
41345 + ret = NULL;
41346 + }
41347 + }
41348 +
41349 +out:
41350 + return ret;
41351 +}
41352 +
41353 +void *
41354 +acl_alloc_num(unsigned long num, unsigned long len)
41355 +{
41356 + if (!len || (num > (PAGE_SIZE / len)))
41357 + return NULL;
41358 +
41359 + return acl_alloc(num * len);
41360 +}
41361 +
41362 +void
41363 +acl_free_all(void)
41364 +{
41365 + if (gr_acl_is_enabled() || !alloc_stack)
41366 + return;
41367 +
41368 + while (alloc_pop()) ;
41369 +
41370 + if (alloc_stack) {
41371 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41372 + kfree(alloc_stack);
41373 + else
41374 + vfree(alloc_stack);
41375 + }
41376 +
41377 + alloc_stack = NULL;
41378 + alloc_stack_size = 1;
41379 + alloc_stack_next = 1;
41380 +
41381 + return;
41382 +}
41383 +
41384 +int
41385 +acl_alloc_stack_init(unsigned long size)
41386 +{
41387 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41388 + alloc_stack =
41389 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41390 + else
41391 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41392 +
41393 + alloc_stack_size = size;
41394 +
41395 + if (!alloc_stack)
41396 + return 0;
41397 + else
41398 + return 1;
41399 +}
41400 diff -urNp linux-3.0.3/grsecurity/gracl.c linux-3.0.3/grsecurity/gracl.c
41401 --- linux-3.0.3/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41402 +++ linux-3.0.3/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
41403 @@ -0,0 +1,4106 @@
41404 +#include <linux/kernel.h>
41405 +#include <linux/module.h>
41406 +#include <linux/sched.h>
41407 +#include <linux/mm.h>
41408 +#include <linux/file.h>
41409 +#include <linux/fs.h>
41410 +#include <linux/namei.h>
41411 +#include <linux/mount.h>
41412 +#include <linux/tty.h>
41413 +#include <linux/proc_fs.h>
41414 +#include <linux/lglock.h>
41415 +#include <linux/slab.h>
41416 +#include <linux/vmalloc.h>
41417 +#include <linux/types.h>
41418 +#include <linux/sysctl.h>
41419 +#include <linux/netdevice.h>
41420 +#include <linux/ptrace.h>
41421 +#include <linux/gracl.h>
41422 +#include <linux/gralloc.h>
41423 +#include <linux/grsecurity.h>
41424 +#include <linux/grinternal.h>
41425 +#include <linux/pid_namespace.h>
41426 +#include <linux/fdtable.h>
41427 +#include <linux/percpu.h>
41428 +
41429 +#include <asm/uaccess.h>
41430 +#include <asm/errno.h>
41431 +#include <asm/mman.h>
41432 +
41433 +static struct acl_role_db acl_role_set;
41434 +static struct name_db name_set;
41435 +static struct inodev_db inodev_set;
41436 +
41437 +/* for keeping track of userspace pointers used for subjects, so we
41438 + can share references in the kernel as well
41439 +*/
41440 +
41441 +static struct path real_root;
41442 +
41443 +static struct acl_subj_map_db subj_map_set;
41444 +
41445 +static struct acl_role_label *default_role;
41446 +
41447 +static struct acl_role_label *role_list;
41448 +
41449 +static u16 acl_sp_role_value;
41450 +
41451 +extern char *gr_shared_page[4];
41452 +static DEFINE_MUTEX(gr_dev_mutex);
41453 +DEFINE_RWLOCK(gr_inode_lock);
41454 +
41455 +struct gr_arg *gr_usermode;
41456 +
41457 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41458 +
41459 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41460 +extern void gr_clear_learn_entries(void);
41461 +
41462 +#ifdef CONFIG_GRKERNSEC_RESLOG
41463 +extern void gr_log_resource(const struct task_struct *task,
41464 + const int res, const unsigned long wanted, const int gt);
41465 +#endif
41466 +
41467 +unsigned char *gr_system_salt;
41468 +unsigned char *gr_system_sum;
41469 +
41470 +static struct sprole_pw **acl_special_roles = NULL;
41471 +static __u16 num_sprole_pws = 0;
41472 +
41473 +static struct acl_role_label *kernel_role = NULL;
41474 +
41475 +static unsigned int gr_auth_attempts = 0;
41476 +static unsigned long gr_auth_expires = 0UL;
41477 +
41478 +#ifdef CONFIG_NET
41479 +extern struct vfsmount *sock_mnt;
41480 +#endif
41481 +
41482 +extern struct vfsmount *pipe_mnt;
41483 +extern struct vfsmount *shm_mnt;
41484 +#ifdef CONFIG_HUGETLBFS
41485 +extern struct vfsmount *hugetlbfs_vfsmount;
41486 +#endif
41487 +
41488 +static struct acl_object_label *fakefs_obj_rw;
41489 +static struct acl_object_label *fakefs_obj_rwx;
41490 +
41491 +extern int gr_init_uidset(void);
41492 +extern void gr_free_uidset(void);
41493 +extern void gr_remove_uid(uid_t uid);
41494 +extern int gr_find_uid(uid_t uid);
41495 +
41496 +DECLARE_BRLOCK(vfsmount_lock);
41497 +
41498 +__inline__ int
41499 +gr_acl_is_enabled(void)
41500 +{
41501 + return (gr_status & GR_READY);
41502 +}
41503 +
41504 +#ifdef CONFIG_BTRFS_FS
41505 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41506 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41507 +#endif
41508 +
41509 +static inline dev_t __get_dev(const struct dentry *dentry)
41510 +{
41511 +#ifdef CONFIG_BTRFS_FS
41512 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41513 + return get_btrfs_dev_from_inode(dentry->d_inode);
41514 + else
41515 +#endif
41516 + return dentry->d_inode->i_sb->s_dev;
41517 +}
41518 +
41519 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41520 +{
41521 + return __get_dev(dentry);
41522 +}
41523 +
41524 +static char gr_task_roletype_to_char(struct task_struct *task)
41525 +{
41526 + switch (task->role->roletype &
41527 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41528 + GR_ROLE_SPECIAL)) {
41529 + case GR_ROLE_DEFAULT:
41530 + return 'D';
41531 + case GR_ROLE_USER:
41532 + return 'U';
41533 + case GR_ROLE_GROUP:
41534 + return 'G';
41535 + case GR_ROLE_SPECIAL:
41536 + return 'S';
41537 + }
41538 +
41539 + return 'X';
41540 +}
41541 +
41542 +char gr_roletype_to_char(void)
41543 +{
41544 + return gr_task_roletype_to_char(current);
41545 +}
41546 +
41547 +__inline__ int
41548 +gr_acl_tpe_check(void)
41549 +{
41550 + if (unlikely(!(gr_status & GR_READY)))
41551 + return 0;
41552 + if (current->role->roletype & GR_ROLE_TPE)
41553 + return 1;
41554 + else
41555 + return 0;
41556 +}
41557 +
41558 +int
41559 +gr_handle_rawio(const struct inode *inode)
41560 +{
41561 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41562 + if (inode && S_ISBLK(inode->i_mode) &&
41563 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41564 + !capable(CAP_SYS_RAWIO))
41565 + return 1;
41566 +#endif
41567 + return 0;
41568 +}
41569 +
41570 +static int
41571 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41572 +{
41573 + if (likely(lena != lenb))
41574 + return 0;
41575 +
41576 + return !memcmp(a, b, lena);
41577 +}
41578 +
41579 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41580 +{
41581 + *buflen -= namelen;
41582 + if (*buflen < 0)
41583 + return -ENAMETOOLONG;
41584 + *buffer -= namelen;
41585 + memcpy(*buffer, str, namelen);
41586 + return 0;
41587 +}
41588 +
41589 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41590 +{
41591 + return prepend(buffer, buflen, name->name, name->len);
41592 +}
41593 +
41594 +static int prepend_path(const struct path *path, struct path *root,
41595 + char **buffer, int *buflen)
41596 +{
41597 + struct dentry *dentry = path->dentry;
41598 + struct vfsmount *vfsmnt = path->mnt;
41599 + bool slash = false;
41600 + int error = 0;
41601 +
41602 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41603 + struct dentry * parent;
41604 +
41605 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41606 + /* Global root? */
41607 + if (vfsmnt->mnt_parent == vfsmnt) {
41608 + goto out;
41609 + }
41610 + dentry = vfsmnt->mnt_mountpoint;
41611 + vfsmnt = vfsmnt->mnt_parent;
41612 + continue;
41613 + }
41614 + parent = dentry->d_parent;
41615 + prefetch(parent);
41616 + spin_lock(&dentry->d_lock);
41617 + error = prepend_name(buffer, buflen, &dentry->d_name);
41618 + spin_unlock(&dentry->d_lock);
41619 + if (!error)
41620 + error = prepend(buffer, buflen, "/", 1);
41621 + if (error)
41622 + break;
41623 +
41624 + slash = true;
41625 + dentry = parent;
41626 + }
41627 +
41628 +out:
41629 + if (!error && !slash)
41630 + error = prepend(buffer, buflen, "/", 1);
41631 +
41632 + return error;
41633 +}
41634 +
41635 +/* this must be called with vfsmount_lock and rename_lock held */
41636 +
41637 +static char *__our_d_path(const struct path *path, struct path *root,
41638 + char *buf, int buflen)
41639 +{
41640 + char *res = buf + buflen;
41641 + int error;
41642 +
41643 + prepend(&res, &buflen, "\0", 1);
41644 + error = prepend_path(path, root, &res, &buflen);
41645 + if (error)
41646 + return ERR_PTR(error);
41647 +
41648 + return res;
41649 +}
41650 +
41651 +static char *
41652 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41653 +{
41654 + char *retval;
41655 +
41656 + retval = __our_d_path(path, root, buf, buflen);
41657 + if (unlikely(IS_ERR(retval)))
41658 + retval = strcpy(buf, "<path too long>");
41659 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41660 + retval[1] = '\0';
41661 +
41662 + return retval;
41663 +}
41664 +
41665 +static char *
41666 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41667 + char *buf, int buflen)
41668 +{
41669 + struct path path;
41670 + char *res;
41671 +
41672 + path.dentry = (struct dentry *)dentry;
41673 + path.mnt = (struct vfsmount *)vfsmnt;
41674 +
41675 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41676 + by the RBAC system */
41677 + res = gen_full_path(&path, &real_root, buf, buflen);
41678 +
41679 + return res;
41680 +}
41681 +
41682 +static char *
41683 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41684 + char *buf, int buflen)
41685 +{
41686 + char *res;
41687 + struct path path;
41688 + struct path root;
41689 + struct task_struct *reaper = &init_task;
41690 +
41691 + path.dentry = (struct dentry *)dentry;
41692 + path.mnt = (struct vfsmount *)vfsmnt;
41693 +
41694 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41695 + get_fs_root(reaper->fs, &root);
41696 +
41697 + write_seqlock(&rename_lock);
41698 + br_read_lock(vfsmount_lock);
41699 + res = gen_full_path(&path, &root, buf, buflen);
41700 + br_read_unlock(vfsmount_lock);
41701 + write_sequnlock(&rename_lock);
41702 +
41703 + path_put(&root);
41704 + return res;
41705 +}
41706 +
41707 +static char *
41708 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41709 +{
41710 + char *ret;
41711 + write_seqlock(&rename_lock);
41712 + br_read_lock(vfsmount_lock);
41713 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41714 + PAGE_SIZE);
41715 + br_read_unlock(vfsmount_lock);
41716 + write_sequnlock(&rename_lock);
41717 + return ret;
41718 +}
41719 +
41720 +char *
41721 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41722 +{
41723 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41724 + PAGE_SIZE);
41725 +}
41726 +
41727 +char *
41728 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41729 +{
41730 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41731 + PAGE_SIZE);
41732 +}
41733 +
41734 +char *
41735 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41736 +{
41737 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41738 + PAGE_SIZE);
41739 +}
41740 +
41741 +char *
41742 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41743 +{
41744 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41745 + PAGE_SIZE);
41746 +}
41747 +
41748 +char *
41749 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41750 +{
41751 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41752 + PAGE_SIZE);
41753 +}
41754 +
41755 +__inline__ __u32
41756 +to_gr_audit(const __u32 reqmode)
41757 +{
41758 + /* masks off auditable permission flags, then shifts them to create
41759 + auditing flags, and adds the special case of append auditing if
41760 + we're requesting write */
41761 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41762 +}
41763 +
41764 +struct acl_subject_label *
41765 +lookup_subject_map(const struct acl_subject_label *userp)
41766 +{
41767 + unsigned int index = shash(userp, subj_map_set.s_size);
41768 + struct subject_map *match;
41769 +
41770 + match = subj_map_set.s_hash[index];
41771 +
41772 + while (match && match->user != userp)
41773 + match = match->next;
41774 +
41775 + if (match != NULL)
41776 + return match->kernel;
41777 + else
41778 + return NULL;
41779 +}
41780 +
41781 +static void
41782 +insert_subj_map_entry(struct subject_map *subjmap)
41783 +{
41784 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41785 + struct subject_map **curr;
41786 +
41787 + subjmap->prev = NULL;
41788 +
41789 + curr = &subj_map_set.s_hash[index];
41790 + if (*curr != NULL)
41791 + (*curr)->prev = subjmap;
41792 +
41793 + subjmap->next = *curr;
41794 + *curr = subjmap;
41795 +
41796 + return;
41797 +}
41798 +
41799 +static struct acl_role_label *
41800 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41801 + const gid_t gid)
41802 +{
41803 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41804 + struct acl_role_label *match;
41805 + struct role_allowed_ip *ipp;
41806 + unsigned int x;
41807 + u32 curr_ip = task->signal->curr_ip;
41808 +
41809 + task->signal->saved_ip = curr_ip;
41810 +
41811 + match = acl_role_set.r_hash[index];
41812 +
41813 + while (match) {
41814 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41815 + for (x = 0; x < match->domain_child_num; x++) {
41816 + if (match->domain_children[x] == uid)
41817 + goto found;
41818 + }
41819 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41820 + break;
41821 + match = match->next;
41822 + }
41823 +found:
41824 + if (match == NULL) {
41825 + try_group:
41826 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41827 + match = acl_role_set.r_hash[index];
41828 +
41829 + while (match) {
41830 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41831 + for (x = 0; x < match->domain_child_num; x++) {
41832 + if (match->domain_children[x] == gid)
41833 + goto found2;
41834 + }
41835 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41836 + break;
41837 + match = match->next;
41838 + }
41839 +found2:
41840 + if (match == NULL)
41841 + match = default_role;
41842 + if (match->allowed_ips == NULL)
41843 + return match;
41844 + else {
41845 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41846 + if (likely
41847 + ((ntohl(curr_ip) & ipp->netmask) ==
41848 + (ntohl(ipp->addr) & ipp->netmask)))
41849 + return match;
41850 + }
41851 + match = default_role;
41852 + }
41853 + } else if (match->allowed_ips == NULL) {
41854 + return match;
41855 + } else {
41856 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41857 + if (likely
41858 + ((ntohl(curr_ip) & ipp->netmask) ==
41859 + (ntohl(ipp->addr) & ipp->netmask)))
41860 + return match;
41861 + }
41862 + goto try_group;
41863 + }
41864 +
41865 + return match;
41866 +}
41867 +
41868 +struct acl_subject_label *
41869 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41870 + const struct acl_role_label *role)
41871 +{
41872 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41873 + struct acl_subject_label *match;
41874 +
41875 + match = role->subj_hash[index];
41876 +
41877 + while (match && (match->inode != ino || match->device != dev ||
41878 + (match->mode & GR_DELETED))) {
41879 + match = match->next;
41880 + }
41881 +
41882 + if (match && !(match->mode & GR_DELETED))
41883 + return match;
41884 + else
41885 + return NULL;
41886 +}
41887 +
41888 +struct acl_subject_label *
41889 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41890 + const struct acl_role_label *role)
41891 +{
41892 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41893 + struct acl_subject_label *match;
41894 +
41895 + match = role->subj_hash[index];
41896 +
41897 + while (match && (match->inode != ino || match->device != dev ||
41898 + !(match->mode & GR_DELETED))) {
41899 + match = match->next;
41900 + }
41901 +
41902 + if (match && (match->mode & GR_DELETED))
41903 + return match;
41904 + else
41905 + return NULL;
41906 +}
41907 +
41908 +static struct acl_object_label *
41909 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41910 + const struct acl_subject_label *subj)
41911 +{
41912 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41913 + struct acl_object_label *match;
41914 +
41915 + match = subj->obj_hash[index];
41916 +
41917 + while (match && (match->inode != ino || match->device != dev ||
41918 + (match->mode & GR_DELETED))) {
41919 + match = match->next;
41920 + }
41921 +
41922 + if (match && !(match->mode & GR_DELETED))
41923 + return match;
41924 + else
41925 + return NULL;
41926 +}
41927 +
41928 +static struct acl_object_label *
41929 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41930 + const struct acl_subject_label *subj)
41931 +{
41932 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41933 + struct acl_object_label *match;
41934 +
41935 + match = subj->obj_hash[index];
41936 +
41937 + while (match && (match->inode != ino || match->device != dev ||
41938 + !(match->mode & GR_DELETED))) {
41939 + match = match->next;
41940 + }
41941 +
41942 + if (match && (match->mode & GR_DELETED))
41943 + return match;
41944 +
41945 + match = subj->obj_hash[index];
41946 +
41947 + while (match && (match->inode != ino || match->device != dev ||
41948 + (match->mode & GR_DELETED))) {
41949 + match = match->next;
41950 + }
41951 +
41952 + if (match && !(match->mode & GR_DELETED))
41953 + return match;
41954 + else
41955 + return NULL;
41956 +}
41957 +
41958 +static struct name_entry *
41959 +lookup_name_entry(const char *name)
41960 +{
41961 + unsigned int len = strlen(name);
41962 + unsigned int key = full_name_hash(name, len);
41963 + unsigned int index = key % name_set.n_size;
41964 + struct name_entry *match;
41965 +
41966 + match = name_set.n_hash[index];
41967 +
41968 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41969 + match = match->next;
41970 +
41971 + return match;
41972 +}
41973 +
41974 +static struct name_entry *
41975 +lookup_name_entry_create(const char *name)
41976 +{
41977 + unsigned int len = strlen(name);
41978 + unsigned int key = full_name_hash(name, len);
41979 + unsigned int index = key % name_set.n_size;
41980 + struct name_entry *match;
41981 +
41982 + match = name_set.n_hash[index];
41983 +
41984 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41985 + !match->deleted))
41986 + match = match->next;
41987 +
41988 + if (match && match->deleted)
41989 + return match;
41990 +
41991 + match = name_set.n_hash[index];
41992 +
41993 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41994 + match->deleted))
41995 + match = match->next;
41996 +
41997 + if (match && !match->deleted)
41998 + return match;
41999 + else
42000 + return NULL;
42001 +}
42002 +
42003 +static struct inodev_entry *
42004 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
42005 +{
42006 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
42007 + struct inodev_entry *match;
42008 +
42009 + match = inodev_set.i_hash[index];
42010 +
42011 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42012 + match = match->next;
42013 +
42014 + return match;
42015 +}
42016 +
42017 +static void
42018 +insert_inodev_entry(struct inodev_entry *entry)
42019 +{
42020 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42021 + inodev_set.i_size);
42022 + struct inodev_entry **curr;
42023 +
42024 + entry->prev = NULL;
42025 +
42026 + curr = &inodev_set.i_hash[index];
42027 + if (*curr != NULL)
42028 + (*curr)->prev = entry;
42029 +
42030 + entry->next = *curr;
42031 + *curr = entry;
42032 +
42033 + return;
42034 +}
42035 +
42036 +static void
42037 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42038 +{
42039 + unsigned int index =
42040 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42041 + struct acl_role_label **curr;
42042 + struct acl_role_label *tmp;
42043 +
42044 + curr = &acl_role_set.r_hash[index];
42045 +
42046 + /* if role was already inserted due to domains and already has
42047 + a role in the same bucket as it attached, then we need to
42048 + combine these two buckets
42049 + */
42050 + if (role->next) {
42051 + tmp = role->next;
42052 + while (tmp->next)
42053 + tmp = tmp->next;
42054 + tmp->next = *curr;
42055 + } else
42056 + role->next = *curr;
42057 + *curr = role;
42058 +
42059 + return;
42060 +}
42061 +
42062 +static void
42063 +insert_acl_role_label(struct acl_role_label *role)
42064 +{
42065 + int i;
42066 +
42067 + if (role_list == NULL) {
42068 + role_list = role;
42069 + role->prev = NULL;
42070 + } else {
42071 + role->prev = role_list;
42072 + role_list = role;
42073 + }
42074 +
42075 + /* used for hash chains */
42076 + role->next = NULL;
42077 +
42078 + if (role->roletype & GR_ROLE_DOMAIN) {
42079 + for (i = 0; i < role->domain_child_num; i++)
42080 + __insert_acl_role_label(role, role->domain_children[i]);
42081 + } else
42082 + __insert_acl_role_label(role, role->uidgid);
42083 +}
42084 +
42085 +static int
42086 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42087 +{
42088 + struct name_entry **curr, *nentry;
42089 + struct inodev_entry *ientry;
42090 + unsigned int len = strlen(name);
42091 + unsigned int key = full_name_hash(name, len);
42092 + unsigned int index = key % name_set.n_size;
42093 +
42094 + curr = &name_set.n_hash[index];
42095 +
42096 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42097 + curr = &((*curr)->next);
42098 +
42099 + if (*curr != NULL)
42100 + return 1;
42101 +
42102 + nentry = acl_alloc(sizeof (struct name_entry));
42103 + if (nentry == NULL)
42104 + return 0;
42105 + ientry = acl_alloc(sizeof (struct inodev_entry));
42106 + if (ientry == NULL)
42107 + return 0;
42108 + ientry->nentry = nentry;
42109 +
42110 + nentry->key = key;
42111 + nentry->name = name;
42112 + nentry->inode = inode;
42113 + nentry->device = device;
42114 + nentry->len = len;
42115 + nentry->deleted = deleted;
42116 +
42117 + nentry->prev = NULL;
42118 + curr = &name_set.n_hash[index];
42119 + if (*curr != NULL)
42120 + (*curr)->prev = nentry;
42121 + nentry->next = *curr;
42122 + *curr = nentry;
42123 +
42124 + /* insert us into the table searchable by inode/dev */
42125 + insert_inodev_entry(ientry);
42126 +
42127 + return 1;
42128 +}
42129 +
42130 +static void
42131 +insert_acl_obj_label(struct acl_object_label *obj,
42132 + struct acl_subject_label *subj)
42133 +{
42134 + unsigned int index =
42135 + fhash(obj->inode, obj->device, subj->obj_hash_size);
42136 + struct acl_object_label **curr;
42137 +
42138 +
42139 + obj->prev = NULL;
42140 +
42141 + curr = &subj->obj_hash[index];
42142 + if (*curr != NULL)
42143 + (*curr)->prev = obj;
42144 +
42145 + obj->next = *curr;
42146 + *curr = obj;
42147 +
42148 + return;
42149 +}
42150 +
42151 +static void
42152 +insert_acl_subj_label(struct acl_subject_label *obj,
42153 + struct acl_role_label *role)
42154 +{
42155 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42156 + struct acl_subject_label **curr;
42157 +
42158 + obj->prev = NULL;
42159 +
42160 + curr = &role->subj_hash[index];
42161 + if (*curr != NULL)
42162 + (*curr)->prev = obj;
42163 +
42164 + obj->next = *curr;
42165 + *curr = obj;
42166 +
42167 + return;
42168 +}
42169 +
42170 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42171 +
42172 +static void *
42173 +create_table(__u32 * len, int elementsize)
42174 +{
42175 + unsigned int table_sizes[] = {
42176 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42177 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42178 + 4194301, 8388593, 16777213, 33554393, 67108859
42179 + };
42180 + void *newtable = NULL;
42181 + unsigned int pwr = 0;
42182 +
42183 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42184 + table_sizes[pwr] <= *len)
42185 + pwr++;
42186 +
42187 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42188 + return newtable;
42189 +
42190 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42191 + newtable =
42192 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42193 + else
42194 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42195 +
42196 + *len = table_sizes[pwr];
42197 +
42198 + return newtable;
42199 +}
42200 +
42201 +static int
42202 +init_variables(const struct gr_arg *arg)
42203 +{
42204 + struct task_struct *reaper = &init_task;
42205 + unsigned int stacksize;
42206 +
42207 + subj_map_set.s_size = arg->role_db.num_subjects;
42208 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42209 + name_set.n_size = arg->role_db.num_objects;
42210 + inodev_set.i_size = arg->role_db.num_objects;
42211 +
42212 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42213 + !name_set.n_size || !inodev_set.i_size)
42214 + return 1;
42215 +
42216 + if (!gr_init_uidset())
42217 + return 1;
42218 +
42219 + /* set up the stack that holds allocation info */
42220 +
42221 + stacksize = arg->role_db.num_pointers + 5;
42222 +
42223 + if (!acl_alloc_stack_init(stacksize))
42224 + return 1;
42225 +
42226 + /* grab reference for the real root dentry and vfsmount */
42227 + get_fs_root(reaper->fs, &real_root);
42228 +
42229 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42230 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42231 +#endif
42232 +
42233 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42234 + if (fakefs_obj_rw == NULL)
42235 + return 1;
42236 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42237 +
42238 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42239 + if (fakefs_obj_rwx == NULL)
42240 + return 1;
42241 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42242 +
42243 + subj_map_set.s_hash =
42244 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42245 + acl_role_set.r_hash =
42246 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42247 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42248 + inodev_set.i_hash =
42249 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42250 +
42251 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42252 + !name_set.n_hash || !inodev_set.i_hash)
42253 + return 1;
42254 +
42255 + memset(subj_map_set.s_hash, 0,
42256 + sizeof(struct subject_map *) * subj_map_set.s_size);
42257 + memset(acl_role_set.r_hash, 0,
42258 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42259 + memset(name_set.n_hash, 0,
42260 + sizeof (struct name_entry *) * name_set.n_size);
42261 + memset(inodev_set.i_hash, 0,
42262 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42263 +
42264 + return 0;
42265 +}
42266 +
42267 +/* free information not needed after startup
42268 + currently contains user->kernel pointer mappings for subjects
42269 +*/
42270 +
42271 +static void
42272 +free_init_variables(void)
42273 +{
42274 + __u32 i;
42275 +
42276 + if (subj_map_set.s_hash) {
42277 + for (i = 0; i < subj_map_set.s_size; i++) {
42278 + if (subj_map_set.s_hash[i]) {
42279 + kfree(subj_map_set.s_hash[i]);
42280 + subj_map_set.s_hash[i] = NULL;
42281 + }
42282 + }
42283 +
42284 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42285 + PAGE_SIZE)
42286 + kfree(subj_map_set.s_hash);
42287 + else
42288 + vfree(subj_map_set.s_hash);
42289 + }
42290 +
42291 + return;
42292 +}
42293 +
42294 +static void
42295 +free_variables(void)
42296 +{
42297 + struct acl_subject_label *s;
42298 + struct acl_role_label *r;
42299 + struct task_struct *task, *task2;
42300 + unsigned int x;
42301 +
42302 + gr_clear_learn_entries();
42303 +
42304 + read_lock(&tasklist_lock);
42305 + do_each_thread(task2, task) {
42306 + task->acl_sp_role = 0;
42307 + task->acl_role_id = 0;
42308 + task->acl = NULL;
42309 + task->role = NULL;
42310 + } while_each_thread(task2, task);
42311 + read_unlock(&tasklist_lock);
42312 +
42313 + /* release the reference to the real root dentry and vfsmount */
42314 + path_put(&real_root);
42315 +
42316 + /* free all object hash tables */
42317 +
42318 + FOR_EACH_ROLE_START(r)
42319 + if (r->subj_hash == NULL)
42320 + goto next_role;
42321 + FOR_EACH_SUBJECT_START(r, s, x)
42322 + if (s->obj_hash == NULL)
42323 + break;
42324 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42325 + kfree(s->obj_hash);
42326 + else
42327 + vfree(s->obj_hash);
42328 + FOR_EACH_SUBJECT_END(s, x)
42329 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42330 + if (s->obj_hash == NULL)
42331 + break;
42332 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42333 + kfree(s->obj_hash);
42334 + else
42335 + vfree(s->obj_hash);
42336 + FOR_EACH_NESTED_SUBJECT_END(s)
42337 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42338 + kfree(r->subj_hash);
42339 + else
42340 + vfree(r->subj_hash);
42341 + r->subj_hash = NULL;
42342 +next_role:
42343 + FOR_EACH_ROLE_END(r)
42344 +
42345 + acl_free_all();
42346 +
42347 + if (acl_role_set.r_hash) {
42348 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42349 + PAGE_SIZE)
42350 + kfree(acl_role_set.r_hash);
42351 + else
42352 + vfree(acl_role_set.r_hash);
42353 + }
42354 + if (name_set.n_hash) {
42355 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42356 + PAGE_SIZE)
42357 + kfree(name_set.n_hash);
42358 + else
42359 + vfree(name_set.n_hash);
42360 + }
42361 +
42362 + if (inodev_set.i_hash) {
42363 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42364 + PAGE_SIZE)
42365 + kfree(inodev_set.i_hash);
42366 + else
42367 + vfree(inodev_set.i_hash);
42368 + }
42369 +
42370 + gr_free_uidset();
42371 +
42372 + memset(&name_set, 0, sizeof (struct name_db));
42373 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42374 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42375 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42376 +
42377 + default_role = NULL;
42378 + role_list = NULL;
42379 +
42380 + return;
42381 +}
42382 +
42383 +static __u32
42384 +count_user_objs(struct acl_object_label *userp)
42385 +{
42386 + struct acl_object_label o_tmp;
42387 + __u32 num = 0;
42388 +
42389 + while (userp) {
42390 + if (copy_from_user(&o_tmp, userp,
42391 + sizeof (struct acl_object_label)))
42392 + break;
42393 +
42394 + userp = o_tmp.prev;
42395 + num++;
42396 + }
42397 +
42398 + return num;
42399 +}
42400 +
42401 +static struct acl_subject_label *
42402 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42403 +
42404 +static int
42405 +copy_user_glob(struct acl_object_label *obj)
42406 +{
42407 + struct acl_object_label *g_tmp, **guser;
42408 + unsigned int len;
42409 + char *tmp;
42410 +
42411 + if (obj->globbed == NULL)
42412 + return 0;
42413 +
42414 + guser = &obj->globbed;
42415 + while (*guser) {
42416 + g_tmp = (struct acl_object_label *)
42417 + acl_alloc(sizeof (struct acl_object_label));
42418 + if (g_tmp == NULL)
42419 + return -ENOMEM;
42420 +
42421 + if (copy_from_user(g_tmp, *guser,
42422 + sizeof (struct acl_object_label)))
42423 + return -EFAULT;
42424 +
42425 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42426 +
42427 + if (!len || len >= PATH_MAX)
42428 + return -EINVAL;
42429 +
42430 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42431 + return -ENOMEM;
42432 +
42433 + if (copy_from_user(tmp, g_tmp->filename, len))
42434 + return -EFAULT;
42435 + tmp[len-1] = '\0';
42436 + g_tmp->filename = tmp;
42437 +
42438 + *guser = g_tmp;
42439 + guser = &(g_tmp->next);
42440 + }
42441 +
42442 + return 0;
42443 +}
42444 +
42445 +static int
42446 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42447 + struct acl_role_label *role)
42448 +{
42449 + struct acl_object_label *o_tmp;
42450 + unsigned int len;
42451 + int ret;
42452 + char *tmp;
42453 +
42454 + while (userp) {
42455 + if ((o_tmp = (struct acl_object_label *)
42456 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42457 + return -ENOMEM;
42458 +
42459 + if (copy_from_user(o_tmp, userp,
42460 + sizeof (struct acl_object_label)))
42461 + return -EFAULT;
42462 +
42463 + userp = o_tmp->prev;
42464 +
42465 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42466 +
42467 + if (!len || len >= PATH_MAX)
42468 + return -EINVAL;
42469 +
42470 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42471 + return -ENOMEM;
42472 +
42473 + if (copy_from_user(tmp, o_tmp->filename, len))
42474 + return -EFAULT;
42475 + tmp[len-1] = '\0';
42476 + o_tmp->filename = tmp;
42477 +
42478 + insert_acl_obj_label(o_tmp, subj);
42479 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42480 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42481 + return -ENOMEM;
42482 +
42483 + ret = copy_user_glob(o_tmp);
42484 + if (ret)
42485 + return ret;
42486 +
42487 + if (o_tmp->nested) {
42488 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42489 + if (IS_ERR(o_tmp->nested))
42490 + return PTR_ERR(o_tmp->nested);
42491 +
42492 + /* insert into nested subject list */
42493 + o_tmp->nested->next = role->hash->first;
42494 + role->hash->first = o_tmp->nested;
42495 + }
42496 + }
42497 +
42498 + return 0;
42499 +}
42500 +
42501 +static __u32
42502 +count_user_subjs(struct acl_subject_label *userp)
42503 +{
42504 + struct acl_subject_label s_tmp;
42505 + __u32 num = 0;
42506 +
42507 + while (userp) {
42508 + if (copy_from_user(&s_tmp, userp,
42509 + sizeof (struct acl_subject_label)))
42510 + break;
42511 +
42512 + userp = s_tmp.prev;
42513 + /* do not count nested subjects against this count, since
42514 + they are not included in the hash table, but are
42515 + attached to objects. We have already counted
42516 + the subjects in userspace for the allocation
42517 + stack
42518 + */
42519 + if (!(s_tmp.mode & GR_NESTED))
42520 + num++;
42521 + }
42522 +
42523 + return num;
42524 +}
42525 +
42526 +static int
42527 +copy_user_allowedips(struct acl_role_label *rolep)
42528 +{
42529 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42530 +
42531 + ruserip = rolep->allowed_ips;
42532 +
42533 + while (ruserip) {
42534 + rlast = rtmp;
42535 +
42536 + if ((rtmp = (struct role_allowed_ip *)
42537 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42538 + return -ENOMEM;
42539 +
42540 + if (copy_from_user(rtmp, ruserip,
42541 + sizeof (struct role_allowed_ip)))
42542 + return -EFAULT;
42543 +
42544 + ruserip = rtmp->prev;
42545 +
42546 + if (!rlast) {
42547 + rtmp->prev = NULL;
42548 + rolep->allowed_ips = rtmp;
42549 + } else {
42550 + rlast->next = rtmp;
42551 + rtmp->prev = rlast;
42552 + }
42553 +
42554 + if (!ruserip)
42555 + rtmp->next = NULL;
42556 + }
42557 +
42558 + return 0;
42559 +}
42560 +
42561 +static int
42562 +copy_user_transitions(struct acl_role_label *rolep)
42563 +{
42564 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42565 +
42566 + unsigned int len;
42567 + char *tmp;
42568 +
42569 + rusertp = rolep->transitions;
42570 +
42571 + while (rusertp) {
42572 + rlast = rtmp;
42573 +
42574 + if ((rtmp = (struct role_transition *)
42575 + acl_alloc(sizeof (struct role_transition))) == NULL)
42576 + return -ENOMEM;
42577 +
42578 + if (copy_from_user(rtmp, rusertp,
42579 + sizeof (struct role_transition)))
42580 + return -EFAULT;
42581 +
42582 + rusertp = rtmp->prev;
42583 +
42584 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42585 +
42586 + if (!len || len >= GR_SPROLE_LEN)
42587 + return -EINVAL;
42588 +
42589 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42590 + return -ENOMEM;
42591 +
42592 + if (copy_from_user(tmp, rtmp->rolename, len))
42593 + return -EFAULT;
42594 + tmp[len-1] = '\0';
42595 + rtmp->rolename = tmp;
42596 +
42597 + if (!rlast) {
42598 + rtmp->prev = NULL;
42599 + rolep->transitions = rtmp;
42600 + } else {
42601 + rlast->next = rtmp;
42602 + rtmp->prev = rlast;
42603 + }
42604 +
42605 + if (!rusertp)
42606 + rtmp->next = NULL;
42607 + }
42608 +
42609 + return 0;
42610 +}
42611 +
42612 +static struct acl_subject_label *
42613 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42614 +{
42615 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42616 + unsigned int len;
42617 + char *tmp;
42618 + __u32 num_objs;
42619 + struct acl_ip_label **i_tmp, *i_utmp2;
42620 + struct gr_hash_struct ghash;
42621 + struct subject_map *subjmap;
42622 + unsigned int i_num;
42623 + int err;
42624 +
42625 + s_tmp = lookup_subject_map(userp);
42626 +
42627 + /* we've already copied this subject into the kernel, just return
42628 + the reference to it, and don't copy it over again
42629 + */
42630 + if (s_tmp)
42631 + return(s_tmp);
42632 +
42633 + if ((s_tmp = (struct acl_subject_label *)
42634 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42635 + return ERR_PTR(-ENOMEM);
42636 +
42637 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42638 + if (subjmap == NULL)
42639 + return ERR_PTR(-ENOMEM);
42640 +
42641 + subjmap->user = userp;
42642 + subjmap->kernel = s_tmp;
42643 + insert_subj_map_entry(subjmap);
42644 +
42645 + if (copy_from_user(s_tmp, userp,
42646 + sizeof (struct acl_subject_label)))
42647 + return ERR_PTR(-EFAULT);
42648 +
42649 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42650 +
42651 + if (!len || len >= PATH_MAX)
42652 + return ERR_PTR(-EINVAL);
42653 +
42654 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42655 + return ERR_PTR(-ENOMEM);
42656 +
42657 + if (copy_from_user(tmp, s_tmp->filename, len))
42658 + return ERR_PTR(-EFAULT);
42659 + tmp[len-1] = '\0';
42660 + s_tmp->filename = tmp;
42661 +
42662 + if (!strcmp(s_tmp->filename, "/"))
42663 + role->root_label = s_tmp;
42664 +
42665 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42666 + return ERR_PTR(-EFAULT);
42667 +
42668 + /* copy user and group transition tables */
42669 +
42670 + if (s_tmp->user_trans_num) {
42671 + uid_t *uidlist;
42672 +
42673 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42674 + if (uidlist == NULL)
42675 + return ERR_PTR(-ENOMEM);
42676 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42677 + return ERR_PTR(-EFAULT);
42678 +
42679 + s_tmp->user_transitions = uidlist;
42680 + }
42681 +
42682 + if (s_tmp->group_trans_num) {
42683 + gid_t *gidlist;
42684 +
42685 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42686 + if (gidlist == NULL)
42687 + return ERR_PTR(-ENOMEM);
42688 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42689 + return ERR_PTR(-EFAULT);
42690 +
42691 + s_tmp->group_transitions = gidlist;
42692 + }
42693 +
42694 + /* set up object hash table */
42695 + num_objs = count_user_objs(ghash.first);
42696 +
42697 + s_tmp->obj_hash_size = num_objs;
42698 + s_tmp->obj_hash =
42699 + (struct acl_object_label **)
42700 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42701 +
42702 + if (!s_tmp->obj_hash)
42703 + return ERR_PTR(-ENOMEM);
42704 +
42705 + memset(s_tmp->obj_hash, 0,
42706 + s_tmp->obj_hash_size *
42707 + sizeof (struct acl_object_label *));
42708 +
42709 + /* add in objects */
42710 + err = copy_user_objs(ghash.first, s_tmp, role);
42711 +
42712 + if (err)
42713 + return ERR_PTR(err);
42714 +
42715 + /* set pointer for parent subject */
42716 + if (s_tmp->parent_subject) {
42717 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42718 +
42719 + if (IS_ERR(s_tmp2))
42720 + return s_tmp2;
42721 +
42722 + s_tmp->parent_subject = s_tmp2;
42723 + }
42724 +
42725 + /* add in ip acls */
42726 +
42727 + if (!s_tmp->ip_num) {
42728 + s_tmp->ips = NULL;
42729 + goto insert;
42730 + }
42731 +
42732 + i_tmp =
42733 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42734 + sizeof (struct acl_ip_label *));
42735 +
42736 + if (!i_tmp)
42737 + return ERR_PTR(-ENOMEM);
42738 +
42739 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42740 + *(i_tmp + i_num) =
42741 + (struct acl_ip_label *)
42742 + acl_alloc(sizeof (struct acl_ip_label));
42743 + if (!*(i_tmp + i_num))
42744 + return ERR_PTR(-ENOMEM);
42745 +
42746 + if (copy_from_user
42747 + (&i_utmp2, s_tmp->ips + i_num,
42748 + sizeof (struct acl_ip_label *)))
42749 + return ERR_PTR(-EFAULT);
42750 +
42751 + if (copy_from_user
42752 + (*(i_tmp + i_num), i_utmp2,
42753 + sizeof (struct acl_ip_label)))
42754 + return ERR_PTR(-EFAULT);
42755 +
42756 + if ((*(i_tmp + i_num))->iface == NULL)
42757 + continue;
42758 +
42759 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42760 + if (!len || len >= IFNAMSIZ)
42761 + return ERR_PTR(-EINVAL);
42762 + tmp = acl_alloc(len);
42763 + if (tmp == NULL)
42764 + return ERR_PTR(-ENOMEM);
42765 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42766 + return ERR_PTR(-EFAULT);
42767 + (*(i_tmp + i_num))->iface = tmp;
42768 + }
42769 +
42770 + s_tmp->ips = i_tmp;
42771 +
42772 +insert:
42773 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42774 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42775 + return ERR_PTR(-ENOMEM);
42776 +
42777 + return s_tmp;
42778 +}
42779 +
42780 +static int
42781 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42782 +{
42783 + struct acl_subject_label s_pre;
42784 + struct acl_subject_label * ret;
42785 + int err;
42786 +
42787 + while (userp) {
42788 + if (copy_from_user(&s_pre, userp,
42789 + sizeof (struct acl_subject_label)))
42790 + return -EFAULT;
42791 +
42792 + /* do not add nested subjects here, add
42793 + while parsing objects
42794 + */
42795 +
42796 + if (s_pre.mode & GR_NESTED) {
42797 + userp = s_pre.prev;
42798 + continue;
42799 + }
42800 +
42801 + ret = do_copy_user_subj(userp, role);
42802 +
42803 + err = PTR_ERR(ret);
42804 + if (IS_ERR(ret))
42805 + return err;
42806 +
42807 + insert_acl_subj_label(ret, role);
42808 +
42809 + userp = s_pre.prev;
42810 + }
42811 +
42812 + return 0;
42813 +}
42814 +
42815 +static int
42816 +copy_user_acl(struct gr_arg *arg)
42817 +{
42818 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42819 + struct sprole_pw *sptmp;
42820 + struct gr_hash_struct *ghash;
42821 + uid_t *domainlist;
42822 + unsigned int r_num;
42823 + unsigned int len;
42824 + char *tmp;
42825 + int err = 0;
42826 + __u16 i;
42827 + __u32 num_subjs;
42828 +
42829 + /* we need a default and kernel role */
42830 + if (arg->role_db.num_roles < 2)
42831 + return -EINVAL;
42832 +
42833 + /* copy special role authentication info from userspace */
42834 +
42835 + num_sprole_pws = arg->num_sprole_pws;
42836 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42837 +
42838 + if (!acl_special_roles) {
42839 + err = -ENOMEM;
42840 + goto cleanup;
42841 + }
42842 +
42843 + for (i = 0; i < num_sprole_pws; i++) {
42844 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42845 + if (!sptmp) {
42846 + err = -ENOMEM;
42847 + goto cleanup;
42848 + }
42849 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42850 + sizeof (struct sprole_pw))) {
42851 + err = -EFAULT;
42852 + goto cleanup;
42853 + }
42854 +
42855 + len =
42856 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42857 +
42858 + if (!len || len >= GR_SPROLE_LEN) {
42859 + err = -EINVAL;
42860 + goto cleanup;
42861 + }
42862 +
42863 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42864 + err = -ENOMEM;
42865 + goto cleanup;
42866 + }
42867 +
42868 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42869 + err = -EFAULT;
42870 + goto cleanup;
42871 + }
42872 + tmp[len-1] = '\0';
42873 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42874 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42875 +#endif
42876 + sptmp->rolename = tmp;
42877 + acl_special_roles[i] = sptmp;
42878 + }
42879 +
42880 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42881 +
42882 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42883 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42884 +
42885 + if (!r_tmp) {
42886 + err = -ENOMEM;
42887 + goto cleanup;
42888 + }
42889 +
42890 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42891 + sizeof (struct acl_role_label *))) {
42892 + err = -EFAULT;
42893 + goto cleanup;
42894 + }
42895 +
42896 + if (copy_from_user(r_tmp, r_utmp2,
42897 + sizeof (struct acl_role_label))) {
42898 + err = -EFAULT;
42899 + goto cleanup;
42900 + }
42901 +
42902 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42903 +
42904 + if (!len || len >= PATH_MAX) {
42905 + err = -EINVAL;
42906 + goto cleanup;
42907 + }
42908 +
42909 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42910 + err = -ENOMEM;
42911 + goto cleanup;
42912 + }
42913 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
42914 + err = -EFAULT;
42915 + goto cleanup;
42916 + }
42917 + tmp[len-1] = '\0';
42918 + r_tmp->rolename = tmp;
42919 +
42920 + if (!strcmp(r_tmp->rolename, "default")
42921 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42922 + default_role = r_tmp;
42923 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42924 + kernel_role = r_tmp;
42925 + }
42926 +
42927 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42928 + err = -ENOMEM;
42929 + goto cleanup;
42930 + }
42931 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42932 + err = -EFAULT;
42933 + goto cleanup;
42934 + }
42935 +
42936 + r_tmp->hash = ghash;
42937 +
42938 + num_subjs = count_user_subjs(r_tmp->hash->first);
42939 +
42940 + r_tmp->subj_hash_size = num_subjs;
42941 + r_tmp->subj_hash =
42942 + (struct acl_subject_label **)
42943 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42944 +
42945 + if (!r_tmp->subj_hash) {
42946 + err = -ENOMEM;
42947 + goto cleanup;
42948 + }
42949 +
42950 + err = copy_user_allowedips(r_tmp);
42951 + if (err)
42952 + goto cleanup;
42953 +
42954 + /* copy domain info */
42955 + if (r_tmp->domain_children != NULL) {
42956 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42957 + if (domainlist == NULL) {
42958 + err = -ENOMEM;
42959 + goto cleanup;
42960 + }
42961 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42962 + err = -EFAULT;
42963 + goto cleanup;
42964 + }
42965 + r_tmp->domain_children = domainlist;
42966 + }
42967 +
42968 + err = copy_user_transitions(r_tmp);
42969 + if (err)
42970 + goto cleanup;
42971 +
42972 + memset(r_tmp->subj_hash, 0,
42973 + r_tmp->subj_hash_size *
42974 + sizeof (struct acl_subject_label *));
42975 +
42976 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42977 +
42978 + if (err)
42979 + goto cleanup;
42980 +
42981 + /* set nested subject list to null */
42982 + r_tmp->hash->first = NULL;
42983 +
42984 + insert_acl_role_label(r_tmp);
42985 + }
42986 +
42987 + goto return_err;
42988 + cleanup:
42989 + free_variables();
42990 + return_err:
42991 + return err;
42992 +
42993 +}
42994 +
42995 +static int
42996 +gracl_init(struct gr_arg *args)
42997 +{
42998 + int error = 0;
42999 +
43000 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
43001 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
43002 +
43003 + if (init_variables(args)) {
43004 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
43005 + error = -ENOMEM;
43006 + free_variables();
43007 + goto out;
43008 + }
43009 +
43010 + error = copy_user_acl(args);
43011 + free_init_variables();
43012 + if (error) {
43013 + free_variables();
43014 + goto out;
43015 + }
43016 +
43017 + if ((error = gr_set_acls(0))) {
43018 + free_variables();
43019 + goto out;
43020 + }
43021 +
43022 + pax_open_kernel();
43023 + gr_status |= GR_READY;
43024 + pax_close_kernel();
43025 +
43026 + out:
43027 + return error;
43028 +}
43029 +
43030 +/* derived from glibc fnmatch() 0: match, 1: no match*/
43031 +
43032 +static int
43033 +glob_match(const char *p, const char *n)
43034 +{
43035 + char c;
43036 +
43037 + while ((c = *p++) != '\0') {
43038 + switch (c) {
43039 + case '?':
43040 + if (*n == '\0')
43041 + return 1;
43042 + else if (*n == '/')
43043 + return 1;
43044 + break;
43045 + case '\\':
43046 + if (*n != c)
43047 + return 1;
43048 + break;
43049 + case '*':
43050 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
43051 + if (*n == '/')
43052 + return 1;
43053 + else if (c == '?') {
43054 + if (*n == '\0')
43055 + return 1;
43056 + else
43057 + ++n;
43058 + }
43059 + }
43060 + if (c == '\0') {
43061 + return 0;
43062 + } else {
43063 + const char *endp;
43064 +
43065 + if ((endp = strchr(n, '/')) == NULL)
43066 + endp = n + strlen(n);
43067 +
43068 + if (c == '[') {
43069 + for (--p; n < endp; ++n)
43070 + if (!glob_match(p, n))
43071 + return 0;
43072 + } else if (c == '/') {
43073 + while (*n != '\0' && *n != '/')
43074 + ++n;
43075 + if (*n == '/' && !glob_match(p, n + 1))
43076 + return 0;
43077 + } else {
43078 + for (--p; n < endp; ++n)
43079 + if (*n == c && !glob_match(p, n))
43080 + return 0;
43081 + }
43082 +
43083 + return 1;
43084 + }
43085 + case '[':
43086 + {
43087 + int not;
43088 + char cold;
43089 +
43090 + if (*n == '\0' || *n == '/')
43091 + return 1;
43092 +
43093 + not = (*p == '!' || *p == '^');
43094 + if (not)
43095 + ++p;
43096 +
43097 + c = *p++;
43098 + for (;;) {
43099 + unsigned char fn = (unsigned char)*n;
43100 +
43101 + if (c == '\0')
43102 + return 1;
43103 + else {
43104 + if (c == fn)
43105 + goto matched;
43106 + cold = c;
43107 + c = *p++;
43108 +
43109 + if (c == '-' && *p != ']') {
43110 + unsigned char cend = *p++;
43111 +
43112 + if (cend == '\0')
43113 + return 1;
43114 +
43115 + if (cold <= fn && fn <= cend)
43116 + goto matched;
43117 +
43118 + c = *p++;
43119 + }
43120 + }
43121 +
43122 + if (c == ']')
43123 + break;
43124 + }
43125 + if (!not)
43126 + return 1;
43127 + break;
43128 + matched:
43129 + while (c != ']') {
43130 + if (c == '\0')
43131 + return 1;
43132 +
43133 + c = *p++;
43134 + }
43135 + if (not)
43136 + return 1;
43137 + }
43138 + break;
43139 + default:
43140 + if (c != *n)
43141 + return 1;
43142 + }
43143 +
43144 + ++n;
43145 + }
43146 +
43147 + if (*n == '\0')
43148 + return 0;
43149 +
43150 + if (*n == '/')
43151 + return 0;
43152 +
43153 + return 1;
43154 +}
43155 +
43156 +static struct acl_object_label *
43157 +chk_glob_label(struct acl_object_label *globbed,
43158 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43159 +{
43160 + struct acl_object_label *tmp;
43161 +
43162 + if (*path == NULL)
43163 + *path = gr_to_filename_nolock(dentry, mnt);
43164 +
43165 + tmp = globbed;
43166 +
43167 + while (tmp) {
43168 + if (!glob_match(tmp->filename, *path))
43169 + return tmp;
43170 + tmp = tmp->next;
43171 + }
43172 +
43173 + return NULL;
43174 +}
43175 +
43176 +static struct acl_object_label *
43177 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43178 + const ino_t curr_ino, const dev_t curr_dev,
43179 + const struct acl_subject_label *subj, char **path, const int checkglob)
43180 +{
43181 + struct acl_subject_label *tmpsubj;
43182 + struct acl_object_label *retval;
43183 + struct acl_object_label *retval2;
43184 +
43185 + tmpsubj = (struct acl_subject_label *) subj;
43186 + read_lock(&gr_inode_lock);
43187 + do {
43188 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43189 + if (retval) {
43190 + if (checkglob && retval->globbed) {
43191 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43192 + (struct vfsmount *)orig_mnt, path);
43193 + if (retval2)
43194 + retval = retval2;
43195 + }
43196 + break;
43197 + }
43198 + } while ((tmpsubj = tmpsubj->parent_subject));
43199 + read_unlock(&gr_inode_lock);
43200 +
43201 + return retval;
43202 +}
43203 +
43204 +static __inline__ struct acl_object_label *
43205 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43206 + struct dentry *curr_dentry,
43207 + const struct acl_subject_label *subj, char **path, const int checkglob)
43208 +{
43209 + int newglob = checkglob;
43210 + ino_t inode;
43211 + dev_t device;
43212 +
43213 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43214 + as we don't want a / * rule to match instead of the / object
43215 + don't do this for create lookups that call this function though, since they're looking up
43216 + on the parent and thus need globbing checks on all paths
43217 + */
43218 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43219 + newglob = GR_NO_GLOB;
43220 +
43221 + spin_lock(&curr_dentry->d_lock);
43222 + inode = curr_dentry->d_inode->i_ino;
43223 + device = __get_dev(curr_dentry);
43224 + spin_unlock(&curr_dentry->d_lock);
43225 +
43226 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43227 +}
43228 +
43229 +static struct acl_object_label *
43230 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43231 + const struct acl_subject_label *subj, char *path, const int checkglob)
43232 +{
43233 + struct dentry *dentry = (struct dentry *) l_dentry;
43234 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43235 + struct acl_object_label *retval;
43236 + struct dentry *parent;
43237 +
43238 + write_seqlock(&rename_lock);
43239 + br_read_lock(vfsmount_lock);
43240 +
43241 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43242 +#ifdef CONFIG_NET
43243 + mnt == sock_mnt ||
43244 +#endif
43245 +#ifdef CONFIG_HUGETLBFS
43246 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43247 +#endif
43248 + /* ignore Eric Biederman */
43249 + IS_PRIVATE(l_dentry->d_inode))) {
43250 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43251 + goto out;
43252 + }
43253 +
43254 + for (;;) {
43255 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43256 + break;
43257 +
43258 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43259 + if (mnt->mnt_parent == mnt)
43260 + break;
43261 +
43262 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43263 + if (retval != NULL)
43264 + goto out;
43265 +
43266 + dentry = mnt->mnt_mountpoint;
43267 + mnt = mnt->mnt_parent;
43268 + continue;
43269 + }
43270 +
43271 + parent = dentry->d_parent;
43272 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43273 + if (retval != NULL)
43274 + goto out;
43275 +
43276 + dentry = parent;
43277 + }
43278 +
43279 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43280 +
43281 + /* real_root is pinned so we don't have to hold a reference */
43282 + if (retval == NULL)
43283 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43284 +out:
43285 + br_read_unlock(vfsmount_lock);
43286 + write_sequnlock(&rename_lock);
43287 +
43288 + BUG_ON(retval == NULL);
43289 +
43290 + return retval;
43291 +}
43292 +
43293 +static __inline__ struct acl_object_label *
43294 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43295 + const struct acl_subject_label *subj)
43296 +{
43297 + char *path = NULL;
43298 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43299 +}
43300 +
43301 +static __inline__ struct acl_object_label *
43302 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43303 + const struct acl_subject_label *subj)
43304 +{
43305 + char *path = NULL;
43306 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43307 +}
43308 +
43309 +static __inline__ struct acl_object_label *
43310 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43311 + const struct acl_subject_label *subj, char *path)
43312 +{
43313 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43314 +}
43315 +
43316 +static struct acl_subject_label *
43317 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43318 + const struct acl_role_label *role)
43319 +{
43320 + struct dentry *dentry = (struct dentry *) l_dentry;
43321 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43322 + struct acl_subject_label *retval;
43323 + struct dentry *parent;
43324 +
43325 + write_seqlock(&rename_lock);
43326 + br_read_lock(vfsmount_lock);
43327 +
43328 + for (;;) {
43329 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43330 + break;
43331 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43332 + if (mnt->mnt_parent == mnt)
43333 + break;
43334 +
43335 + spin_lock(&dentry->d_lock);
43336 + read_lock(&gr_inode_lock);
43337 + retval =
43338 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43339 + __get_dev(dentry), role);
43340 + read_unlock(&gr_inode_lock);
43341 + spin_unlock(&dentry->d_lock);
43342 + if (retval != NULL)
43343 + goto out;
43344 +
43345 + dentry = mnt->mnt_mountpoint;
43346 + mnt = mnt->mnt_parent;
43347 + continue;
43348 + }
43349 +
43350 + spin_lock(&dentry->d_lock);
43351 + read_lock(&gr_inode_lock);
43352 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43353 + __get_dev(dentry), role);
43354 + read_unlock(&gr_inode_lock);
43355 + parent = dentry->d_parent;
43356 + spin_unlock(&dentry->d_lock);
43357 +
43358 + if (retval != NULL)
43359 + goto out;
43360 +
43361 + dentry = parent;
43362 + }
43363 +
43364 + spin_lock(&dentry->d_lock);
43365 + read_lock(&gr_inode_lock);
43366 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43367 + __get_dev(dentry), role);
43368 + read_unlock(&gr_inode_lock);
43369 + spin_unlock(&dentry->d_lock);
43370 +
43371 + if (unlikely(retval == NULL)) {
43372 + /* real_root is pinned, we don't need to hold a reference */
43373 + read_lock(&gr_inode_lock);
43374 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43375 + __get_dev(real_root.dentry), role);
43376 + read_unlock(&gr_inode_lock);
43377 + }
43378 +out:
43379 + br_read_unlock(vfsmount_lock);
43380 + write_sequnlock(&rename_lock);
43381 +
43382 + BUG_ON(retval == NULL);
43383 +
43384 + return retval;
43385 +}
43386 +
43387 +static void
43388 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43389 +{
43390 + struct task_struct *task = current;
43391 + const struct cred *cred = current_cred();
43392 +
43393 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43394 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43395 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43396 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43397 +
43398 + return;
43399 +}
43400 +
43401 +static void
43402 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43403 +{
43404 + struct task_struct *task = current;
43405 + const struct cred *cred = current_cred();
43406 +
43407 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43408 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43409 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43410 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43411 +
43412 + return;
43413 +}
43414 +
43415 +static void
43416 +gr_log_learn_id_change(const char type, const unsigned int real,
43417 + const unsigned int effective, const unsigned int fs)
43418 +{
43419 + struct task_struct *task = current;
43420 + const struct cred *cred = current_cred();
43421 +
43422 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43423 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43424 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43425 + type, real, effective, fs, &task->signal->saved_ip);
43426 +
43427 + return;
43428 +}
43429 +
43430 +__u32
43431 +gr_check_link(const struct dentry * new_dentry,
43432 + const struct dentry * parent_dentry,
43433 + const struct vfsmount * parent_mnt,
43434 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43435 +{
43436 + struct acl_object_label *obj;
43437 + __u32 oldmode, newmode;
43438 + __u32 needmode;
43439 +
43440 + if (unlikely(!(gr_status & GR_READY)))
43441 + return (GR_CREATE | GR_LINK);
43442 +
43443 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43444 + oldmode = obj->mode;
43445 +
43446 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43447 + oldmode |= (GR_CREATE | GR_LINK);
43448 +
43449 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43450 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43451 + needmode |= GR_SETID | GR_AUDIT_SETID;
43452 +
43453 + newmode =
43454 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43455 + oldmode | needmode);
43456 +
43457 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43458 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43459 + GR_INHERIT | GR_AUDIT_INHERIT);
43460 +
43461 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43462 + goto bad;
43463 +
43464 + if ((oldmode & needmode) != needmode)
43465 + goto bad;
43466 +
43467 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43468 + if ((newmode & needmode) != needmode)
43469 + goto bad;
43470 +
43471 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43472 + return newmode;
43473 +bad:
43474 + needmode = oldmode;
43475 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43476 + needmode |= GR_SETID;
43477 +
43478 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43479 + gr_log_learn(old_dentry, old_mnt, needmode);
43480 + return (GR_CREATE | GR_LINK);
43481 + } else if (newmode & GR_SUPPRESS)
43482 + return GR_SUPPRESS;
43483 + else
43484 + return 0;
43485 +}
43486 +
43487 +__u32
43488 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43489 + const struct vfsmount * mnt)
43490 +{
43491 + __u32 retval = mode;
43492 + struct acl_subject_label *curracl;
43493 + struct acl_object_label *currobj;
43494 +
43495 + if (unlikely(!(gr_status & GR_READY)))
43496 + return (mode & ~GR_AUDITS);
43497 +
43498 + curracl = current->acl;
43499 +
43500 + currobj = chk_obj_label(dentry, mnt, curracl);
43501 + retval = currobj->mode & mode;
43502 +
43503 + /* if we're opening a specified transfer file for writing
43504 + (e.g. /dev/initctl), then transfer our role to init
43505 + */
43506 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43507 + current->role->roletype & GR_ROLE_PERSIST)) {
43508 + struct task_struct *task = init_pid_ns.child_reaper;
43509 +
43510 + if (task->role != current->role) {
43511 + task->acl_sp_role = 0;
43512 + task->acl_role_id = current->acl_role_id;
43513 + task->role = current->role;
43514 + rcu_read_lock();
43515 + read_lock(&grsec_exec_file_lock);
43516 + gr_apply_subject_to_task(task);
43517 + read_unlock(&grsec_exec_file_lock);
43518 + rcu_read_unlock();
43519 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43520 + }
43521 + }
43522 +
43523 + if (unlikely
43524 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43525 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43526 + __u32 new_mode = mode;
43527 +
43528 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43529 +
43530 + retval = new_mode;
43531 +
43532 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43533 + new_mode |= GR_INHERIT;
43534 +
43535 + if (!(mode & GR_NOLEARN))
43536 + gr_log_learn(dentry, mnt, new_mode);
43537 + }
43538 +
43539 + return retval;
43540 +}
43541 +
43542 +__u32
43543 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43544 + const struct vfsmount * mnt, const __u32 mode)
43545 +{
43546 + struct name_entry *match;
43547 + struct acl_object_label *matchpo;
43548 + struct acl_subject_label *curracl;
43549 + char *path;
43550 + __u32 retval;
43551 +
43552 + if (unlikely(!(gr_status & GR_READY)))
43553 + return (mode & ~GR_AUDITS);
43554 +
43555 + preempt_disable();
43556 + path = gr_to_filename_rbac(new_dentry, mnt);
43557 + match = lookup_name_entry_create(path);
43558 +
43559 + if (!match)
43560 + goto check_parent;
43561 +
43562 + curracl = current->acl;
43563 +
43564 + read_lock(&gr_inode_lock);
43565 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43566 + read_unlock(&gr_inode_lock);
43567 +
43568 + if (matchpo) {
43569 + if ((matchpo->mode & mode) !=
43570 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43571 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43572 + __u32 new_mode = mode;
43573 +
43574 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43575 +
43576 + gr_log_learn(new_dentry, mnt, new_mode);
43577 +
43578 + preempt_enable();
43579 + return new_mode;
43580 + }
43581 + preempt_enable();
43582 + return (matchpo->mode & mode);
43583 + }
43584 +
43585 + check_parent:
43586 + curracl = current->acl;
43587 +
43588 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43589 + retval = matchpo->mode & mode;
43590 +
43591 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43592 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43593 + __u32 new_mode = mode;
43594 +
43595 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43596 +
43597 + gr_log_learn(new_dentry, mnt, new_mode);
43598 + preempt_enable();
43599 + return new_mode;
43600 + }
43601 +
43602 + preempt_enable();
43603 + return retval;
43604 +}
43605 +
43606 +int
43607 +gr_check_hidden_task(const struct task_struct *task)
43608 +{
43609 + if (unlikely(!(gr_status & GR_READY)))
43610 + return 0;
43611 +
43612 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43613 + return 1;
43614 +
43615 + return 0;
43616 +}
43617 +
43618 +int
43619 +gr_check_protected_task(const struct task_struct *task)
43620 +{
43621 + if (unlikely(!(gr_status & GR_READY) || !task))
43622 + return 0;
43623 +
43624 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43625 + task->acl != current->acl)
43626 + return 1;
43627 +
43628 + return 0;
43629 +}
43630 +
43631 +int
43632 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43633 +{
43634 + struct task_struct *p;
43635 + int ret = 0;
43636 +
43637 + if (unlikely(!(gr_status & GR_READY) || !pid))
43638 + return ret;
43639 +
43640 + read_lock(&tasklist_lock);
43641 + do_each_pid_task(pid, type, p) {
43642 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43643 + p->acl != current->acl) {
43644 + ret = 1;
43645 + goto out;
43646 + }
43647 + } while_each_pid_task(pid, type, p);
43648 +out:
43649 + read_unlock(&tasklist_lock);
43650 +
43651 + return ret;
43652 +}
43653 +
43654 +void
43655 +gr_copy_label(struct task_struct *tsk)
43656 +{
43657 + tsk->signal->used_accept = 0;
43658 + tsk->acl_sp_role = 0;
43659 + tsk->acl_role_id = current->acl_role_id;
43660 + tsk->acl = current->acl;
43661 + tsk->role = current->role;
43662 + tsk->signal->curr_ip = current->signal->curr_ip;
43663 + tsk->signal->saved_ip = current->signal->saved_ip;
43664 + if (current->exec_file)
43665 + get_file(current->exec_file);
43666 + tsk->exec_file = current->exec_file;
43667 + tsk->is_writable = current->is_writable;
43668 + if (unlikely(current->signal->used_accept)) {
43669 + current->signal->curr_ip = 0;
43670 + current->signal->saved_ip = 0;
43671 + }
43672 +
43673 + return;
43674 +}
43675 +
43676 +static void
43677 +gr_set_proc_res(struct task_struct *task)
43678 +{
43679 + struct acl_subject_label *proc;
43680 + unsigned short i;
43681 +
43682 + proc = task->acl;
43683 +
43684 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43685 + return;
43686 +
43687 + for (i = 0; i < RLIM_NLIMITS; i++) {
43688 + if (!(proc->resmask & (1 << i)))
43689 + continue;
43690 +
43691 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43692 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43693 + }
43694 +
43695 + return;
43696 +}
43697 +
43698 +extern int __gr_process_user_ban(struct user_struct *user);
43699 +
43700 +int
43701 +gr_check_user_change(int real, int effective, int fs)
43702 +{
43703 + unsigned int i;
43704 + __u16 num;
43705 + uid_t *uidlist;
43706 + int curuid;
43707 + int realok = 0;
43708 + int effectiveok = 0;
43709 + int fsok = 0;
43710 +
43711 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43712 + struct user_struct *user;
43713 +
43714 + if (real == -1)
43715 + goto skipit;
43716 +
43717 + user = find_user(real);
43718 + if (user == NULL)
43719 + goto skipit;
43720 +
43721 + if (__gr_process_user_ban(user)) {
43722 + /* for find_user */
43723 + free_uid(user);
43724 + return 1;
43725 + }
43726 +
43727 + /* for find_user */
43728 + free_uid(user);
43729 +
43730 +skipit:
43731 +#endif
43732 +
43733 + if (unlikely(!(gr_status & GR_READY)))
43734 + return 0;
43735 +
43736 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43737 + gr_log_learn_id_change('u', real, effective, fs);
43738 +
43739 + num = current->acl->user_trans_num;
43740 + uidlist = current->acl->user_transitions;
43741 +
43742 + if (uidlist == NULL)
43743 + return 0;
43744 +
43745 + if (real == -1)
43746 + realok = 1;
43747 + if (effective == -1)
43748 + effectiveok = 1;
43749 + if (fs == -1)
43750 + fsok = 1;
43751 +
43752 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43753 + for (i = 0; i < num; i++) {
43754 + curuid = (int)uidlist[i];
43755 + if (real == curuid)
43756 + realok = 1;
43757 + if (effective == curuid)
43758 + effectiveok = 1;
43759 + if (fs == curuid)
43760 + fsok = 1;
43761 + }
43762 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43763 + for (i = 0; i < num; i++) {
43764 + curuid = (int)uidlist[i];
43765 + if (real == curuid)
43766 + break;
43767 + if (effective == curuid)
43768 + break;
43769 + if (fs == curuid)
43770 + break;
43771 + }
43772 + /* not in deny list */
43773 + if (i == num) {
43774 + realok = 1;
43775 + effectiveok = 1;
43776 + fsok = 1;
43777 + }
43778 + }
43779 +
43780 + if (realok && effectiveok && fsok)
43781 + return 0;
43782 + else {
43783 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43784 + return 1;
43785 + }
43786 +}
43787 +
43788 +int
43789 +gr_check_group_change(int real, int effective, int fs)
43790 +{
43791 + unsigned int i;
43792 + __u16 num;
43793 + gid_t *gidlist;
43794 + int curgid;
43795 + int realok = 0;
43796 + int effectiveok = 0;
43797 + int fsok = 0;
43798 +
43799 + if (unlikely(!(gr_status & GR_READY)))
43800 + return 0;
43801 +
43802 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43803 + gr_log_learn_id_change('g', real, effective, fs);
43804 +
43805 + num = current->acl->group_trans_num;
43806 + gidlist = current->acl->group_transitions;
43807 +
43808 + if (gidlist == NULL)
43809 + return 0;
43810 +
43811 + if (real == -1)
43812 + realok = 1;
43813 + if (effective == -1)
43814 + effectiveok = 1;
43815 + if (fs == -1)
43816 + fsok = 1;
43817 +
43818 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43819 + for (i = 0; i < num; i++) {
43820 + curgid = (int)gidlist[i];
43821 + if (real == curgid)
43822 + realok = 1;
43823 + if (effective == curgid)
43824 + effectiveok = 1;
43825 + if (fs == curgid)
43826 + fsok = 1;
43827 + }
43828 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43829 + for (i = 0; i < num; i++) {
43830 + curgid = (int)gidlist[i];
43831 + if (real == curgid)
43832 + break;
43833 + if (effective == curgid)
43834 + break;
43835 + if (fs == curgid)
43836 + break;
43837 + }
43838 + /* not in deny list */
43839 + if (i == num) {
43840 + realok = 1;
43841 + effectiveok = 1;
43842 + fsok = 1;
43843 + }
43844 + }
43845 +
43846 + if (realok && effectiveok && fsok)
43847 + return 0;
43848 + else {
43849 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43850 + return 1;
43851 + }
43852 +}
43853 +
43854 +void
43855 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43856 +{
43857 + struct acl_role_label *role = task->role;
43858 + struct acl_subject_label *subj = NULL;
43859 + struct acl_object_label *obj;
43860 + struct file *filp;
43861 +
43862 + if (unlikely(!(gr_status & GR_READY)))
43863 + return;
43864 +
43865 + filp = task->exec_file;
43866 +
43867 + /* kernel process, we'll give them the kernel role */
43868 + if (unlikely(!filp)) {
43869 + task->role = kernel_role;
43870 + task->acl = kernel_role->root_label;
43871 + return;
43872 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43873 + role = lookup_acl_role_label(task, uid, gid);
43874 +
43875 + /* perform subject lookup in possibly new role
43876 + we can use this result below in the case where role == task->role
43877 + */
43878 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43879 +
43880 + /* if we changed uid/gid, but result in the same role
43881 + and are using inheritance, don't lose the inherited subject
43882 + if current subject is other than what normal lookup
43883 + would result in, we arrived via inheritance, don't
43884 + lose subject
43885 + */
43886 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43887 + (subj == task->acl)))
43888 + task->acl = subj;
43889 +
43890 + task->role = role;
43891 +
43892 + task->is_writable = 0;
43893 +
43894 + /* ignore additional mmap checks for processes that are writable
43895 + by the default ACL */
43896 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43897 + if (unlikely(obj->mode & GR_WRITE))
43898 + task->is_writable = 1;
43899 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43900 + if (unlikely(obj->mode & GR_WRITE))
43901 + task->is_writable = 1;
43902 +
43903 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43904 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43905 +#endif
43906 +
43907 + gr_set_proc_res(task);
43908 +
43909 + return;
43910 +}
43911 +
43912 +int
43913 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43914 + const int unsafe_share)
43915 +{
43916 + struct task_struct *task = current;
43917 + struct acl_subject_label *newacl;
43918 + struct acl_object_label *obj;
43919 + __u32 retmode;
43920 +
43921 + if (unlikely(!(gr_status & GR_READY)))
43922 + return 0;
43923 +
43924 + newacl = chk_subj_label(dentry, mnt, task->role);
43925 +
43926 + task_lock(task);
43927 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43928 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43929 + !(task->role->roletype & GR_ROLE_GOD) &&
43930 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43931 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43932 + task_unlock(task);
43933 + if (unsafe_share)
43934 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43935 + else
43936 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43937 + return -EACCES;
43938 + }
43939 + task_unlock(task);
43940 +
43941 + obj = chk_obj_label(dentry, mnt, task->acl);
43942 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43943 +
43944 + if (!(task->acl->mode & GR_INHERITLEARN) &&
43945 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43946 + if (obj->nested)
43947 + task->acl = obj->nested;
43948 + else
43949 + task->acl = newacl;
43950 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43951 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43952 +
43953 + task->is_writable = 0;
43954 +
43955 + /* ignore additional mmap checks for processes that are writable
43956 + by the default ACL */
43957 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
43958 + if (unlikely(obj->mode & GR_WRITE))
43959 + task->is_writable = 1;
43960 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
43961 + if (unlikely(obj->mode & GR_WRITE))
43962 + task->is_writable = 1;
43963 +
43964 + gr_set_proc_res(task);
43965 +
43966 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43967 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43968 +#endif
43969 + return 0;
43970 +}
43971 +
43972 +/* always called with valid inodev ptr */
43973 +static void
43974 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43975 +{
43976 + struct acl_object_label *matchpo;
43977 + struct acl_subject_label *matchps;
43978 + struct acl_subject_label *subj;
43979 + struct acl_role_label *role;
43980 + unsigned int x;
43981 +
43982 + FOR_EACH_ROLE_START(role)
43983 + FOR_EACH_SUBJECT_START(role, subj, x)
43984 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43985 + matchpo->mode |= GR_DELETED;
43986 + FOR_EACH_SUBJECT_END(subj,x)
43987 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
43988 + if (subj->inode == ino && subj->device == dev)
43989 + subj->mode |= GR_DELETED;
43990 + FOR_EACH_NESTED_SUBJECT_END(subj)
43991 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
43992 + matchps->mode |= GR_DELETED;
43993 + FOR_EACH_ROLE_END(role)
43994 +
43995 + inodev->nentry->deleted = 1;
43996 +
43997 + return;
43998 +}
43999 +
44000 +void
44001 +gr_handle_delete(const ino_t ino, const dev_t dev)
44002 +{
44003 + struct inodev_entry *inodev;
44004 +
44005 + if (unlikely(!(gr_status & GR_READY)))
44006 + return;
44007 +
44008 + write_lock(&gr_inode_lock);
44009 + inodev = lookup_inodev_entry(ino, dev);
44010 + if (inodev != NULL)
44011 + do_handle_delete(inodev, ino, dev);
44012 + write_unlock(&gr_inode_lock);
44013 +
44014 + return;
44015 +}
44016 +
44017 +static void
44018 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44019 + const ino_t newinode, const dev_t newdevice,
44020 + struct acl_subject_label *subj)
44021 +{
44022 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44023 + struct acl_object_label *match;
44024 +
44025 + match = subj->obj_hash[index];
44026 +
44027 + while (match && (match->inode != oldinode ||
44028 + match->device != olddevice ||
44029 + !(match->mode & GR_DELETED)))
44030 + match = match->next;
44031 +
44032 + if (match && (match->inode == oldinode)
44033 + && (match->device == olddevice)
44034 + && (match->mode & GR_DELETED)) {
44035 + if (match->prev == NULL) {
44036 + subj->obj_hash[index] = match->next;
44037 + if (match->next != NULL)
44038 + match->next->prev = NULL;
44039 + } else {
44040 + match->prev->next = match->next;
44041 + if (match->next != NULL)
44042 + match->next->prev = match->prev;
44043 + }
44044 + match->prev = NULL;
44045 + match->next = NULL;
44046 + match->inode = newinode;
44047 + match->device = newdevice;
44048 + match->mode &= ~GR_DELETED;
44049 +
44050 + insert_acl_obj_label(match, subj);
44051 + }
44052 +
44053 + return;
44054 +}
44055 +
44056 +static void
44057 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44058 + const ino_t newinode, const dev_t newdevice,
44059 + struct acl_role_label *role)
44060 +{
44061 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44062 + struct acl_subject_label *match;
44063 +
44064 + match = role->subj_hash[index];
44065 +
44066 + while (match && (match->inode != oldinode ||
44067 + match->device != olddevice ||
44068 + !(match->mode & GR_DELETED)))
44069 + match = match->next;
44070 +
44071 + if (match && (match->inode == oldinode)
44072 + && (match->device == olddevice)
44073 + && (match->mode & GR_DELETED)) {
44074 + if (match->prev == NULL) {
44075 + role->subj_hash[index] = match->next;
44076 + if (match->next != NULL)
44077 + match->next->prev = NULL;
44078 + } else {
44079 + match->prev->next = match->next;
44080 + if (match->next != NULL)
44081 + match->next->prev = match->prev;
44082 + }
44083 + match->prev = NULL;
44084 + match->next = NULL;
44085 + match->inode = newinode;
44086 + match->device = newdevice;
44087 + match->mode &= ~GR_DELETED;
44088 +
44089 + insert_acl_subj_label(match, role);
44090 + }
44091 +
44092 + return;
44093 +}
44094 +
44095 +static void
44096 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44097 + const ino_t newinode, const dev_t newdevice)
44098 +{
44099 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44100 + struct inodev_entry *match;
44101 +
44102 + match = inodev_set.i_hash[index];
44103 +
44104 + while (match && (match->nentry->inode != oldinode ||
44105 + match->nentry->device != olddevice || !match->nentry->deleted))
44106 + match = match->next;
44107 +
44108 + if (match && (match->nentry->inode == oldinode)
44109 + && (match->nentry->device == olddevice) &&
44110 + match->nentry->deleted) {
44111 + if (match->prev == NULL) {
44112 + inodev_set.i_hash[index] = match->next;
44113 + if (match->next != NULL)
44114 + match->next->prev = NULL;
44115 + } else {
44116 + match->prev->next = match->next;
44117 + if (match->next != NULL)
44118 + match->next->prev = match->prev;
44119 + }
44120 + match->prev = NULL;
44121 + match->next = NULL;
44122 + match->nentry->inode = newinode;
44123 + match->nentry->device = newdevice;
44124 + match->nentry->deleted = 0;
44125 +
44126 + insert_inodev_entry(match);
44127 + }
44128 +
44129 + return;
44130 +}
44131 +
44132 +static void
44133 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44134 + const struct vfsmount *mnt)
44135 +{
44136 + struct acl_subject_label *subj;
44137 + struct acl_role_label *role;
44138 + unsigned int x;
44139 + ino_t ino = dentry->d_inode->i_ino;
44140 + dev_t dev = __get_dev(dentry);
44141 +
44142 + FOR_EACH_ROLE_START(role)
44143 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44144 +
44145 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44146 + if ((subj->inode == ino) && (subj->device == dev)) {
44147 + subj->inode = ino;
44148 + subj->device = dev;
44149 + }
44150 + FOR_EACH_NESTED_SUBJECT_END(subj)
44151 + FOR_EACH_SUBJECT_START(role, subj, x)
44152 + update_acl_obj_label(matchn->inode, matchn->device,
44153 + ino, dev, subj);
44154 + FOR_EACH_SUBJECT_END(subj,x)
44155 + FOR_EACH_ROLE_END(role)
44156 +
44157 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44158 +
44159 + return;
44160 +}
44161 +
44162 +void
44163 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44164 +{
44165 + struct name_entry *matchn;
44166 +
44167 + if (unlikely(!(gr_status & GR_READY)))
44168 + return;
44169 +
44170 + preempt_disable();
44171 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44172 +
44173 + if (unlikely((unsigned long)matchn)) {
44174 + write_lock(&gr_inode_lock);
44175 + do_handle_create(matchn, dentry, mnt);
44176 + write_unlock(&gr_inode_lock);
44177 + }
44178 + preempt_enable();
44179 +
44180 + return;
44181 +}
44182 +
44183 +void
44184 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44185 + struct dentry *old_dentry,
44186 + struct dentry *new_dentry,
44187 + struct vfsmount *mnt, const __u8 replace)
44188 +{
44189 + struct name_entry *matchn;
44190 + struct inodev_entry *inodev;
44191 + ino_t old_ino = old_dentry->d_inode->i_ino;
44192 + dev_t old_dev = __get_dev(old_dentry);
44193 +
44194 + /* vfs_rename swaps the name and parent link for old_dentry and
44195 + new_dentry
44196 + at this point, old_dentry has the new name, parent link, and inode
44197 + for the renamed file
44198 + if a file is being replaced by a rename, new_dentry has the inode
44199 + and name for the replaced file
44200 + */
44201 +
44202 + if (unlikely(!(gr_status & GR_READY)))
44203 + return;
44204 +
44205 + preempt_disable();
44206 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44207 +
44208 + /* we wouldn't have to check d_inode if it weren't for
44209 + NFS silly-renaming
44210 + */
44211 +
44212 + write_lock(&gr_inode_lock);
44213 + if (unlikely(replace && new_dentry->d_inode)) {
44214 + ino_t new_ino = new_dentry->d_inode->i_ino;
44215 + dev_t new_dev = __get_dev(new_dentry);
44216 +
44217 + inodev = lookup_inodev_entry(new_ino, new_dev);
44218 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44219 + do_handle_delete(inodev, new_ino, new_dev);
44220 + }
44221 +
44222 + inodev = lookup_inodev_entry(old_ino, old_dev);
44223 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44224 + do_handle_delete(inodev, old_ino, old_dev);
44225 +
44226 + if (unlikely((unsigned long)matchn))
44227 + do_handle_create(matchn, old_dentry, mnt);
44228 +
44229 + write_unlock(&gr_inode_lock);
44230 + preempt_enable();
44231 +
44232 + return;
44233 +}
44234 +
44235 +static int
44236 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44237 + unsigned char **sum)
44238 +{
44239 + struct acl_role_label *r;
44240 + struct role_allowed_ip *ipp;
44241 + struct role_transition *trans;
44242 + unsigned int i;
44243 + int found = 0;
44244 + u32 curr_ip = current->signal->curr_ip;
44245 +
44246 + current->signal->saved_ip = curr_ip;
44247 +
44248 + /* check transition table */
44249 +
44250 + for (trans = current->role->transitions; trans; trans = trans->next) {
44251 + if (!strcmp(rolename, trans->rolename)) {
44252 + found = 1;
44253 + break;
44254 + }
44255 + }
44256 +
44257 + if (!found)
44258 + return 0;
44259 +
44260 + /* handle special roles that do not require authentication
44261 + and check ip */
44262 +
44263 + FOR_EACH_ROLE_START(r)
44264 + if (!strcmp(rolename, r->rolename) &&
44265 + (r->roletype & GR_ROLE_SPECIAL)) {
44266 + found = 0;
44267 + if (r->allowed_ips != NULL) {
44268 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44269 + if ((ntohl(curr_ip) & ipp->netmask) ==
44270 + (ntohl(ipp->addr) & ipp->netmask))
44271 + found = 1;
44272 + }
44273 + } else
44274 + found = 2;
44275 + if (!found)
44276 + return 0;
44277 +
44278 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44279 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44280 + *salt = NULL;
44281 + *sum = NULL;
44282 + return 1;
44283 + }
44284 + }
44285 + FOR_EACH_ROLE_END(r)
44286 +
44287 + for (i = 0; i < num_sprole_pws; i++) {
44288 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44289 + *salt = acl_special_roles[i]->salt;
44290 + *sum = acl_special_roles[i]->sum;
44291 + return 1;
44292 + }
44293 + }
44294 +
44295 + return 0;
44296 +}
44297 +
44298 +static void
44299 +assign_special_role(char *rolename)
44300 +{
44301 + struct acl_object_label *obj;
44302 + struct acl_role_label *r;
44303 + struct acl_role_label *assigned = NULL;
44304 + struct task_struct *tsk;
44305 + struct file *filp;
44306 +
44307 + FOR_EACH_ROLE_START(r)
44308 + if (!strcmp(rolename, r->rolename) &&
44309 + (r->roletype & GR_ROLE_SPECIAL)) {
44310 + assigned = r;
44311 + break;
44312 + }
44313 + FOR_EACH_ROLE_END(r)
44314 +
44315 + if (!assigned)
44316 + return;
44317 +
44318 + read_lock(&tasklist_lock);
44319 + read_lock(&grsec_exec_file_lock);
44320 +
44321 + tsk = current->real_parent;
44322 + if (tsk == NULL)
44323 + goto out_unlock;
44324 +
44325 + filp = tsk->exec_file;
44326 + if (filp == NULL)
44327 + goto out_unlock;
44328 +
44329 + tsk->is_writable = 0;
44330 +
44331 + tsk->acl_sp_role = 1;
44332 + tsk->acl_role_id = ++acl_sp_role_value;
44333 + tsk->role = assigned;
44334 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44335 +
44336 + /* ignore additional mmap checks for processes that are writable
44337 + by the default ACL */
44338 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44339 + if (unlikely(obj->mode & GR_WRITE))
44340 + tsk->is_writable = 1;
44341 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44342 + if (unlikely(obj->mode & GR_WRITE))
44343 + tsk->is_writable = 1;
44344 +
44345 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44346 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44347 +#endif
44348 +
44349 +out_unlock:
44350 + read_unlock(&grsec_exec_file_lock);
44351 + read_unlock(&tasklist_lock);
44352 + return;
44353 +}
44354 +
44355 +int gr_check_secure_terminal(struct task_struct *task)
44356 +{
44357 + struct task_struct *p, *p2, *p3;
44358 + struct files_struct *files;
44359 + struct fdtable *fdt;
44360 + struct file *our_file = NULL, *file;
44361 + int i;
44362 +
44363 + if (task->signal->tty == NULL)
44364 + return 1;
44365 +
44366 + files = get_files_struct(task);
44367 + if (files != NULL) {
44368 + rcu_read_lock();
44369 + fdt = files_fdtable(files);
44370 + for (i=0; i < fdt->max_fds; i++) {
44371 + file = fcheck_files(files, i);
44372 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44373 + get_file(file);
44374 + our_file = file;
44375 + }
44376 + }
44377 + rcu_read_unlock();
44378 + put_files_struct(files);
44379 + }
44380 +
44381 + if (our_file == NULL)
44382 + return 1;
44383 +
44384 + read_lock(&tasklist_lock);
44385 + do_each_thread(p2, p) {
44386 + files = get_files_struct(p);
44387 + if (files == NULL ||
44388 + (p->signal && p->signal->tty == task->signal->tty)) {
44389 + if (files != NULL)
44390 + put_files_struct(files);
44391 + continue;
44392 + }
44393 + rcu_read_lock();
44394 + fdt = files_fdtable(files);
44395 + for (i=0; i < fdt->max_fds; i++) {
44396 + file = fcheck_files(files, i);
44397 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44398 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44399 + p3 = task;
44400 + while (p3->pid > 0) {
44401 + if (p3 == p)
44402 + break;
44403 + p3 = p3->real_parent;
44404 + }
44405 + if (p3 == p)
44406 + break;
44407 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44408 + gr_handle_alertkill(p);
44409 + rcu_read_unlock();
44410 + put_files_struct(files);
44411 + read_unlock(&tasklist_lock);
44412 + fput(our_file);
44413 + return 0;
44414 + }
44415 + }
44416 + rcu_read_unlock();
44417 + put_files_struct(files);
44418 + } while_each_thread(p2, p);
44419 + read_unlock(&tasklist_lock);
44420 +
44421 + fput(our_file);
44422 + return 1;
44423 +}
44424 +
44425 +ssize_t
44426 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44427 +{
44428 + struct gr_arg_wrapper uwrap;
44429 + unsigned char *sprole_salt = NULL;
44430 + unsigned char *sprole_sum = NULL;
44431 + int error = sizeof (struct gr_arg_wrapper);
44432 + int error2 = 0;
44433 +
44434 + mutex_lock(&gr_dev_mutex);
44435 +
44436 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44437 + error = -EPERM;
44438 + goto out;
44439 + }
44440 +
44441 + if (count != sizeof (struct gr_arg_wrapper)) {
44442 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44443 + error = -EINVAL;
44444 + goto out;
44445 + }
44446 +
44447 +
44448 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44449 + gr_auth_expires = 0;
44450 + gr_auth_attempts = 0;
44451 + }
44452 +
44453 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44454 + error = -EFAULT;
44455 + goto out;
44456 + }
44457 +
44458 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44459 + error = -EINVAL;
44460 + goto out;
44461 + }
44462 +
44463 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44464 + error = -EFAULT;
44465 + goto out;
44466 + }
44467 +
44468 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44469 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44470 + time_after(gr_auth_expires, get_seconds())) {
44471 + error = -EBUSY;
44472 + goto out;
44473 + }
44474 +
44475 + /* if non-root trying to do anything other than use a special role,
44476 + do not attempt authentication, do not count towards authentication
44477 + locking
44478 + */
44479 +
44480 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44481 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44482 + current_uid()) {
44483 + error = -EPERM;
44484 + goto out;
44485 + }
44486 +
44487 + /* ensure pw and special role name are null terminated */
44488 +
44489 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44490 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44491 +
44492 + /* Okay.
44493 + * We have our enough of the argument structure..(we have yet
44494 + * to copy_from_user the tables themselves) . Copy the tables
44495 + * only if we need them, i.e. for loading operations. */
44496 +
44497 + switch (gr_usermode->mode) {
44498 + case GR_STATUS:
44499 + if (gr_status & GR_READY) {
44500 + error = 1;
44501 + if (!gr_check_secure_terminal(current))
44502 + error = 3;
44503 + } else
44504 + error = 2;
44505 + goto out;
44506 + case GR_SHUTDOWN:
44507 + if ((gr_status & GR_READY)
44508 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44509 + pax_open_kernel();
44510 + gr_status &= ~GR_READY;
44511 + pax_close_kernel();
44512 +
44513 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44514 + free_variables();
44515 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44516 + memset(gr_system_salt, 0, GR_SALT_LEN);
44517 + memset(gr_system_sum, 0, GR_SHA_LEN);
44518 + } else if (gr_status & GR_READY) {
44519 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44520 + error = -EPERM;
44521 + } else {
44522 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44523 + error = -EAGAIN;
44524 + }
44525 + break;
44526 + case GR_ENABLE:
44527 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44528 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44529 + else {
44530 + if (gr_status & GR_READY)
44531 + error = -EAGAIN;
44532 + else
44533 + error = error2;
44534 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44535 + }
44536 + break;
44537 + case GR_RELOAD:
44538 + if (!(gr_status & GR_READY)) {
44539 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44540 + error = -EAGAIN;
44541 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44542 + preempt_disable();
44543 +
44544 + pax_open_kernel();
44545 + gr_status &= ~GR_READY;
44546 + pax_close_kernel();
44547 +
44548 + free_variables();
44549 + if (!(error2 = gracl_init(gr_usermode))) {
44550 + preempt_enable();
44551 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44552 + } else {
44553 + preempt_enable();
44554 + error = error2;
44555 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44556 + }
44557 + } else {
44558 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44559 + error = -EPERM;
44560 + }
44561 + break;
44562 + case GR_SEGVMOD:
44563 + if (unlikely(!(gr_status & GR_READY))) {
44564 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44565 + error = -EAGAIN;
44566 + break;
44567 + }
44568 +
44569 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44570 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44571 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44572 + struct acl_subject_label *segvacl;
44573 + segvacl =
44574 + lookup_acl_subj_label(gr_usermode->segv_inode,
44575 + gr_usermode->segv_device,
44576 + current->role);
44577 + if (segvacl) {
44578 + segvacl->crashes = 0;
44579 + segvacl->expires = 0;
44580 + }
44581 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44582 + gr_remove_uid(gr_usermode->segv_uid);
44583 + }
44584 + } else {
44585 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44586 + error = -EPERM;
44587 + }
44588 + break;
44589 + case GR_SPROLE:
44590 + case GR_SPROLEPAM:
44591 + if (unlikely(!(gr_status & GR_READY))) {
44592 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44593 + error = -EAGAIN;
44594 + break;
44595 + }
44596 +
44597 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44598 + current->role->expires = 0;
44599 + current->role->auth_attempts = 0;
44600 + }
44601 +
44602 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44603 + time_after(current->role->expires, get_seconds())) {
44604 + error = -EBUSY;
44605 + goto out;
44606 + }
44607 +
44608 + if (lookup_special_role_auth
44609 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44610 + && ((!sprole_salt && !sprole_sum)
44611 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44612 + char *p = "";
44613 + assign_special_role(gr_usermode->sp_role);
44614 + read_lock(&tasklist_lock);
44615 + if (current->real_parent)
44616 + p = current->real_parent->role->rolename;
44617 + read_unlock(&tasklist_lock);
44618 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44619 + p, acl_sp_role_value);
44620 + } else {
44621 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44622 + error = -EPERM;
44623 + if(!(current->role->auth_attempts++))
44624 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44625 +
44626 + goto out;
44627 + }
44628 + break;
44629 + case GR_UNSPROLE:
44630 + if (unlikely(!(gr_status & GR_READY))) {
44631 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44632 + error = -EAGAIN;
44633 + break;
44634 + }
44635 +
44636 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44637 + char *p = "";
44638 + int i = 0;
44639 +
44640 + read_lock(&tasklist_lock);
44641 + if (current->real_parent) {
44642 + p = current->real_parent->role->rolename;
44643 + i = current->real_parent->acl_role_id;
44644 + }
44645 + read_unlock(&tasklist_lock);
44646 +
44647 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44648 + gr_set_acls(1);
44649 + } else {
44650 + error = -EPERM;
44651 + goto out;
44652 + }
44653 + break;
44654 + default:
44655 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44656 + error = -EINVAL;
44657 + break;
44658 + }
44659 +
44660 + if (error != -EPERM)
44661 + goto out;
44662 +
44663 + if(!(gr_auth_attempts++))
44664 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44665 +
44666 + out:
44667 + mutex_unlock(&gr_dev_mutex);
44668 + return error;
44669 +}
44670 +
44671 +/* must be called with
44672 + rcu_read_lock();
44673 + read_lock(&tasklist_lock);
44674 + read_lock(&grsec_exec_file_lock);
44675 +*/
44676 +int gr_apply_subject_to_task(struct task_struct *task)
44677 +{
44678 + struct acl_object_label *obj;
44679 + char *tmpname;
44680 + struct acl_subject_label *tmpsubj;
44681 + struct file *filp;
44682 + struct name_entry *nmatch;
44683 +
44684 + filp = task->exec_file;
44685 + if (filp == NULL)
44686 + return 0;
44687 +
44688 + /* the following is to apply the correct subject
44689 + on binaries running when the RBAC system
44690 + is enabled, when the binaries have been
44691 + replaced or deleted since their execution
44692 + -----
44693 + when the RBAC system starts, the inode/dev
44694 + from exec_file will be one the RBAC system
44695 + is unaware of. It only knows the inode/dev
44696 + of the present file on disk, or the absence
44697 + of it.
44698 + */
44699 + preempt_disable();
44700 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44701 +
44702 + nmatch = lookup_name_entry(tmpname);
44703 + preempt_enable();
44704 + tmpsubj = NULL;
44705 + if (nmatch) {
44706 + if (nmatch->deleted)
44707 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44708 + else
44709 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44710 + if (tmpsubj != NULL)
44711 + task->acl = tmpsubj;
44712 + }
44713 + if (tmpsubj == NULL)
44714 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44715 + task->role);
44716 + if (task->acl) {
44717 + task->is_writable = 0;
44718 + /* ignore additional mmap checks for processes that are writable
44719 + by the default ACL */
44720 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44721 + if (unlikely(obj->mode & GR_WRITE))
44722 + task->is_writable = 1;
44723 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44724 + if (unlikely(obj->mode & GR_WRITE))
44725 + task->is_writable = 1;
44726 +
44727 + gr_set_proc_res(task);
44728 +
44729 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44730 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44731 +#endif
44732 + } else {
44733 + return 1;
44734 + }
44735 +
44736 + return 0;
44737 +}
44738 +
44739 +int
44740 +gr_set_acls(const int type)
44741 +{
44742 + struct task_struct *task, *task2;
44743 + struct acl_role_label *role = current->role;
44744 + __u16 acl_role_id = current->acl_role_id;
44745 + const struct cred *cred;
44746 + int ret;
44747 +
44748 + rcu_read_lock();
44749 + read_lock(&tasklist_lock);
44750 + read_lock(&grsec_exec_file_lock);
44751 + do_each_thread(task2, task) {
44752 + /* check to see if we're called from the exit handler,
44753 + if so, only replace ACLs that have inherited the admin
44754 + ACL */
44755 +
44756 + if (type && (task->role != role ||
44757 + task->acl_role_id != acl_role_id))
44758 + continue;
44759 +
44760 + task->acl_role_id = 0;
44761 + task->acl_sp_role = 0;
44762 +
44763 + if (task->exec_file) {
44764 + cred = __task_cred(task);
44765 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44766 + ret = gr_apply_subject_to_task(task);
44767 + if (ret) {
44768 + read_unlock(&grsec_exec_file_lock);
44769 + read_unlock(&tasklist_lock);
44770 + rcu_read_unlock();
44771 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44772 + return ret;
44773 + }
44774 + } else {
44775 + // it's a kernel process
44776 + task->role = kernel_role;
44777 + task->acl = kernel_role->root_label;
44778 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44779 + task->acl->mode &= ~GR_PROCFIND;
44780 +#endif
44781 + }
44782 + } while_each_thread(task2, task);
44783 + read_unlock(&grsec_exec_file_lock);
44784 + read_unlock(&tasklist_lock);
44785 + rcu_read_unlock();
44786 +
44787 + return 0;
44788 +}
44789 +
44790 +void
44791 +gr_learn_resource(const struct task_struct *task,
44792 + const int res, const unsigned long wanted, const int gt)
44793 +{
44794 + struct acl_subject_label *acl;
44795 + const struct cred *cred;
44796 +
44797 + if (unlikely((gr_status & GR_READY) &&
44798 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44799 + goto skip_reslog;
44800 +
44801 +#ifdef CONFIG_GRKERNSEC_RESLOG
44802 + gr_log_resource(task, res, wanted, gt);
44803 +#endif
44804 + skip_reslog:
44805 +
44806 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44807 + return;
44808 +
44809 + acl = task->acl;
44810 +
44811 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44812 + !(acl->resmask & (1 << (unsigned short) res))))
44813 + return;
44814 +
44815 + if (wanted >= acl->res[res].rlim_cur) {
44816 + unsigned long res_add;
44817 +
44818 + res_add = wanted;
44819 + switch (res) {
44820 + case RLIMIT_CPU:
44821 + res_add += GR_RLIM_CPU_BUMP;
44822 + break;
44823 + case RLIMIT_FSIZE:
44824 + res_add += GR_RLIM_FSIZE_BUMP;
44825 + break;
44826 + case RLIMIT_DATA:
44827 + res_add += GR_RLIM_DATA_BUMP;
44828 + break;
44829 + case RLIMIT_STACK:
44830 + res_add += GR_RLIM_STACK_BUMP;
44831 + break;
44832 + case RLIMIT_CORE:
44833 + res_add += GR_RLIM_CORE_BUMP;
44834 + break;
44835 + case RLIMIT_RSS:
44836 + res_add += GR_RLIM_RSS_BUMP;
44837 + break;
44838 + case RLIMIT_NPROC:
44839 + res_add += GR_RLIM_NPROC_BUMP;
44840 + break;
44841 + case RLIMIT_NOFILE:
44842 + res_add += GR_RLIM_NOFILE_BUMP;
44843 + break;
44844 + case RLIMIT_MEMLOCK:
44845 + res_add += GR_RLIM_MEMLOCK_BUMP;
44846 + break;
44847 + case RLIMIT_AS:
44848 + res_add += GR_RLIM_AS_BUMP;
44849 + break;
44850 + case RLIMIT_LOCKS:
44851 + res_add += GR_RLIM_LOCKS_BUMP;
44852 + break;
44853 + case RLIMIT_SIGPENDING:
44854 + res_add += GR_RLIM_SIGPENDING_BUMP;
44855 + break;
44856 + case RLIMIT_MSGQUEUE:
44857 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44858 + break;
44859 + case RLIMIT_NICE:
44860 + res_add += GR_RLIM_NICE_BUMP;
44861 + break;
44862 + case RLIMIT_RTPRIO:
44863 + res_add += GR_RLIM_RTPRIO_BUMP;
44864 + break;
44865 + case RLIMIT_RTTIME:
44866 + res_add += GR_RLIM_RTTIME_BUMP;
44867 + break;
44868 + }
44869 +
44870 + acl->res[res].rlim_cur = res_add;
44871 +
44872 + if (wanted > acl->res[res].rlim_max)
44873 + acl->res[res].rlim_max = res_add;
44874 +
44875 + /* only log the subject filename, since resource logging is supported for
44876 + single-subject learning only */
44877 + rcu_read_lock();
44878 + cred = __task_cred(task);
44879 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44880 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44881 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44882 + "", (unsigned long) res, &task->signal->saved_ip);
44883 + rcu_read_unlock();
44884 + }
44885 +
44886 + return;
44887 +}
44888 +
44889 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44890 +void
44891 +pax_set_initial_flags(struct linux_binprm *bprm)
44892 +{
44893 + struct task_struct *task = current;
44894 + struct acl_subject_label *proc;
44895 + unsigned long flags;
44896 +
44897 + if (unlikely(!(gr_status & GR_READY)))
44898 + return;
44899 +
44900 + flags = pax_get_flags(task);
44901 +
44902 + proc = task->acl;
44903 +
44904 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44905 + flags &= ~MF_PAX_PAGEEXEC;
44906 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44907 + flags &= ~MF_PAX_SEGMEXEC;
44908 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44909 + flags &= ~MF_PAX_RANDMMAP;
44910 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44911 + flags &= ~MF_PAX_EMUTRAMP;
44912 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44913 + flags &= ~MF_PAX_MPROTECT;
44914 +
44915 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44916 + flags |= MF_PAX_PAGEEXEC;
44917 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44918 + flags |= MF_PAX_SEGMEXEC;
44919 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44920 + flags |= MF_PAX_RANDMMAP;
44921 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44922 + flags |= MF_PAX_EMUTRAMP;
44923 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44924 + flags |= MF_PAX_MPROTECT;
44925 +
44926 + pax_set_flags(task, flags);
44927 +
44928 + return;
44929 +}
44930 +#endif
44931 +
44932 +#ifdef CONFIG_SYSCTL
44933 +/* Eric Biederman likes breaking userland ABI and every inode-based security
44934 + system to save 35kb of memory */
44935 +
44936 +/* we modify the passed in filename, but adjust it back before returning */
44937 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44938 +{
44939 + struct name_entry *nmatch;
44940 + char *p, *lastp = NULL;
44941 + struct acl_object_label *obj = NULL, *tmp;
44942 + struct acl_subject_label *tmpsubj;
44943 + char c = '\0';
44944 +
44945 + read_lock(&gr_inode_lock);
44946 +
44947 + p = name + len - 1;
44948 + do {
44949 + nmatch = lookup_name_entry(name);
44950 + if (lastp != NULL)
44951 + *lastp = c;
44952 +
44953 + if (nmatch == NULL)
44954 + goto next_component;
44955 + tmpsubj = current->acl;
44956 + do {
44957 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44958 + if (obj != NULL) {
44959 + tmp = obj->globbed;
44960 + while (tmp) {
44961 + if (!glob_match(tmp->filename, name)) {
44962 + obj = tmp;
44963 + goto found_obj;
44964 + }
44965 + tmp = tmp->next;
44966 + }
44967 + goto found_obj;
44968 + }
44969 + } while ((tmpsubj = tmpsubj->parent_subject));
44970 +next_component:
44971 + /* end case */
44972 + if (p == name)
44973 + break;
44974 +
44975 + while (*p != '/')
44976 + p--;
44977 + if (p == name)
44978 + lastp = p + 1;
44979 + else {
44980 + lastp = p;
44981 + p--;
44982 + }
44983 + c = *lastp;
44984 + *lastp = '\0';
44985 + } while (1);
44986 +found_obj:
44987 + read_unlock(&gr_inode_lock);
44988 + /* obj returned will always be non-null */
44989 + return obj;
44990 +}
44991 +
44992 +/* returns 0 when allowing, non-zero on error
44993 + op of 0 is used for readdir, so we don't log the names of hidden files
44994 +*/
44995 +__u32
44996 +gr_handle_sysctl(const struct ctl_table *table, const int op)
44997 +{
44998 + struct ctl_table *tmp;
44999 + const char *proc_sys = "/proc/sys";
45000 + char *path;
45001 + struct acl_object_label *obj;
45002 + unsigned short len = 0, pos = 0, depth = 0, i;
45003 + __u32 err = 0;
45004 + __u32 mode = 0;
45005 +
45006 + if (unlikely(!(gr_status & GR_READY)))
45007 + return 0;
45008 +
45009 + /* for now, ignore operations on non-sysctl entries if it's not a
45010 + readdir*/
45011 + if (table->child != NULL && op != 0)
45012 + return 0;
45013 +
45014 + mode |= GR_FIND;
45015 + /* it's only a read if it's an entry, read on dirs is for readdir */
45016 + if (op & MAY_READ)
45017 + mode |= GR_READ;
45018 + if (op & MAY_WRITE)
45019 + mode |= GR_WRITE;
45020 +
45021 + preempt_disable();
45022 +
45023 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45024 +
45025 + /* it's only a read/write if it's an actual entry, not a dir
45026 + (which are opened for readdir)
45027 + */
45028 +
45029 + /* convert the requested sysctl entry into a pathname */
45030 +
45031 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45032 + len += strlen(tmp->procname);
45033 + len++;
45034 + depth++;
45035 + }
45036 +
45037 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45038 + /* deny */
45039 + goto out;
45040 + }
45041 +
45042 + memset(path, 0, PAGE_SIZE);
45043 +
45044 + memcpy(path, proc_sys, strlen(proc_sys));
45045 +
45046 + pos += strlen(proc_sys);
45047 +
45048 + for (; depth > 0; depth--) {
45049 + path[pos] = '/';
45050 + pos++;
45051 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45052 + if (depth == i) {
45053 + memcpy(path + pos, tmp->procname,
45054 + strlen(tmp->procname));
45055 + pos += strlen(tmp->procname);
45056 + }
45057 + i++;
45058 + }
45059 + }
45060 +
45061 + obj = gr_lookup_by_name(path, pos);
45062 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45063 +
45064 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45065 + ((err & mode) != mode))) {
45066 + __u32 new_mode = mode;
45067 +
45068 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45069 +
45070 + err = 0;
45071 + gr_log_learn_sysctl(path, new_mode);
45072 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45073 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45074 + err = -ENOENT;
45075 + } else if (!(err & GR_FIND)) {
45076 + err = -ENOENT;
45077 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45078 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45079 + path, (mode & GR_READ) ? " reading" : "",
45080 + (mode & GR_WRITE) ? " writing" : "");
45081 + err = -EACCES;
45082 + } else if ((err & mode) != mode) {
45083 + err = -EACCES;
45084 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45085 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45086 + path, (mode & GR_READ) ? " reading" : "",
45087 + (mode & GR_WRITE) ? " writing" : "");
45088 + err = 0;
45089 + } else
45090 + err = 0;
45091 +
45092 + out:
45093 + preempt_enable();
45094 +
45095 + return err;
45096 +}
45097 +#endif
45098 +
45099 +int
45100 +gr_handle_proc_ptrace(struct task_struct *task)
45101 +{
45102 + struct file *filp;
45103 + struct task_struct *tmp = task;
45104 + struct task_struct *curtemp = current;
45105 + __u32 retmode;
45106 +
45107 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45108 + if (unlikely(!(gr_status & GR_READY)))
45109 + return 0;
45110 +#endif
45111 +
45112 + read_lock(&tasklist_lock);
45113 + read_lock(&grsec_exec_file_lock);
45114 + filp = task->exec_file;
45115 +
45116 + while (tmp->pid > 0) {
45117 + if (tmp == curtemp)
45118 + break;
45119 + tmp = tmp->real_parent;
45120 + }
45121 +
45122 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45123 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45124 + read_unlock(&grsec_exec_file_lock);
45125 + read_unlock(&tasklist_lock);
45126 + return 1;
45127 + }
45128 +
45129 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45130 + if (!(gr_status & GR_READY)) {
45131 + read_unlock(&grsec_exec_file_lock);
45132 + read_unlock(&tasklist_lock);
45133 + return 0;
45134 + }
45135 +#endif
45136 +
45137 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45138 + read_unlock(&grsec_exec_file_lock);
45139 + read_unlock(&tasklist_lock);
45140 +
45141 + if (retmode & GR_NOPTRACE)
45142 + return 1;
45143 +
45144 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45145 + && (current->acl != task->acl || (current->acl != current->role->root_label
45146 + && current->pid != task->pid)))
45147 + return 1;
45148 +
45149 + return 0;
45150 +}
45151 +
45152 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45153 +{
45154 + if (unlikely(!(gr_status & GR_READY)))
45155 + return;
45156 +
45157 + if (!(current->role->roletype & GR_ROLE_GOD))
45158 + return;
45159 +
45160 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45161 + p->role->rolename, gr_task_roletype_to_char(p),
45162 + p->acl->filename);
45163 +}
45164 +
45165 +int
45166 +gr_handle_ptrace(struct task_struct *task, const long request)
45167 +{
45168 + struct task_struct *tmp = task;
45169 + struct task_struct *curtemp = current;
45170 + __u32 retmode;
45171 +
45172 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45173 + if (unlikely(!(gr_status & GR_READY)))
45174 + return 0;
45175 +#endif
45176 +
45177 + read_lock(&tasklist_lock);
45178 + while (tmp->pid > 0) {
45179 + if (tmp == curtemp)
45180 + break;
45181 + tmp = tmp->real_parent;
45182 + }
45183 +
45184 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45185 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45186 + read_unlock(&tasklist_lock);
45187 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45188 + return 1;
45189 + }
45190 + read_unlock(&tasklist_lock);
45191 +
45192 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45193 + if (!(gr_status & GR_READY))
45194 + return 0;
45195 +#endif
45196 +
45197 + read_lock(&grsec_exec_file_lock);
45198 + if (unlikely(!task->exec_file)) {
45199 + read_unlock(&grsec_exec_file_lock);
45200 + return 0;
45201 + }
45202 +
45203 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45204 + read_unlock(&grsec_exec_file_lock);
45205 +
45206 + if (retmode & GR_NOPTRACE) {
45207 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45208 + return 1;
45209 + }
45210 +
45211 + if (retmode & GR_PTRACERD) {
45212 + switch (request) {
45213 + case PTRACE_POKETEXT:
45214 + case PTRACE_POKEDATA:
45215 + case PTRACE_POKEUSR:
45216 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45217 + case PTRACE_SETREGS:
45218 + case PTRACE_SETFPREGS:
45219 +#endif
45220 +#ifdef CONFIG_X86
45221 + case PTRACE_SETFPXREGS:
45222 +#endif
45223 +#ifdef CONFIG_ALTIVEC
45224 + case PTRACE_SETVRREGS:
45225 +#endif
45226 + return 1;
45227 + default:
45228 + return 0;
45229 + }
45230 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45231 + !(current->role->roletype & GR_ROLE_GOD) &&
45232 + (current->acl != task->acl)) {
45233 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45234 + return 1;
45235 + }
45236 +
45237 + return 0;
45238 +}
45239 +
45240 +static int is_writable_mmap(const struct file *filp)
45241 +{
45242 + struct task_struct *task = current;
45243 + struct acl_object_label *obj, *obj2;
45244 +
45245 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45246 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45247 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45248 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45249 + task->role->root_label);
45250 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45251 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45252 + return 1;
45253 + }
45254 + }
45255 + return 0;
45256 +}
45257 +
45258 +int
45259 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45260 +{
45261 + __u32 mode;
45262 +
45263 + if (unlikely(!file || !(prot & PROT_EXEC)))
45264 + return 1;
45265 +
45266 + if (is_writable_mmap(file))
45267 + return 0;
45268 +
45269 + mode =
45270 + gr_search_file(file->f_path.dentry,
45271 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45272 + file->f_path.mnt);
45273 +
45274 + if (!gr_tpe_allow(file))
45275 + return 0;
45276 +
45277 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45278 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45279 + return 0;
45280 + } else if (unlikely(!(mode & GR_EXEC))) {
45281 + return 0;
45282 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45283 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45284 + return 1;
45285 + }
45286 +
45287 + return 1;
45288 +}
45289 +
45290 +int
45291 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45292 +{
45293 + __u32 mode;
45294 +
45295 + if (unlikely(!file || !(prot & PROT_EXEC)))
45296 + return 1;
45297 +
45298 + if (is_writable_mmap(file))
45299 + return 0;
45300 +
45301 + mode =
45302 + gr_search_file(file->f_path.dentry,
45303 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45304 + file->f_path.mnt);
45305 +
45306 + if (!gr_tpe_allow(file))
45307 + return 0;
45308 +
45309 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45310 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45311 + return 0;
45312 + } else if (unlikely(!(mode & GR_EXEC))) {
45313 + return 0;
45314 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45315 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45316 + return 1;
45317 + }
45318 +
45319 + return 1;
45320 +}
45321 +
45322 +void
45323 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45324 +{
45325 + unsigned long runtime;
45326 + unsigned long cputime;
45327 + unsigned int wday, cday;
45328 + __u8 whr, chr;
45329 + __u8 wmin, cmin;
45330 + __u8 wsec, csec;
45331 + struct timespec timeval;
45332 +
45333 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45334 + !(task->acl->mode & GR_PROCACCT)))
45335 + return;
45336 +
45337 + do_posix_clock_monotonic_gettime(&timeval);
45338 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45339 + wday = runtime / (3600 * 24);
45340 + runtime -= wday * (3600 * 24);
45341 + whr = runtime / 3600;
45342 + runtime -= whr * 3600;
45343 + wmin = runtime / 60;
45344 + runtime -= wmin * 60;
45345 + wsec = runtime;
45346 +
45347 + cputime = (task->utime + task->stime) / HZ;
45348 + cday = cputime / (3600 * 24);
45349 + cputime -= cday * (3600 * 24);
45350 + chr = cputime / 3600;
45351 + cputime -= chr * 3600;
45352 + cmin = cputime / 60;
45353 + cputime -= cmin * 60;
45354 + csec = cputime;
45355 +
45356 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45357 +
45358 + return;
45359 +}
45360 +
45361 +void gr_set_kernel_label(struct task_struct *task)
45362 +{
45363 + if (gr_status & GR_READY) {
45364 + task->role = kernel_role;
45365 + task->acl = kernel_role->root_label;
45366 + }
45367 + return;
45368 +}
45369 +
45370 +#ifdef CONFIG_TASKSTATS
45371 +int gr_is_taskstats_denied(int pid)
45372 +{
45373 + struct task_struct *task;
45374 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45375 + const struct cred *cred;
45376 +#endif
45377 + int ret = 0;
45378 +
45379 + /* restrict taskstats viewing to un-chrooted root users
45380 + who have the 'view' subject flag if the RBAC system is enabled
45381 + */
45382 +
45383 + rcu_read_lock();
45384 + read_lock(&tasklist_lock);
45385 + task = find_task_by_vpid(pid);
45386 + if (task) {
45387 +#ifdef CONFIG_GRKERNSEC_CHROOT
45388 + if (proc_is_chrooted(task))
45389 + ret = -EACCES;
45390 +#endif
45391 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45392 + cred = __task_cred(task);
45393 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45394 + if (cred->uid != 0)
45395 + ret = -EACCES;
45396 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45397 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45398 + ret = -EACCES;
45399 +#endif
45400 +#endif
45401 + if (gr_status & GR_READY) {
45402 + if (!(task->acl->mode & GR_VIEW))
45403 + ret = -EACCES;
45404 + }
45405 + } else
45406 + ret = -ENOENT;
45407 +
45408 + read_unlock(&tasklist_lock);
45409 + rcu_read_unlock();
45410 +
45411 + return ret;
45412 +}
45413 +#endif
45414 +
45415 +/* AUXV entries are filled via a descendant of search_binary_handler
45416 + after we've already applied the subject for the target
45417 +*/
45418 +int gr_acl_enable_at_secure(void)
45419 +{
45420 + if (unlikely(!(gr_status & GR_READY)))
45421 + return 0;
45422 +
45423 + if (current->acl->mode & GR_ATSECURE)
45424 + return 1;
45425 +
45426 + return 0;
45427 +}
45428 +
45429 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45430 +{
45431 + struct task_struct *task = current;
45432 + struct dentry *dentry = file->f_path.dentry;
45433 + struct vfsmount *mnt = file->f_path.mnt;
45434 + struct acl_object_label *obj, *tmp;
45435 + struct acl_subject_label *subj;
45436 + unsigned int bufsize;
45437 + int is_not_root;
45438 + char *path;
45439 + dev_t dev = __get_dev(dentry);
45440 +
45441 + if (unlikely(!(gr_status & GR_READY)))
45442 + return 1;
45443 +
45444 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45445 + return 1;
45446 +
45447 + /* ignore Eric Biederman */
45448 + if (IS_PRIVATE(dentry->d_inode))
45449 + return 1;
45450 +
45451 + subj = task->acl;
45452 + do {
45453 + obj = lookup_acl_obj_label(ino, dev, subj);
45454 + if (obj != NULL)
45455 + return (obj->mode & GR_FIND) ? 1 : 0;
45456 + } while ((subj = subj->parent_subject));
45457 +
45458 + /* this is purely an optimization since we're looking for an object
45459 + for the directory we're doing a readdir on
45460 + if it's possible for any globbed object to match the entry we're
45461 + filling into the directory, then the object we find here will be
45462 + an anchor point with attached globbed objects
45463 + */
45464 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45465 + if (obj->globbed == NULL)
45466 + return (obj->mode & GR_FIND) ? 1 : 0;
45467 +
45468 + is_not_root = ((obj->filename[0] == '/') &&
45469 + (obj->filename[1] == '\0')) ? 0 : 1;
45470 + bufsize = PAGE_SIZE - namelen - is_not_root;
45471 +
45472 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45473 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45474 + return 1;
45475 +
45476 + preempt_disable();
45477 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45478 + bufsize);
45479 +
45480 + bufsize = strlen(path);
45481 +
45482 + /* if base is "/", don't append an additional slash */
45483 + if (is_not_root)
45484 + *(path + bufsize) = '/';
45485 + memcpy(path + bufsize + is_not_root, name, namelen);
45486 + *(path + bufsize + namelen + is_not_root) = '\0';
45487 +
45488 + tmp = obj->globbed;
45489 + while (tmp) {
45490 + if (!glob_match(tmp->filename, path)) {
45491 + preempt_enable();
45492 + return (tmp->mode & GR_FIND) ? 1 : 0;
45493 + }
45494 + tmp = tmp->next;
45495 + }
45496 + preempt_enable();
45497 + return (obj->mode & GR_FIND) ? 1 : 0;
45498 +}
45499 +
45500 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45501 +EXPORT_SYMBOL(gr_acl_is_enabled);
45502 +#endif
45503 +EXPORT_SYMBOL(gr_learn_resource);
45504 +EXPORT_SYMBOL(gr_set_kernel_label);
45505 +#ifdef CONFIG_SECURITY
45506 +EXPORT_SYMBOL(gr_check_user_change);
45507 +EXPORT_SYMBOL(gr_check_group_change);
45508 +#endif
45509 +
45510 diff -urNp linux-3.0.3/grsecurity/gracl_cap.c linux-3.0.3/grsecurity/gracl_cap.c
45511 --- linux-3.0.3/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45512 +++ linux-3.0.3/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
45513 @@ -0,0 +1,139 @@
45514 +#include <linux/kernel.h>
45515 +#include <linux/module.h>
45516 +#include <linux/sched.h>
45517 +#include <linux/gracl.h>
45518 +#include <linux/grsecurity.h>
45519 +#include <linux/grinternal.h>
45520 +
45521 +static const char *captab_log[] = {
45522 + "CAP_CHOWN",
45523 + "CAP_DAC_OVERRIDE",
45524 + "CAP_DAC_READ_SEARCH",
45525 + "CAP_FOWNER",
45526 + "CAP_FSETID",
45527 + "CAP_KILL",
45528 + "CAP_SETGID",
45529 + "CAP_SETUID",
45530 + "CAP_SETPCAP",
45531 + "CAP_LINUX_IMMUTABLE",
45532 + "CAP_NET_BIND_SERVICE",
45533 + "CAP_NET_BROADCAST",
45534 + "CAP_NET_ADMIN",
45535 + "CAP_NET_RAW",
45536 + "CAP_IPC_LOCK",
45537 + "CAP_IPC_OWNER",
45538 + "CAP_SYS_MODULE",
45539 + "CAP_SYS_RAWIO",
45540 + "CAP_SYS_CHROOT",
45541 + "CAP_SYS_PTRACE",
45542 + "CAP_SYS_PACCT",
45543 + "CAP_SYS_ADMIN",
45544 + "CAP_SYS_BOOT",
45545 + "CAP_SYS_NICE",
45546 + "CAP_SYS_RESOURCE",
45547 + "CAP_SYS_TIME",
45548 + "CAP_SYS_TTY_CONFIG",
45549 + "CAP_MKNOD",
45550 + "CAP_LEASE",
45551 + "CAP_AUDIT_WRITE",
45552 + "CAP_AUDIT_CONTROL",
45553 + "CAP_SETFCAP",
45554 + "CAP_MAC_OVERRIDE",
45555 + "CAP_MAC_ADMIN",
45556 + "CAP_SYSLOG"
45557 +};
45558 +
45559 +EXPORT_SYMBOL(gr_is_capable);
45560 +EXPORT_SYMBOL(gr_is_capable_nolog);
45561 +
45562 +int
45563 +gr_is_capable(const int cap)
45564 +{
45565 + struct task_struct *task = current;
45566 + const struct cred *cred = current_cred();
45567 + struct acl_subject_label *curracl;
45568 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45569 + kernel_cap_t cap_audit = __cap_empty_set;
45570 +
45571 + if (!gr_acl_is_enabled())
45572 + return 1;
45573 +
45574 + curracl = task->acl;
45575 +
45576 + cap_drop = curracl->cap_lower;
45577 + cap_mask = curracl->cap_mask;
45578 + cap_audit = curracl->cap_invert_audit;
45579 +
45580 + while ((curracl = curracl->parent_subject)) {
45581 + /* if the cap isn't specified in the current computed mask but is specified in the
45582 + current level subject, and is lowered in the current level subject, then add
45583 + it to the set of dropped capabilities
45584 + otherwise, add the current level subject's mask to the current computed mask
45585 + */
45586 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45587 + cap_raise(cap_mask, cap);
45588 + if (cap_raised(curracl->cap_lower, cap))
45589 + cap_raise(cap_drop, cap);
45590 + if (cap_raised(curracl->cap_invert_audit, cap))
45591 + cap_raise(cap_audit, cap);
45592 + }
45593 + }
45594 +
45595 + if (!cap_raised(cap_drop, cap)) {
45596 + if (cap_raised(cap_audit, cap))
45597 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45598 + return 1;
45599 + }
45600 +
45601 + curracl = task->acl;
45602 +
45603 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45604 + && cap_raised(cred->cap_effective, cap)) {
45605 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45606 + task->role->roletype, cred->uid,
45607 + cred->gid, task->exec_file ?
45608 + gr_to_filename(task->exec_file->f_path.dentry,
45609 + task->exec_file->f_path.mnt) : curracl->filename,
45610 + curracl->filename, 0UL,
45611 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45612 + return 1;
45613 + }
45614 +
45615 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45616 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45617 + return 0;
45618 +}
45619 +
45620 +int
45621 +gr_is_capable_nolog(const int cap)
45622 +{
45623 + struct acl_subject_label *curracl;
45624 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45625 +
45626 + if (!gr_acl_is_enabled())
45627 + return 1;
45628 +
45629 + curracl = current->acl;
45630 +
45631 + cap_drop = curracl->cap_lower;
45632 + cap_mask = curracl->cap_mask;
45633 +
45634 + while ((curracl = curracl->parent_subject)) {
45635 + /* if the cap isn't specified in the current computed mask but is specified in the
45636 + current level subject, and is lowered in the current level subject, then add
45637 + it to the set of dropped capabilities
45638 + otherwise, add the current level subject's mask to the current computed mask
45639 + */
45640 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45641 + cap_raise(cap_mask, cap);
45642 + if (cap_raised(curracl->cap_lower, cap))
45643 + cap_raise(cap_drop, cap);
45644 + }
45645 + }
45646 +
45647 + if (!cap_raised(cap_drop, cap))
45648 + return 1;
45649 +
45650 + return 0;
45651 +}
45652 +
45653 diff -urNp linux-3.0.3/grsecurity/gracl_fs.c linux-3.0.3/grsecurity/gracl_fs.c
45654 --- linux-3.0.3/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45655 +++ linux-3.0.3/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
45656 @@ -0,0 +1,431 @@
45657 +#include <linux/kernel.h>
45658 +#include <linux/sched.h>
45659 +#include <linux/types.h>
45660 +#include <linux/fs.h>
45661 +#include <linux/file.h>
45662 +#include <linux/stat.h>
45663 +#include <linux/grsecurity.h>
45664 +#include <linux/grinternal.h>
45665 +#include <linux/gracl.h>
45666 +
45667 +__u32
45668 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45669 + const struct vfsmount * mnt)
45670 +{
45671 + __u32 mode;
45672 +
45673 + if (unlikely(!dentry->d_inode))
45674 + return GR_FIND;
45675 +
45676 + mode =
45677 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45678 +
45679 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45680 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45681 + return mode;
45682 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45683 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45684 + return 0;
45685 + } else if (unlikely(!(mode & GR_FIND)))
45686 + return 0;
45687 +
45688 + return GR_FIND;
45689 +}
45690 +
45691 +__u32
45692 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45693 + const int fmode)
45694 +{
45695 + __u32 reqmode = GR_FIND;
45696 + __u32 mode;
45697 +
45698 + if (unlikely(!dentry->d_inode))
45699 + return reqmode;
45700 +
45701 + if (unlikely(fmode & O_APPEND))
45702 + reqmode |= GR_APPEND;
45703 + else if (unlikely(fmode & FMODE_WRITE))
45704 + reqmode |= GR_WRITE;
45705 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45706 + reqmode |= GR_READ;
45707 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45708 + reqmode &= ~GR_READ;
45709 + mode =
45710 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45711 + mnt);
45712 +
45713 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45714 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45715 + reqmode & GR_READ ? " reading" : "",
45716 + reqmode & GR_WRITE ? " writing" : reqmode &
45717 + GR_APPEND ? " appending" : "");
45718 + return reqmode;
45719 + } else
45720 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45721 + {
45722 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45723 + reqmode & GR_READ ? " reading" : "",
45724 + reqmode & GR_WRITE ? " writing" : reqmode &
45725 + GR_APPEND ? " appending" : "");
45726 + return 0;
45727 + } else if (unlikely((mode & reqmode) != reqmode))
45728 + return 0;
45729 +
45730 + return reqmode;
45731 +}
45732 +
45733 +__u32
45734 +gr_acl_handle_creat(const struct dentry * dentry,
45735 + const struct dentry * p_dentry,
45736 + const struct vfsmount * p_mnt, const int fmode,
45737 + const int imode)
45738 +{
45739 + __u32 reqmode = GR_WRITE | GR_CREATE;
45740 + __u32 mode;
45741 +
45742 + if (unlikely(fmode & O_APPEND))
45743 + reqmode |= GR_APPEND;
45744 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45745 + reqmode |= GR_READ;
45746 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45747 + reqmode |= GR_SETID;
45748 +
45749 + mode =
45750 + gr_check_create(dentry, p_dentry, p_mnt,
45751 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45752 +
45753 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45754 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45755 + reqmode & GR_READ ? " reading" : "",
45756 + reqmode & GR_WRITE ? " writing" : reqmode &
45757 + GR_APPEND ? " appending" : "");
45758 + return reqmode;
45759 + } else
45760 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45761 + {
45762 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45763 + reqmode & GR_READ ? " reading" : "",
45764 + reqmode & GR_WRITE ? " writing" : reqmode &
45765 + GR_APPEND ? " appending" : "");
45766 + return 0;
45767 + } else if (unlikely((mode & reqmode) != reqmode))
45768 + return 0;
45769 +
45770 + return reqmode;
45771 +}
45772 +
45773 +__u32
45774 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45775 + const int fmode)
45776 +{
45777 + __u32 mode, reqmode = GR_FIND;
45778 +
45779 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45780 + reqmode |= GR_EXEC;
45781 + if (fmode & S_IWOTH)
45782 + reqmode |= GR_WRITE;
45783 + if (fmode & S_IROTH)
45784 + reqmode |= GR_READ;
45785 +
45786 + mode =
45787 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45788 + mnt);
45789 +
45790 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45791 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45792 + reqmode & GR_READ ? " reading" : "",
45793 + reqmode & GR_WRITE ? " writing" : "",
45794 + reqmode & GR_EXEC ? " executing" : "");
45795 + return reqmode;
45796 + } else
45797 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45798 + {
45799 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45800 + reqmode & GR_READ ? " reading" : "",
45801 + reqmode & GR_WRITE ? " writing" : "",
45802 + reqmode & GR_EXEC ? " executing" : "");
45803 + return 0;
45804 + } else if (unlikely((mode & reqmode) != reqmode))
45805 + return 0;
45806 +
45807 + return reqmode;
45808 +}
45809 +
45810 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45811 +{
45812 + __u32 mode;
45813 +
45814 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45815 +
45816 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45817 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45818 + return mode;
45819 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45820 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45821 + return 0;
45822 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45823 + return 0;
45824 +
45825 + return (reqmode);
45826 +}
45827 +
45828 +__u32
45829 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45830 +{
45831 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45832 +}
45833 +
45834 +__u32
45835 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45836 +{
45837 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45838 +}
45839 +
45840 +__u32
45841 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45842 +{
45843 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45844 +}
45845 +
45846 +__u32
45847 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45848 +{
45849 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45850 +}
45851 +
45852 +__u32
45853 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45854 + mode_t mode)
45855 +{
45856 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45857 + return 1;
45858 +
45859 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45860 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45861 + GR_FCHMOD_ACL_MSG);
45862 + } else {
45863 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45864 + }
45865 +}
45866 +
45867 +__u32
45868 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45869 + mode_t mode)
45870 +{
45871 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45872 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45873 + GR_CHMOD_ACL_MSG);
45874 + } else {
45875 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45876 + }
45877 +}
45878 +
45879 +__u32
45880 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45881 +{
45882 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45883 +}
45884 +
45885 +__u32
45886 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45887 +{
45888 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45889 +}
45890 +
45891 +__u32
45892 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45893 +{
45894 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45895 +}
45896 +
45897 +__u32
45898 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45899 +{
45900 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45901 + GR_UNIXCONNECT_ACL_MSG);
45902 +}
45903 +
45904 +/* hardlinks require at minimum create permission,
45905 + any additional privilege required is based on the
45906 + privilege of the file being linked to
45907 +*/
45908 +__u32
45909 +gr_acl_handle_link(const struct dentry * new_dentry,
45910 + const struct dentry * parent_dentry,
45911 + const struct vfsmount * parent_mnt,
45912 + const struct dentry * old_dentry,
45913 + const struct vfsmount * old_mnt, const char *to)
45914 +{
45915 + __u32 mode;
45916 + __u32 needmode = GR_CREATE | GR_LINK;
45917 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45918 +
45919 + mode =
45920 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45921 + old_mnt);
45922 +
45923 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45924 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45925 + return mode;
45926 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45927 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45928 + return 0;
45929 + } else if (unlikely((mode & needmode) != needmode))
45930 + return 0;
45931 +
45932 + return 1;
45933 +}
45934 +
45935 +__u32
45936 +gr_acl_handle_symlink(const struct dentry * new_dentry,
45937 + const struct dentry * parent_dentry,
45938 + const struct vfsmount * parent_mnt, const char *from)
45939 +{
45940 + __u32 needmode = GR_WRITE | GR_CREATE;
45941 + __u32 mode;
45942 +
45943 + mode =
45944 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45945 + GR_CREATE | GR_AUDIT_CREATE |
45946 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45947 +
45948 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45949 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45950 + return mode;
45951 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45952 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45953 + return 0;
45954 + } else if (unlikely((mode & needmode) != needmode))
45955 + return 0;
45956 +
45957 + return (GR_WRITE | GR_CREATE);
45958 +}
45959 +
45960 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45961 +{
45962 + __u32 mode;
45963 +
45964 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45965 +
45966 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45967 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45968 + return mode;
45969 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45970 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45971 + return 0;
45972 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45973 + return 0;
45974 +
45975 + return (reqmode);
45976 +}
45977 +
45978 +__u32
45979 +gr_acl_handle_mknod(const struct dentry * new_dentry,
45980 + const struct dentry * parent_dentry,
45981 + const struct vfsmount * parent_mnt,
45982 + const int mode)
45983 +{
45984 + __u32 reqmode = GR_WRITE | GR_CREATE;
45985 + if (unlikely(mode & (S_ISUID | S_ISGID)))
45986 + reqmode |= GR_SETID;
45987 +
45988 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45989 + reqmode, GR_MKNOD_ACL_MSG);
45990 +}
45991 +
45992 +__u32
45993 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
45994 + const struct dentry *parent_dentry,
45995 + const struct vfsmount *parent_mnt)
45996 +{
45997 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45998 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
45999 +}
46000 +
46001 +#define RENAME_CHECK_SUCCESS(old, new) \
46002 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
46003 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
46004 +
46005 +int
46006 +gr_acl_handle_rename(struct dentry *new_dentry,
46007 + struct dentry *parent_dentry,
46008 + const struct vfsmount *parent_mnt,
46009 + struct dentry *old_dentry,
46010 + struct inode *old_parent_inode,
46011 + struct vfsmount *old_mnt, const char *newname)
46012 +{
46013 + __u32 comp1, comp2;
46014 + int error = 0;
46015 +
46016 + if (unlikely(!gr_acl_is_enabled()))
46017 + return 0;
46018 +
46019 + if (!new_dentry->d_inode) {
46020 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46021 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46022 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46023 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46024 + GR_DELETE | GR_AUDIT_DELETE |
46025 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46026 + GR_SUPPRESS, old_mnt);
46027 + } else {
46028 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46029 + GR_CREATE | GR_DELETE |
46030 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46031 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46032 + GR_SUPPRESS, parent_mnt);
46033 + comp2 =
46034 + gr_search_file(old_dentry,
46035 + GR_READ | GR_WRITE | GR_AUDIT_READ |
46036 + GR_DELETE | GR_AUDIT_DELETE |
46037 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46038 + }
46039 +
46040 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46041 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46042 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46043 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46044 + && !(comp2 & GR_SUPPRESS)) {
46045 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46046 + error = -EACCES;
46047 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46048 + error = -EACCES;
46049 +
46050 + return error;
46051 +}
46052 +
46053 +void
46054 +gr_acl_handle_exit(void)
46055 +{
46056 + u16 id;
46057 + char *rolename;
46058 + struct file *exec_file;
46059 +
46060 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46061 + !(current->role->roletype & GR_ROLE_PERSIST))) {
46062 + id = current->acl_role_id;
46063 + rolename = current->role->rolename;
46064 + gr_set_acls(1);
46065 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46066 + }
46067 +
46068 + write_lock(&grsec_exec_file_lock);
46069 + exec_file = current->exec_file;
46070 + current->exec_file = NULL;
46071 + write_unlock(&grsec_exec_file_lock);
46072 +
46073 + if (exec_file)
46074 + fput(exec_file);
46075 +}
46076 +
46077 +int
46078 +gr_acl_handle_procpidmem(const struct task_struct *task)
46079 +{
46080 + if (unlikely(!gr_acl_is_enabled()))
46081 + return 0;
46082 +
46083 + if (task != current && task->acl->mode & GR_PROTPROCFD)
46084 + return -EACCES;
46085 +
46086 + return 0;
46087 +}
46088 diff -urNp linux-3.0.3/grsecurity/gracl_ip.c linux-3.0.3/grsecurity/gracl_ip.c
46089 --- linux-3.0.3/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46090 +++ linux-3.0.3/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
46091 @@ -0,0 +1,381 @@
46092 +#include <linux/kernel.h>
46093 +#include <asm/uaccess.h>
46094 +#include <asm/errno.h>
46095 +#include <net/sock.h>
46096 +#include <linux/file.h>
46097 +#include <linux/fs.h>
46098 +#include <linux/net.h>
46099 +#include <linux/in.h>
46100 +#include <linux/skbuff.h>
46101 +#include <linux/ip.h>
46102 +#include <linux/udp.h>
46103 +#include <linux/types.h>
46104 +#include <linux/sched.h>
46105 +#include <linux/netdevice.h>
46106 +#include <linux/inetdevice.h>
46107 +#include <linux/gracl.h>
46108 +#include <linux/grsecurity.h>
46109 +#include <linux/grinternal.h>
46110 +
46111 +#define GR_BIND 0x01
46112 +#define GR_CONNECT 0x02
46113 +#define GR_INVERT 0x04
46114 +#define GR_BINDOVERRIDE 0x08
46115 +#define GR_CONNECTOVERRIDE 0x10
46116 +#define GR_SOCK_FAMILY 0x20
46117 +
46118 +static const char * gr_protocols[IPPROTO_MAX] = {
46119 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46120 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46121 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46122 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46123 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46124 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46125 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46126 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46127 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46128 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46129 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46130 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46131 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46132 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46133 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46134 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46135 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46136 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46137 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46138 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46139 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46140 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46141 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46142 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46143 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46144 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46145 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46146 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46147 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46148 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46149 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46150 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46151 + };
46152 +
46153 +static const char * gr_socktypes[SOCK_MAX] = {
46154 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46155 + "unknown:7", "unknown:8", "unknown:9", "packet"
46156 + };
46157 +
46158 +static const char * gr_sockfamilies[AF_MAX+1] = {
46159 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46160 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46161 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46162 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46163 + };
46164 +
46165 +const char *
46166 +gr_proto_to_name(unsigned char proto)
46167 +{
46168 + return gr_protocols[proto];
46169 +}
46170 +
46171 +const char *
46172 +gr_socktype_to_name(unsigned char type)
46173 +{
46174 + return gr_socktypes[type];
46175 +}
46176 +
46177 +const char *
46178 +gr_sockfamily_to_name(unsigned char family)
46179 +{
46180 + return gr_sockfamilies[family];
46181 +}
46182 +
46183 +int
46184 +gr_search_socket(const int domain, const int type, const int protocol)
46185 +{
46186 + struct acl_subject_label *curr;
46187 + const struct cred *cred = current_cred();
46188 +
46189 + if (unlikely(!gr_acl_is_enabled()))
46190 + goto exit;
46191 +
46192 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46193 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46194 + goto exit; // let the kernel handle it
46195 +
46196 + curr = current->acl;
46197 +
46198 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46199 + /* the family is allowed, if this is PF_INET allow it only if
46200 + the extra sock type/protocol checks pass */
46201 + if (domain == PF_INET)
46202 + goto inet_check;
46203 + goto exit;
46204 + } else {
46205 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46206 + __u32 fakeip = 0;
46207 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46208 + current->role->roletype, cred->uid,
46209 + cred->gid, current->exec_file ?
46210 + gr_to_filename(current->exec_file->f_path.dentry,
46211 + current->exec_file->f_path.mnt) :
46212 + curr->filename, curr->filename,
46213 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46214 + &current->signal->saved_ip);
46215 + goto exit;
46216 + }
46217 + goto exit_fail;
46218 + }
46219 +
46220 +inet_check:
46221 + /* the rest of this checking is for IPv4 only */
46222 + if (!curr->ips)
46223 + goto exit;
46224 +
46225 + if ((curr->ip_type & (1 << type)) &&
46226 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46227 + goto exit;
46228 +
46229 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46230 + /* we don't place acls on raw sockets , and sometimes
46231 + dgram/ip sockets are opened for ioctl and not
46232 + bind/connect, so we'll fake a bind learn log */
46233 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46234 + __u32 fakeip = 0;
46235 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46236 + current->role->roletype, cred->uid,
46237 + cred->gid, current->exec_file ?
46238 + gr_to_filename(current->exec_file->f_path.dentry,
46239 + current->exec_file->f_path.mnt) :
46240 + curr->filename, curr->filename,
46241 + &fakeip, 0, type,
46242 + protocol, GR_CONNECT, &current->signal->saved_ip);
46243 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46244 + __u32 fakeip = 0;
46245 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46246 + current->role->roletype, cred->uid,
46247 + cred->gid, current->exec_file ?
46248 + gr_to_filename(current->exec_file->f_path.dentry,
46249 + current->exec_file->f_path.mnt) :
46250 + curr->filename, curr->filename,
46251 + &fakeip, 0, type,
46252 + protocol, GR_BIND, &current->signal->saved_ip);
46253 + }
46254 + /* we'll log when they use connect or bind */
46255 + goto exit;
46256 + }
46257 +
46258 +exit_fail:
46259 + if (domain == PF_INET)
46260 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46261 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46262 + else
46263 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46264 + gr_socktype_to_name(type), protocol);
46265 +
46266 + return 0;
46267 +exit:
46268 + return 1;
46269 +}
46270 +
46271 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46272 +{
46273 + if ((ip->mode & mode) &&
46274 + (ip_port >= ip->low) &&
46275 + (ip_port <= ip->high) &&
46276 + ((ntohl(ip_addr) & our_netmask) ==
46277 + (ntohl(our_addr) & our_netmask))
46278 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46279 + && (ip->type & (1 << type))) {
46280 + if (ip->mode & GR_INVERT)
46281 + return 2; // specifically denied
46282 + else
46283 + return 1; // allowed
46284 + }
46285 +
46286 + return 0; // not specifically allowed, may continue parsing
46287 +}
46288 +
46289 +static int
46290 +gr_search_connectbind(const int full_mode, struct sock *sk,
46291 + struct sockaddr_in *addr, const int type)
46292 +{
46293 + char iface[IFNAMSIZ] = {0};
46294 + struct acl_subject_label *curr;
46295 + struct acl_ip_label *ip;
46296 + struct inet_sock *isk;
46297 + struct net_device *dev;
46298 + struct in_device *idev;
46299 + unsigned long i;
46300 + int ret;
46301 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46302 + __u32 ip_addr = 0;
46303 + __u32 our_addr;
46304 + __u32 our_netmask;
46305 + char *p;
46306 + __u16 ip_port = 0;
46307 + const struct cred *cred = current_cred();
46308 +
46309 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46310 + return 0;
46311 +
46312 + curr = current->acl;
46313 + isk = inet_sk(sk);
46314 +
46315 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46316 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46317 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46318 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46319 + struct sockaddr_in saddr;
46320 + int err;
46321 +
46322 + saddr.sin_family = AF_INET;
46323 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46324 + saddr.sin_port = isk->inet_sport;
46325 +
46326 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46327 + if (err)
46328 + return err;
46329 +
46330 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46331 + if (err)
46332 + return err;
46333 + }
46334 +
46335 + if (!curr->ips)
46336 + return 0;
46337 +
46338 + ip_addr = addr->sin_addr.s_addr;
46339 + ip_port = ntohs(addr->sin_port);
46340 +
46341 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46342 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46343 + current->role->roletype, cred->uid,
46344 + cred->gid, current->exec_file ?
46345 + gr_to_filename(current->exec_file->f_path.dentry,
46346 + current->exec_file->f_path.mnt) :
46347 + curr->filename, curr->filename,
46348 + &ip_addr, ip_port, type,
46349 + sk->sk_protocol, mode, &current->signal->saved_ip);
46350 + return 0;
46351 + }
46352 +
46353 + for (i = 0; i < curr->ip_num; i++) {
46354 + ip = *(curr->ips + i);
46355 + if (ip->iface != NULL) {
46356 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46357 + p = strchr(iface, ':');
46358 + if (p != NULL)
46359 + *p = '\0';
46360 + dev = dev_get_by_name(sock_net(sk), iface);
46361 + if (dev == NULL)
46362 + continue;
46363 + idev = in_dev_get(dev);
46364 + if (idev == NULL) {
46365 + dev_put(dev);
46366 + continue;
46367 + }
46368 + rcu_read_lock();
46369 + for_ifa(idev) {
46370 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46371 + our_addr = ifa->ifa_address;
46372 + our_netmask = 0xffffffff;
46373 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46374 + if (ret == 1) {
46375 + rcu_read_unlock();
46376 + in_dev_put(idev);
46377 + dev_put(dev);
46378 + return 0;
46379 + } else if (ret == 2) {
46380 + rcu_read_unlock();
46381 + in_dev_put(idev);
46382 + dev_put(dev);
46383 + goto denied;
46384 + }
46385 + }
46386 + } endfor_ifa(idev);
46387 + rcu_read_unlock();
46388 + in_dev_put(idev);
46389 + dev_put(dev);
46390 + } else {
46391 + our_addr = ip->addr;
46392 + our_netmask = ip->netmask;
46393 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46394 + if (ret == 1)
46395 + return 0;
46396 + else if (ret == 2)
46397 + goto denied;
46398 + }
46399 + }
46400 +
46401 +denied:
46402 + if (mode == GR_BIND)
46403 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46404 + else if (mode == GR_CONNECT)
46405 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46406 +
46407 + return -EACCES;
46408 +}
46409 +
46410 +int
46411 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46412 +{
46413 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46414 +}
46415 +
46416 +int
46417 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46418 +{
46419 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46420 +}
46421 +
46422 +int gr_search_listen(struct socket *sock)
46423 +{
46424 + struct sock *sk = sock->sk;
46425 + struct sockaddr_in addr;
46426 +
46427 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46428 + addr.sin_port = inet_sk(sk)->inet_sport;
46429 +
46430 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46431 +}
46432 +
46433 +int gr_search_accept(struct socket *sock)
46434 +{
46435 + struct sock *sk = sock->sk;
46436 + struct sockaddr_in addr;
46437 +
46438 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46439 + addr.sin_port = inet_sk(sk)->inet_sport;
46440 +
46441 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46442 +}
46443 +
46444 +int
46445 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46446 +{
46447 + if (addr)
46448 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46449 + else {
46450 + struct sockaddr_in sin;
46451 + const struct inet_sock *inet = inet_sk(sk);
46452 +
46453 + sin.sin_addr.s_addr = inet->inet_daddr;
46454 + sin.sin_port = inet->inet_dport;
46455 +
46456 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46457 + }
46458 +}
46459 +
46460 +int
46461 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46462 +{
46463 + struct sockaddr_in sin;
46464 +
46465 + if (unlikely(skb->len < sizeof (struct udphdr)))
46466 + return 0; // skip this packet
46467 +
46468 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46469 + sin.sin_port = udp_hdr(skb)->source;
46470 +
46471 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46472 +}
46473 diff -urNp linux-3.0.3/grsecurity/gracl_learn.c linux-3.0.3/grsecurity/gracl_learn.c
46474 --- linux-3.0.3/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46475 +++ linux-3.0.3/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
46476 @@ -0,0 +1,207 @@
46477 +#include <linux/kernel.h>
46478 +#include <linux/mm.h>
46479 +#include <linux/sched.h>
46480 +#include <linux/poll.h>
46481 +#include <linux/string.h>
46482 +#include <linux/file.h>
46483 +#include <linux/types.h>
46484 +#include <linux/vmalloc.h>
46485 +#include <linux/grinternal.h>
46486 +
46487 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46488 + size_t count, loff_t *ppos);
46489 +extern int gr_acl_is_enabled(void);
46490 +
46491 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46492 +static int gr_learn_attached;
46493 +
46494 +/* use a 512k buffer */
46495 +#define LEARN_BUFFER_SIZE (512 * 1024)
46496 +
46497 +static DEFINE_SPINLOCK(gr_learn_lock);
46498 +static DEFINE_MUTEX(gr_learn_user_mutex);
46499 +
46500 +/* we need to maintain two buffers, so that the kernel context of grlearn
46501 + uses a semaphore around the userspace copying, and the other kernel contexts
46502 + use a spinlock when copying into the buffer, since they cannot sleep
46503 +*/
46504 +static char *learn_buffer;
46505 +static char *learn_buffer_user;
46506 +static int learn_buffer_len;
46507 +static int learn_buffer_user_len;
46508 +
46509 +static ssize_t
46510 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46511 +{
46512 + DECLARE_WAITQUEUE(wait, current);
46513 + ssize_t retval = 0;
46514 +
46515 + add_wait_queue(&learn_wait, &wait);
46516 + set_current_state(TASK_INTERRUPTIBLE);
46517 + do {
46518 + mutex_lock(&gr_learn_user_mutex);
46519 + spin_lock(&gr_learn_lock);
46520 + if (learn_buffer_len)
46521 + break;
46522 + spin_unlock(&gr_learn_lock);
46523 + mutex_unlock(&gr_learn_user_mutex);
46524 + if (file->f_flags & O_NONBLOCK) {
46525 + retval = -EAGAIN;
46526 + goto out;
46527 + }
46528 + if (signal_pending(current)) {
46529 + retval = -ERESTARTSYS;
46530 + goto out;
46531 + }
46532 +
46533 + schedule();
46534 + } while (1);
46535 +
46536 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46537 + learn_buffer_user_len = learn_buffer_len;
46538 + retval = learn_buffer_len;
46539 + learn_buffer_len = 0;
46540 +
46541 + spin_unlock(&gr_learn_lock);
46542 +
46543 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46544 + retval = -EFAULT;
46545 +
46546 + mutex_unlock(&gr_learn_user_mutex);
46547 +out:
46548 + set_current_state(TASK_RUNNING);
46549 + remove_wait_queue(&learn_wait, &wait);
46550 + return retval;
46551 +}
46552 +
46553 +static unsigned int
46554 +poll_learn(struct file * file, poll_table * wait)
46555 +{
46556 + poll_wait(file, &learn_wait, wait);
46557 +
46558 + if (learn_buffer_len)
46559 + return (POLLIN | POLLRDNORM);
46560 +
46561 + return 0;
46562 +}
46563 +
46564 +void
46565 +gr_clear_learn_entries(void)
46566 +{
46567 + char *tmp;
46568 +
46569 + mutex_lock(&gr_learn_user_mutex);
46570 + spin_lock(&gr_learn_lock);
46571 + tmp = learn_buffer;
46572 + learn_buffer = NULL;
46573 + spin_unlock(&gr_learn_lock);
46574 + if (tmp)
46575 + vfree(tmp);
46576 + if (learn_buffer_user != NULL) {
46577 + vfree(learn_buffer_user);
46578 + learn_buffer_user = NULL;
46579 + }
46580 + learn_buffer_len = 0;
46581 + mutex_unlock(&gr_learn_user_mutex);
46582 +
46583 + return;
46584 +}
46585 +
46586 +void
46587 +gr_add_learn_entry(const char *fmt, ...)
46588 +{
46589 + va_list args;
46590 + unsigned int len;
46591 +
46592 + if (!gr_learn_attached)
46593 + return;
46594 +
46595 + spin_lock(&gr_learn_lock);
46596 +
46597 + /* leave a gap at the end so we know when it's "full" but don't have to
46598 + compute the exact length of the string we're trying to append
46599 + */
46600 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46601 + spin_unlock(&gr_learn_lock);
46602 + wake_up_interruptible(&learn_wait);
46603 + return;
46604 + }
46605 + if (learn_buffer == NULL) {
46606 + spin_unlock(&gr_learn_lock);
46607 + return;
46608 + }
46609 +
46610 + va_start(args, fmt);
46611 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46612 + va_end(args);
46613 +
46614 + learn_buffer_len += len + 1;
46615 +
46616 + spin_unlock(&gr_learn_lock);
46617 + wake_up_interruptible(&learn_wait);
46618 +
46619 + return;
46620 +}
46621 +
46622 +static int
46623 +open_learn(struct inode *inode, struct file *file)
46624 +{
46625 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46626 + return -EBUSY;
46627 + if (file->f_mode & FMODE_READ) {
46628 + int retval = 0;
46629 + mutex_lock(&gr_learn_user_mutex);
46630 + if (learn_buffer == NULL)
46631 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46632 + if (learn_buffer_user == NULL)
46633 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46634 + if (learn_buffer == NULL) {
46635 + retval = -ENOMEM;
46636 + goto out_error;
46637 + }
46638 + if (learn_buffer_user == NULL) {
46639 + retval = -ENOMEM;
46640 + goto out_error;
46641 + }
46642 + learn_buffer_len = 0;
46643 + learn_buffer_user_len = 0;
46644 + gr_learn_attached = 1;
46645 +out_error:
46646 + mutex_unlock(&gr_learn_user_mutex);
46647 + return retval;
46648 + }
46649 + return 0;
46650 +}
46651 +
46652 +static int
46653 +close_learn(struct inode *inode, struct file *file)
46654 +{
46655 + if (file->f_mode & FMODE_READ) {
46656 + char *tmp = NULL;
46657 + mutex_lock(&gr_learn_user_mutex);
46658 + spin_lock(&gr_learn_lock);
46659 + tmp = learn_buffer;
46660 + learn_buffer = NULL;
46661 + spin_unlock(&gr_learn_lock);
46662 + if (tmp)
46663 + vfree(tmp);
46664 + if (learn_buffer_user != NULL) {
46665 + vfree(learn_buffer_user);
46666 + learn_buffer_user = NULL;
46667 + }
46668 + learn_buffer_len = 0;
46669 + learn_buffer_user_len = 0;
46670 + gr_learn_attached = 0;
46671 + mutex_unlock(&gr_learn_user_mutex);
46672 + }
46673 +
46674 + return 0;
46675 +}
46676 +
46677 +const struct file_operations grsec_fops = {
46678 + .read = read_learn,
46679 + .write = write_grsec_handler,
46680 + .open = open_learn,
46681 + .release = close_learn,
46682 + .poll = poll_learn,
46683 +};
46684 diff -urNp linux-3.0.3/grsecurity/gracl_res.c linux-3.0.3/grsecurity/gracl_res.c
46685 --- linux-3.0.3/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46686 +++ linux-3.0.3/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
46687 @@ -0,0 +1,68 @@
46688 +#include <linux/kernel.h>
46689 +#include <linux/sched.h>
46690 +#include <linux/gracl.h>
46691 +#include <linux/grinternal.h>
46692 +
46693 +static const char *restab_log[] = {
46694 + [RLIMIT_CPU] = "RLIMIT_CPU",
46695 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46696 + [RLIMIT_DATA] = "RLIMIT_DATA",
46697 + [RLIMIT_STACK] = "RLIMIT_STACK",
46698 + [RLIMIT_CORE] = "RLIMIT_CORE",
46699 + [RLIMIT_RSS] = "RLIMIT_RSS",
46700 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46701 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46702 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46703 + [RLIMIT_AS] = "RLIMIT_AS",
46704 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46705 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46706 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46707 + [RLIMIT_NICE] = "RLIMIT_NICE",
46708 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46709 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46710 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46711 +};
46712 +
46713 +void
46714 +gr_log_resource(const struct task_struct *task,
46715 + const int res, const unsigned long wanted, const int gt)
46716 +{
46717 + const struct cred *cred;
46718 + unsigned long rlim;
46719 +
46720 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46721 + return;
46722 +
46723 + // not yet supported resource
46724 + if (unlikely(!restab_log[res]))
46725 + return;
46726 +
46727 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46728 + rlim = task_rlimit_max(task, res);
46729 + else
46730 + rlim = task_rlimit(task, res);
46731 +
46732 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46733 + return;
46734 +
46735 + rcu_read_lock();
46736 + cred = __task_cred(task);
46737 +
46738 + if (res == RLIMIT_NPROC &&
46739 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46740 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46741 + goto out_rcu_unlock;
46742 + else if (res == RLIMIT_MEMLOCK &&
46743 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46744 + goto out_rcu_unlock;
46745 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46746 + goto out_rcu_unlock;
46747 + rcu_read_unlock();
46748 +
46749 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46750 +
46751 + return;
46752 +out_rcu_unlock:
46753 + rcu_read_unlock();
46754 + return;
46755 +}
46756 diff -urNp linux-3.0.3/grsecurity/gracl_segv.c linux-3.0.3/grsecurity/gracl_segv.c
46757 --- linux-3.0.3/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46758 +++ linux-3.0.3/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
46759 @@ -0,0 +1,299 @@
46760 +#include <linux/kernel.h>
46761 +#include <linux/mm.h>
46762 +#include <asm/uaccess.h>
46763 +#include <asm/errno.h>
46764 +#include <asm/mman.h>
46765 +#include <net/sock.h>
46766 +#include <linux/file.h>
46767 +#include <linux/fs.h>
46768 +#include <linux/net.h>
46769 +#include <linux/in.h>
46770 +#include <linux/slab.h>
46771 +#include <linux/types.h>
46772 +#include <linux/sched.h>
46773 +#include <linux/timer.h>
46774 +#include <linux/gracl.h>
46775 +#include <linux/grsecurity.h>
46776 +#include <linux/grinternal.h>
46777 +
46778 +static struct crash_uid *uid_set;
46779 +static unsigned short uid_used;
46780 +static DEFINE_SPINLOCK(gr_uid_lock);
46781 +extern rwlock_t gr_inode_lock;
46782 +extern struct acl_subject_label *
46783 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46784 + struct acl_role_label *role);
46785 +
46786 +#ifdef CONFIG_BTRFS_FS
46787 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46788 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46789 +#endif
46790 +
46791 +static inline dev_t __get_dev(const struct dentry *dentry)
46792 +{
46793 +#ifdef CONFIG_BTRFS_FS
46794 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46795 + return get_btrfs_dev_from_inode(dentry->d_inode);
46796 + else
46797 +#endif
46798 + return dentry->d_inode->i_sb->s_dev;
46799 +}
46800 +
46801 +int
46802 +gr_init_uidset(void)
46803 +{
46804 + uid_set =
46805 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46806 + uid_used = 0;
46807 +
46808 + return uid_set ? 1 : 0;
46809 +}
46810 +
46811 +void
46812 +gr_free_uidset(void)
46813 +{
46814 + if (uid_set)
46815 + kfree(uid_set);
46816 +
46817 + return;
46818 +}
46819 +
46820 +int
46821 +gr_find_uid(const uid_t uid)
46822 +{
46823 + struct crash_uid *tmp = uid_set;
46824 + uid_t buid;
46825 + int low = 0, high = uid_used - 1, mid;
46826 +
46827 + while (high >= low) {
46828 + mid = (low + high) >> 1;
46829 + buid = tmp[mid].uid;
46830 + if (buid == uid)
46831 + return mid;
46832 + if (buid > uid)
46833 + high = mid - 1;
46834 + if (buid < uid)
46835 + low = mid + 1;
46836 + }
46837 +
46838 + return -1;
46839 +}
46840 +
46841 +static __inline__ void
46842 +gr_insertsort(void)
46843 +{
46844 + unsigned short i, j;
46845 + struct crash_uid index;
46846 +
46847 + for (i = 1; i < uid_used; i++) {
46848 + index = uid_set[i];
46849 + j = i;
46850 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46851 + uid_set[j] = uid_set[j - 1];
46852 + j--;
46853 + }
46854 + uid_set[j] = index;
46855 + }
46856 +
46857 + return;
46858 +}
46859 +
46860 +static __inline__ void
46861 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46862 +{
46863 + int loc;
46864 +
46865 + if (uid_used == GR_UIDTABLE_MAX)
46866 + return;
46867 +
46868 + loc = gr_find_uid(uid);
46869 +
46870 + if (loc >= 0) {
46871 + uid_set[loc].expires = expires;
46872 + return;
46873 + }
46874 +
46875 + uid_set[uid_used].uid = uid;
46876 + uid_set[uid_used].expires = expires;
46877 + uid_used++;
46878 +
46879 + gr_insertsort();
46880 +
46881 + return;
46882 +}
46883 +
46884 +void
46885 +gr_remove_uid(const unsigned short loc)
46886 +{
46887 + unsigned short i;
46888 +
46889 + for (i = loc + 1; i < uid_used; i++)
46890 + uid_set[i - 1] = uid_set[i];
46891 +
46892 + uid_used--;
46893 +
46894 + return;
46895 +}
46896 +
46897 +int
46898 +gr_check_crash_uid(const uid_t uid)
46899 +{
46900 + int loc;
46901 + int ret = 0;
46902 +
46903 + if (unlikely(!gr_acl_is_enabled()))
46904 + return 0;
46905 +
46906 + spin_lock(&gr_uid_lock);
46907 + loc = gr_find_uid(uid);
46908 +
46909 + if (loc < 0)
46910 + goto out_unlock;
46911 +
46912 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
46913 + gr_remove_uid(loc);
46914 + else
46915 + ret = 1;
46916 +
46917 +out_unlock:
46918 + spin_unlock(&gr_uid_lock);
46919 + return ret;
46920 +}
46921 +
46922 +static __inline__ int
46923 +proc_is_setxid(const struct cred *cred)
46924 +{
46925 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
46926 + cred->uid != cred->fsuid)
46927 + return 1;
46928 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46929 + cred->gid != cred->fsgid)
46930 + return 1;
46931 +
46932 + return 0;
46933 +}
46934 +
46935 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
46936 +
46937 +void
46938 +gr_handle_crash(struct task_struct *task, const int sig)
46939 +{
46940 + struct acl_subject_label *curr;
46941 + struct acl_subject_label *curr2;
46942 + struct task_struct *tsk, *tsk2;
46943 + const struct cred *cred;
46944 + const struct cred *cred2;
46945 +
46946 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46947 + return;
46948 +
46949 + if (unlikely(!gr_acl_is_enabled()))
46950 + return;
46951 +
46952 + curr = task->acl;
46953 +
46954 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
46955 + return;
46956 +
46957 + if (time_before_eq(curr->expires, get_seconds())) {
46958 + curr->expires = 0;
46959 + curr->crashes = 0;
46960 + }
46961 +
46962 + curr->crashes++;
46963 +
46964 + if (!curr->expires)
46965 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46966 +
46967 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46968 + time_after(curr->expires, get_seconds())) {
46969 + rcu_read_lock();
46970 + cred = __task_cred(task);
46971 + if (cred->uid && proc_is_setxid(cred)) {
46972 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46973 + spin_lock(&gr_uid_lock);
46974 + gr_insert_uid(cred->uid, curr->expires);
46975 + spin_unlock(&gr_uid_lock);
46976 + curr->expires = 0;
46977 + curr->crashes = 0;
46978 + read_lock(&tasklist_lock);
46979 + do_each_thread(tsk2, tsk) {
46980 + cred2 = __task_cred(tsk);
46981 + if (tsk != task && cred2->uid == cred->uid)
46982 + gr_fake_force_sig(SIGKILL, tsk);
46983 + } while_each_thread(tsk2, tsk);
46984 + read_unlock(&tasklist_lock);
46985 + } else {
46986 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46987 + read_lock(&tasklist_lock);
46988 + do_each_thread(tsk2, tsk) {
46989 + if (likely(tsk != task)) {
46990 + curr2 = tsk->acl;
46991 +
46992 + if (curr2->device == curr->device &&
46993 + curr2->inode == curr->inode)
46994 + gr_fake_force_sig(SIGKILL, tsk);
46995 + }
46996 + } while_each_thread(tsk2, tsk);
46997 + read_unlock(&tasklist_lock);
46998 + }
46999 + rcu_read_unlock();
47000 + }
47001 +
47002 + return;
47003 +}
47004 +
47005 +int
47006 +gr_check_crash_exec(const struct file *filp)
47007 +{
47008 + struct acl_subject_label *curr;
47009 +
47010 + if (unlikely(!gr_acl_is_enabled()))
47011 + return 0;
47012 +
47013 + read_lock(&gr_inode_lock);
47014 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47015 + __get_dev(filp->f_path.dentry),
47016 + current->role);
47017 + read_unlock(&gr_inode_lock);
47018 +
47019 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47020 + (!curr->crashes && !curr->expires))
47021 + return 0;
47022 +
47023 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47024 + time_after(curr->expires, get_seconds()))
47025 + return 1;
47026 + else if (time_before_eq(curr->expires, get_seconds())) {
47027 + curr->crashes = 0;
47028 + curr->expires = 0;
47029 + }
47030 +
47031 + return 0;
47032 +}
47033 +
47034 +void
47035 +gr_handle_alertkill(struct task_struct *task)
47036 +{
47037 + struct acl_subject_label *curracl;
47038 + __u32 curr_ip;
47039 + struct task_struct *p, *p2;
47040 +
47041 + if (unlikely(!gr_acl_is_enabled()))
47042 + return;
47043 +
47044 + curracl = task->acl;
47045 + curr_ip = task->signal->curr_ip;
47046 +
47047 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47048 + read_lock(&tasklist_lock);
47049 + do_each_thread(p2, p) {
47050 + if (p->signal->curr_ip == curr_ip)
47051 + gr_fake_force_sig(SIGKILL, p);
47052 + } while_each_thread(p2, p);
47053 + read_unlock(&tasklist_lock);
47054 + } else if (curracl->mode & GR_KILLPROC)
47055 + gr_fake_force_sig(SIGKILL, task);
47056 +
47057 + return;
47058 +}
47059 diff -urNp linux-3.0.3/grsecurity/gracl_shm.c linux-3.0.3/grsecurity/gracl_shm.c
47060 --- linux-3.0.3/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47061 +++ linux-3.0.3/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
47062 @@ -0,0 +1,40 @@
47063 +#include <linux/kernel.h>
47064 +#include <linux/mm.h>
47065 +#include <linux/sched.h>
47066 +#include <linux/file.h>
47067 +#include <linux/ipc.h>
47068 +#include <linux/gracl.h>
47069 +#include <linux/grsecurity.h>
47070 +#include <linux/grinternal.h>
47071 +
47072 +int
47073 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47074 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47075 +{
47076 + struct task_struct *task;
47077 +
47078 + if (!gr_acl_is_enabled())
47079 + return 1;
47080 +
47081 + rcu_read_lock();
47082 + read_lock(&tasklist_lock);
47083 +
47084 + task = find_task_by_vpid(shm_cprid);
47085 +
47086 + if (unlikely(!task))
47087 + task = find_task_by_vpid(shm_lapid);
47088 +
47089 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47090 + (task->pid == shm_lapid)) &&
47091 + (task->acl->mode & GR_PROTSHM) &&
47092 + (task->acl != current->acl))) {
47093 + read_unlock(&tasklist_lock);
47094 + rcu_read_unlock();
47095 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47096 + return 0;
47097 + }
47098 + read_unlock(&tasklist_lock);
47099 + rcu_read_unlock();
47100 +
47101 + return 1;
47102 +}
47103 diff -urNp linux-3.0.3/grsecurity/grsec_chdir.c linux-3.0.3/grsecurity/grsec_chdir.c
47104 --- linux-3.0.3/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47105 +++ linux-3.0.3/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
47106 @@ -0,0 +1,19 @@
47107 +#include <linux/kernel.h>
47108 +#include <linux/sched.h>
47109 +#include <linux/fs.h>
47110 +#include <linux/file.h>
47111 +#include <linux/grsecurity.h>
47112 +#include <linux/grinternal.h>
47113 +
47114 +void
47115 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47116 +{
47117 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47118 + if ((grsec_enable_chdir && grsec_enable_group &&
47119 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47120 + !grsec_enable_group)) {
47121 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47122 + }
47123 +#endif
47124 + return;
47125 +}
47126 diff -urNp linux-3.0.3/grsecurity/grsec_chroot.c linux-3.0.3/grsecurity/grsec_chroot.c
47127 --- linux-3.0.3/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47128 +++ linux-3.0.3/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
47129 @@ -0,0 +1,349 @@
47130 +#include <linux/kernel.h>
47131 +#include <linux/module.h>
47132 +#include <linux/sched.h>
47133 +#include <linux/file.h>
47134 +#include <linux/fs.h>
47135 +#include <linux/mount.h>
47136 +#include <linux/types.h>
47137 +#include <linux/pid_namespace.h>
47138 +#include <linux/grsecurity.h>
47139 +#include <linux/grinternal.h>
47140 +
47141 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47142 +{
47143 +#ifdef CONFIG_GRKERNSEC
47144 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47145 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47146 + task->gr_is_chrooted = 1;
47147 + else
47148 + task->gr_is_chrooted = 0;
47149 +
47150 + task->gr_chroot_dentry = path->dentry;
47151 +#endif
47152 + return;
47153 +}
47154 +
47155 +void gr_clear_chroot_entries(struct task_struct *task)
47156 +{
47157 +#ifdef CONFIG_GRKERNSEC
47158 + task->gr_is_chrooted = 0;
47159 + task->gr_chroot_dentry = NULL;
47160 +#endif
47161 + return;
47162 +}
47163 +
47164 +int
47165 +gr_handle_chroot_unix(const pid_t pid)
47166 +{
47167 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47168 + struct task_struct *p;
47169 +
47170 + if (unlikely(!grsec_enable_chroot_unix))
47171 + return 1;
47172 +
47173 + if (likely(!proc_is_chrooted(current)))
47174 + return 1;
47175 +
47176 + rcu_read_lock();
47177 + read_lock(&tasklist_lock);
47178 + p = find_task_by_vpid_unrestricted(pid);
47179 + if (unlikely(p && !have_same_root(current, p))) {
47180 + read_unlock(&tasklist_lock);
47181 + rcu_read_unlock();
47182 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47183 + return 0;
47184 + }
47185 + read_unlock(&tasklist_lock);
47186 + rcu_read_unlock();
47187 +#endif
47188 + return 1;
47189 +}
47190 +
47191 +int
47192 +gr_handle_chroot_nice(void)
47193 +{
47194 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47195 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47196 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47197 + return -EPERM;
47198 + }
47199 +#endif
47200 + return 0;
47201 +}
47202 +
47203 +int
47204 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47205 +{
47206 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47207 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47208 + && proc_is_chrooted(current)) {
47209 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47210 + return -EACCES;
47211 + }
47212 +#endif
47213 + return 0;
47214 +}
47215 +
47216 +int
47217 +gr_handle_chroot_rawio(const struct inode *inode)
47218 +{
47219 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47220 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47221 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47222 + return 1;
47223 +#endif
47224 + return 0;
47225 +}
47226 +
47227 +int
47228 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47229 +{
47230 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47231 + struct task_struct *p;
47232 + int ret = 0;
47233 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47234 + return ret;
47235 +
47236 + read_lock(&tasklist_lock);
47237 + do_each_pid_task(pid, type, p) {
47238 + if (!have_same_root(current, p)) {
47239 + ret = 1;
47240 + goto out;
47241 + }
47242 + } while_each_pid_task(pid, type, p);
47243 +out:
47244 + read_unlock(&tasklist_lock);
47245 + return ret;
47246 +#endif
47247 + return 0;
47248 +}
47249 +
47250 +int
47251 +gr_pid_is_chrooted(struct task_struct *p)
47252 +{
47253 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47254 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47255 + return 0;
47256 +
47257 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47258 + !have_same_root(current, p)) {
47259 + return 1;
47260 + }
47261 +#endif
47262 + return 0;
47263 +}
47264 +
47265 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47266 +
47267 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47268 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47269 +{
47270 + struct path path, currentroot;
47271 + int ret = 0;
47272 +
47273 + path.dentry = (struct dentry *)u_dentry;
47274 + path.mnt = (struct vfsmount *)u_mnt;
47275 + get_fs_root(current->fs, &currentroot);
47276 + if (path_is_under(&path, &currentroot))
47277 + ret = 1;
47278 + path_put(&currentroot);
47279 +
47280 + return ret;
47281 +}
47282 +#endif
47283 +
47284 +int
47285 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47286 +{
47287 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47288 + if (!grsec_enable_chroot_fchdir)
47289 + return 1;
47290 +
47291 + if (!proc_is_chrooted(current))
47292 + return 1;
47293 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47294 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47295 + return 0;
47296 + }
47297 +#endif
47298 + return 1;
47299 +}
47300 +
47301 +int
47302 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47303 + const time_t shm_createtime)
47304 +{
47305 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47306 + struct task_struct *p;
47307 + time_t starttime;
47308 +
47309 + if (unlikely(!grsec_enable_chroot_shmat))
47310 + return 1;
47311 +
47312 + if (likely(!proc_is_chrooted(current)))
47313 + return 1;
47314 +
47315 + rcu_read_lock();
47316 + read_lock(&tasklist_lock);
47317 +
47318 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47319 + starttime = p->start_time.tv_sec;
47320 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47321 + if (have_same_root(current, p)) {
47322 + goto allow;
47323 + } else {
47324 + read_unlock(&tasklist_lock);
47325 + rcu_read_unlock();
47326 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47327 + return 0;
47328 + }
47329 + }
47330 + /* creator exited, pid reuse, fall through to next check */
47331 + }
47332 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47333 + if (unlikely(!have_same_root(current, p))) {
47334 + read_unlock(&tasklist_lock);
47335 + rcu_read_unlock();
47336 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47337 + return 0;
47338 + }
47339 + }
47340 +
47341 +allow:
47342 + read_unlock(&tasklist_lock);
47343 + rcu_read_unlock();
47344 +#endif
47345 + return 1;
47346 +}
47347 +
47348 +void
47349 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47350 +{
47351 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47352 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47353 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47354 +#endif
47355 + return;
47356 +}
47357 +
47358 +int
47359 +gr_handle_chroot_mknod(const struct dentry *dentry,
47360 + const struct vfsmount *mnt, const int mode)
47361 +{
47362 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47363 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47364 + proc_is_chrooted(current)) {
47365 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47366 + return -EPERM;
47367 + }
47368 +#endif
47369 + return 0;
47370 +}
47371 +
47372 +int
47373 +gr_handle_chroot_mount(const struct dentry *dentry,
47374 + const struct vfsmount *mnt, const char *dev_name)
47375 +{
47376 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47377 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47378 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47379 + return -EPERM;
47380 + }
47381 +#endif
47382 + return 0;
47383 +}
47384 +
47385 +int
47386 +gr_handle_chroot_pivot(void)
47387 +{
47388 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47389 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47390 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47391 + return -EPERM;
47392 + }
47393 +#endif
47394 + return 0;
47395 +}
47396 +
47397 +int
47398 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47399 +{
47400 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47401 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47402 + !gr_is_outside_chroot(dentry, mnt)) {
47403 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47404 + return -EPERM;
47405 + }
47406 +#endif
47407 + return 0;
47408 +}
47409 +
47410 +int
47411 +gr_handle_chroot_caps(struct path *path)
47412 +{
47413 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47414 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47415 + (init_task.fs->root.dentry != path->dentry) &&
47416 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47417 +
47418 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47419 + const struct cred *old = current_cred();
47420 + struct cred *new = prepare_creds();
47421 + if (new == NULL)
47422 + return 1;
47423 +
47424 + new->cap_permitted = cap_drop(old->cap_permitted,
47425 + chroot_caps);
47426 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47427 + chroot_caps);
47428 + new->cap_effective = cap_drop(old->cap_effective,
47429 + chroot_caps);
47430 +
47431 + commit_creds(new);
47432 +
47433 + return 0;
47434 + }
47435 +#endif
47436 + return 0;
47437 +}
47438 +
47439 +int
47440 +gr_handle_chroot_sysctl(const int op)
47441 +{
47442 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47443 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47444 + proc_is_chrooted(current))
47445 + return -EACCES;
47446 +#endif
47447 + return 0;
47448 +}
47449 +
47450 +void
47451 +gr_handle_chroot_chdir(struct path *path)
47452 +{
47453 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47454 + if (grsec_enable_chroot_chdir)
47455 + set_fs_pwd(current->fs, path);
47456 +#endif
47457 + return;
47458 +}
47459 +
47460 +int
47461 +gr_handle_chroot_chmod(const struct dentry *dentry,
47462 + const struct vfsmount *mnt, const int mode)
47463 +{
47464 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47465 + /* allow chmod +s on directories, but not files */
47466 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47467 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47468 + proc_is_chrooted(current)) {
47469 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47470 + return -EPERM;
47471 + }
47472 +#endif
47473 + return 0;
47474 +}
47475 +
47476 +#ifdef CONFIG_SECURITY
47477 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47478 +#endif
47479 diff -urNp linux-3.0.3/grsecurity/grsec_disabled.c linux-3.0.3/grsecurity/grsec_disabled.c
47480 --- linux-3.0.3/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47481 +++ linux-3.0.3/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
47482 @@ -0,0 +1,447 @@
47483 +#include <linux/kernel.h>
47484 +#include <linux/module.h>
47485 +#include <linux/sched.h>
47486 +#include <linux/file.h>
47487 +#include <linux/fs.h>
47488 +#include <linux/kdev_t.h>
47489 +#include <linux/net.h>
47490 +#include <linux/in.h>
47491 +#include <linux/ip.h>
47492 +#include <linux/skbuff.h>
47493 +#include <linux/sysctl.h>
47494 +
47495 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47496 +void
47497 +pax_set_initial_flags(struct linux_binprm *bprm)
47498 +{
47499 + return;
47500 +}
47501 +#endif
47502 +
47503 +#ifdef CONFIG_SYSCTL
47504 +__u32
47505 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47506 +{
47507 + return 0;
47508 +}
47509 +#endif
47510 +
47511 +#ifdef CONFIG_TASKSTATS
47512 +int gr_is_taskstats_denied(int pid)
47513 +{
47514 + return 0;
47515 +}
47516 +#endif
47517 +
47518 +int
47519 +gr_acl_is_enabled(void)
47520 +{
47521 + return 0;
47522 +}
47523 +
47524 +int
47525 +gr_handle_rawio(const struct inode *inode)
47526 +{
47527 + return 0;
47528 +}
47529 +
47530 +void
47531 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47532 +{
47533 + return;
47534 +}
47535 +
47536 +int
47537 +gr_handle_ptrace(struct task_struct *task, const long request)
47538 +{
47539 + return 0;
47540 +}
47541 +
47542 +int
47543 +gr_handle_proc_ptrace(struct task_struct *task)
47544 +{
47545 + return 0;
47546 +}
47547 +
47548 +void
47549 +gr_learn_resource(const struct task_struct *task,
47550 + const int res, const unsigned long wanted, const int gt)
47551 +{
47552 + return;
47553 +}
47554 +
47555 +int
47556 +gr_set_acls(const int type)
47557 +{
47558 + return 0;
47559 +}
47560 +
47561 +int
47562 +gr_check_hidden_task(const struct task_struct *tsk)
47563 +{
47564 + return 0;
47565 +}
47566 +
47567 +int
47568 +gr_check_protected_task(const struct task_struct *task)
47569 +{
47570 + return 0;
47571 +}
47572 +
47573 +int
47574 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47575 +{
47576 + return 0;
47577 +}
47578 +
47579 +void
47580 +gr_copy_label(struct task_struct *tsk)
47581 +{
47582 + return;
47583 +}
47584 +
47585 +void
47586 +gr_set_pax_flags(struct task_struct *task)
47587 +{
47588 + return;
47589 +}
47590 +
47591 +int
47592 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47593 + const int unsafe_share)
47594 +{
47595 + return 0;
47596 +}
47597 +
47598 +void
47599 +gr_handle_delete(const ino_t ino, const dev_t dev)
47600 +{
47601 + return;
47602 +}
47603 +
47604 +void
47605 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47606 +{
47607 + return;
47608 +}
47609 +
47610 +void
47611 +gr_handle_crash(struct task_struct *task, const int sig)
47612 +{
47613 + return;
47614 +}
47615 +
47616 +int
47617 +gr_check_crash_exec(const struct file *filp)
47618 +{
47619 + return 0;
47620 +}
47621 +
47622 +int
47623 +gr_check_crash_uid(const uid_t uid)
47624 +{
47625 + return 0;
47626 +}
47627 +
47628 +void
47629 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47630 + struct dentry *old_dentry,
47631 + struct dentry *new_dentry,
47632 + struct vfsmount *mnt, const __u8 replace)
47633 +{
47634 + return;
47635 +}
47636 +
47637 +int
47638 +gr_search_socket(const int family, const int type, const int protocol)
47639 +{
47640 + return 1;
47641 +}
47642 +
47643 +int
47644 +gr_search_connectbind(const int mode, const struct socket *sock,
47645 + const struct sockaddr_in *addr)
47646 +{
47647 + return 0;
47648 +}
47649 +
47650 +int
47651 +gr_is_capable(const int cap)
47652 +{
47653 + return 1;
47654 +}
47655 +
47656 +int
47657 +gr_is_capable_nolog(const int cap)
47658 +{
47659 + return 1;
47660 +}
47661 +
47662 +void
47663 +gr_handle_alertkill(struct task_struct *task)
47664 +{
47665 + return;
47666 +}
47667 +
47668 +__u32
47669 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47670 +{
47671 + return 1;
47672 +}
47673 +
47674 +__u32
47675 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47676 + const struct vfsmount * mnt)
47677 +{
47678 + return 1;
47679 +}
47680 +
47681 +__u32
47682 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47683 + const int fmode)
47684 +{
47685 + return 1;
47686 +}
47687 +
47688 +__u32
47689 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47690 +{
47691 + return 1;
47692 +}
47693 +
47694 +__u32
47695 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47696 +{
47697 + return 1;
47698 +}
47699 +
47700 +int
47701 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47702 + unsigned int *vm_flags)
47703 +{
47704 + return 1;
47705 +}
47706 +
47707 +__u32
47708 +gr_acl_handle_truncate(const struct dentry * dentry,
47709 + const struct vfsmount * mnt)
47710 +{
47711 + return 1;
47712 +}
47713 +
47714 +__u32
47715 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47716 +{
47717 + return 1;
47718 +}
47719 +
47720 +__u32
47721 +gr_acl_handle_access(const struct dentry * dentry,
47722 + const struct vfsmount * mnt, const int fmode)
47723 +{
47724 + return 1;
47725 +}
47726 +
47727 +__u32
47728 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47729 + mode_t mode)
47730 +{
47731 + return 1;
47732 +}
47733 +
47734 +__u32
47735 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47736 + mode_t mode)
47737 +{
47738 + return 1;
47739 +}
47740 +
47741 +__u32
47742 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47743 +{
47744 + return 1;
47745 +}
47746 +
47747 +__u32
47748 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47749 +{
47750 + return 1;
47751 +}
47752 +
47753 +void
47754 +grsecurity_init(void)
47755 +{
47756 + return;
47757 +}
47758 +
47759 +__u32
47760 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47761 + const struct dentry * parent_dentry,
47762 + const struct vfsmount * parent_mnt,
47763 + const int mode)
47764 +{
47765 + return 1;
47766 +}
47767 +
47768 +__u32
47769 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47770 + const struct dentry * parent_dentry,
47771 + const struct vfsmount * parent_mnt)
47772 +{
47773 + return 1;
47774 +}
47775 +
47776 +__u32
47777 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47778 + const struct dentry * parent_dentry,
47779 + const struct vfsmount * parent_mnt, const char *from)
47780 +{
47781 + return 1;
47782 +}
47783 +
47784 +__u32
47785 +gr_acl_handle_link(const struct dentry * new_dentry,
47786 + const struct dentry * parent_dentry,
47787 + const struct vfsmount * parent_mnt,
47788 + const struct dentry * old_dentry,
47789 + const struct vfsmount * old_mnt, const char *to)
47790 +{
47791 + return 1;
47792 +}
47793 +
47794 +int
47795 +gr_acl_handle_rename(const struct dentry *new_dentry,
47796 + const struct dentry *parent_dentry,
47797 + const struct vfsmount *parent_mnt,
47798 + const struct dentry *old_dentry,
47799 + const struct inode *old_parent_inode,
47800 + const struct vfsmount *old_mnt, const char *newname)
47801 +{
47802 + return 0;
47803 +}
47804 +
47805 +int
47806 +gr_acl_handle_filldir(const struct file *file, const char *name,
47807 + const int namelen, const ino_t ino)
47808 +{
47809 + return 1;
47810 +}
47811 +
47812 +int
47813 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47814 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47815 +{
47816 + return 1;
47817 +}
47818 +
47819 +int
47820 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47821 +{
47822 + return 0;
47823 +}
47824 +
47825 +int
47826 +gr_search_accept(const struct socket *sock)
47827 +{
47828 + return 0;
47829 +}
47830 +
47831 +int
47832 +gr_search_listen(const struct socket *sock)
47833 +{
47834 + return 0;
47835 +}
47836 +
47837 +int
47838 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47839 +{
47840 + return 0;
47841 +}
47842 +
47843 +__u32
47844 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47845 +{
47846 + return 1;
47847 +}
47848 +
47849 +__u32
47850 +gr_acl_handle_creat(const struct dentry * dentry,
47851 + const struct dentry * p_dentry,
47852 + const struct vfsmount * p_mnt, const int fmode,
47853 + const int imode)
47854 +{
47855 + return 1;
47856 +}
47857 +
47858 +void
47859 +gr_acl_handle_exit(void)
47860 +{
47861 + return;
47862 +}
47863 +
47864 +int
47865 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47866 +{
47867 + return 1;
47868 +}
47869 +
47870 +void
47871 +gr_set_role_label(const uid_t uid, const gid_t gid)
47872 +{
47873 + return;
47874 +}
47875 +
47876 +int
47877 +gr_acl_handle_procpidmem(const struct task_struct *task)
47878 +{
47879 + return 0;
47880 +}
47881 +
47882 +int
47883 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47884 +{
47885 + return 0;
47886 +}
47887 +
47888 +int
47889 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47890 +{
47891 + return 0;
47892 +}
47893 +
47894 +void
47895 +gr_set_kernel_label(struct task_struct *task)
47896 +{
47897 + return;
47898 +}
47899 +
47900 +int
47901 +gr_check_user_change(int real, int effective, int fs)
47902 +{
47903 + return 0;
47904 +}
47905 +
47906 +int
47907 +gr_check_group_change(int real, int effective, int fs)
47908 +{
47909 + return 0;
47910 +}
47911 +
47912 +int gr_acl_enable_at_secure(void)
47913 +{
47914 + return 0;
47915 +}
47916 +
47917 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47918 +{
47919 + return dentry->d_inode->i_sb->s_dev;
47920 +}
47921 +
47922 +EXPORT_SYMBOL(gr_is_capable);
47923 +EXPORT_SYMBOL(gr_is_capable_nolog);
47924 +EXPORT_SYMBOL(gr_learn_resource);
47925 +EXPORT_SYMBOL(gr_set_kernel_label);
47926 +#ifdef CONFIG_SECURITY
47927 +EXPORT_SYMBOL(gr_check_user_change);
47928 +EXPORT_SYMBOL(gr_check_group_change);
47929 +#endif
47930 diff -urNp linux-3.0.3/grsecurity/grsec_exec.c linux-3.0.3/grsecurity/grsec_exec.c
47931 --- linux-3.0.3/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47932 +++ linux-3.0.3/grsecurity/grsec_exec.c 2011-08-25 17:25:59.000000000 -0400
47933 @@ -0,0 +1,72 @@
47934 +#include <linux/kernel.h>
47935 +#include <linux/sched.h>
47936 +#include <linux/file.h>
47937 +#include <linux/binfmts.h>
47938 +#include <linux/fs.h>
47939 +#include <linux/types.h>
47940 +#include <linux/grdefs.h>
47941 +#include <linux/grsecurity.h>
47942 +#include <linux/grinternal.h>
47943 +#include <linux/capability.h>
47944 +
47945 +#include <asm/uaccess.h>
47946 +
47947 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47948 +static char gr_exec_arg_buf[132];
47949 +static DEFINE_MUTEX(gr_exec_arg_mutex);
47950 +#endif
47951 +
47952 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
47953 +
47954 +void
47955 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
47956 +{
47957 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47958 + char *grarg = gr_exec_arg_buf;
47959 + unsigned int i, x, execlen = 0;
47960 + char c;
47961 +
47962 + if (!((grsec_enable_execlog && grsec_enable_group &&
47963 + in_group_p(grsec_audit_gid))
47964 + || (grsec_enable_execlog && !grsec_enable_group)))
47965 + return;
47966 +
47967 + mutex_lock(&gr_exec_arg_mutex);
47968 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
47969 +
47970 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
47971 + const char __user *p;
47972 + unsigned int len;
47973 +
47974 + p = get_user_arg_ptr(argv, i);
47975 + if (IS_ERR(p))
47976 + goto log;
47977 +
47978 + len = strnlen_user(p, 128 - execlen);
47979 + if (len > 128 - execlen)
47980 + len = 128 - execlen;
47981 + else if (len > 0)
47982 + len--;
47983 + if (copy_from_user(grarg + execlen, p, len))
47984 + goto log;
47985 +
47986 + /* rewrite unprintable characters */
47987 + for (x = 0; x < len; x++) {
47988 + c = *(grarg + execlen + x);
47989 + if (c < 32 || c > 126)
47990 + *(grarg + execlen + x) = ' ';
47991 + }
47992 +
47993 + execlen += len;
47994 + *(grarg + execlen) = ' ';
47995 + *(grarg + execlen + 1) = '\0';
47996 + execlen++;
47997 + }
47998 +
47999 + log:
48000 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48001 + bprm->file->f_path.mnt, grarg);
48002 + mutex_unlock(&gr_exec_arg_mutex);
48003 +#endif
48004 + return;
48005 +}
48006 diff -urNp linux-3.0.3/grsecurity/grsec_fifo.c linux-3.0.3/grsecurity/grsec_fifo.c
48007 --- linux-3.0.3/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48008 +++ linux-3.0.3/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
48009 @@ -0,0 +1,24 @@
48010 +#include <linux/kernel.h>
48011 +#include <linux/sched.h>
48012 +#include <linux/fs.h>
48013 +#include <linux/file.h>
48014 +#include <linux/grinternal.h>
48015 +
48016 +int
48017 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48018 + const struct dentry *dir, const int flag, const int acc_mode)
48019 +{
48020 +#ifdef CONFIG_GRKERNSEC_FIFO
48021 + const struct cred *cred = current_cred();
48022 +
48023 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48024 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48025 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48026 + (cred->fsuid != dentry->d_inode->i_uid)) {
48027 + if (!inode_permission(dentry->d_inode, acc_mode))
48028 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48029 + return -EACCES;
48030 + }
48031 +#endif
48032 + return 0;
48033 +}
48034 diff -urNp linux-3.0.3/grsecurity/grsec_fork.c linux-3.0.3/grsecurity/grsec_fork.c
48035 --- linux-3.0.3/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48036 +++ linux-3.0.3/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
48037 @@ -0,0 +1,23 @@
48038 +#include <linux/kernel.h>
48039 +#include <linux/sched.h>
48040 +#include <linux/grsecurity.h>
48041 +#include <linux/grinternal.h>
48042 +#include <linux/errno.h>
48043 +
48044 +void
48045 +gr_log_forkfail(const int retval)
48046 +{
48047 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48048 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48049 + switch (retval) {
48050 + case -EAGAIN:
48051 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48052 + break;
48053 + case -ENOMEM:
48054 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48055 + break;
48056 + }
48057 + }
48058 +#endif
48059 + return;
48060 +}
48061 diff -urNp linux-3.0.3/grsecurity/grsec_init.c linux-3.0.3/grsecurity/grsec_init.c
48062 --- linux-3.0.3/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48063 +++ linux-3.0.3/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
48064 @@ -0,0 +1,269 @@
48065 +#include <linux/kernel.h>
48066 +#include <linux/sched.h>
48067 +#include <linux/mm.h>
48068 +#include <linux/gracl.h>
48069 +#include <linux/slab.h>
48070 +#include <linux/vmalloc.h>
48071 +#include <linux/percpu.h>
48072 +#include <linux/module.h>
48073 +
48074 +int grsec_enable_brute;
48075 +int grsec_enable_link;
48076 +int grsec_enable_dmesg;
48077 +int grsec_enable_harden_ptrace;
48078 +int grsec_enable_fifo;
48079 +int grsec_enable_execlog;
48080 +int grsec_enable_signal;
48081 +int grsec_enable_forkfail;
48082 +int grsec_enable_audit_ptrace;
48083 +int grsec_enable_time;
48084 +int grsec_enable_audit_textrel;
48085 +int grsec_enable_group;
48086 +int grsec_audit_gid;
48087 +int grsec_enable_chdir;
48088 +int grsec_enable_mount;
48089 +int grsec_enable_rofs;
48090 +int grsec_enable_chroot_findtask;
48091 +int grsec_enable_chroot_mount;
48092 +int grsec_enable_chroot_shmat;
48093 +int grsec_enable_chroot_fchdir;
48094 +int grsec_enable_chroot_double;
48095 +int grsec_enable_chroot_pivot;
48096 +int grsec_enable_chroot_chdir;
48097 +int grsec_enable_chroot_chmod;
48098 +int grsec_enable_chroot_mknod;
48099 +int grsec_enable_chroot_nice;
48100 +int grsec_enable_chroot_execlog;
48101 +int grsec_enable_chroot_caps;
48102 +int grsec_enable_chroot_sysctl;
48103 +int grsec_enable_chroot_unix;
48104 +int grsec_enable_tpe;
48105 +int grsec_tpe_gid;
48106 +int grsec_enable_blackhole;
48107 +#ifdef CONFIG_IPV6_MODULE
48108 +EXPORT_SYMBOL(grsec_enable_blackhole);
48109 +#endif
48110 +int grsec_lastack_retries;
48111 +int grsec_enable_tpe_all;
48112 +int grsec_enable_tpe_invert;
48113 +int grsec_enable_socket_all;
48114 +int grsec_socket_all_gid;
48115 +int grsec_enable_socket_client;
48116 +int grsec_socket_client_gid;
48117 +int grsec_enable_socket_server;
48118 +int grsec_socket_server_gid;
48119 +int grsec_resource_logging;
48120 +int grsec_disable_privio;
48121 +int grsec_enable_log_rwxmaps;
48122 +int grsec_lock;
48123 +
48124 +DEFINE_SPINLOCK(grsec_alert_lock);
48125 +unsigned long grsec_alert_wtime = 0;
48126 +unsigned long grsec_alert_fyet = 0;
48127 +
48128 +DEFINE_SPINLOCK(grsec_audit_lock);
48129 +
48130 +DEFINE_RWLOCK(grsec_exec_file_lock);
48131 +
48132 +char *gr_shared_page[4];
48133 +
48134 +char *gr_alert_log_fmt;
48135 +char *gr_audit_log_fmt;
48136 +char *gr_alert_log_buf;
48137 +char *gr_audit_log_buf;
48138 +
48139 +extern struct gr_arg *gr_usermode;
48140 +extern unsigned char *gr_system_salt;
48141 +extern unsigned char *gr_system_sum;
48142 +
48143 +void __init
48144 +grsecurity_init(void)
48145 +{
48146 + int j;
48147 + /* create the per-cpu shared pages */
48148 +
48149 +#ifdef CONFIG_X86
48150 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48151 +#endif
48152 +
48153 + for (j = 0; j < 4; j++) {
48154 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48155 + if (gr_shared_page[j] == NULL) {
48156 + panic("Unable to allocate grsecurity shared page");
48157 + return;
48158 + }
48159 + }
48160 +
48161 + /* allocate log buffers */
48162 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48163 + if (!gr_alert_log_fmt) {
48164 + panic("Unable to allocate grsecurity alert log format buffer");
48165 + return;
48166 + }
48167 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48168 + if (!gr_audit_log_fmt) {
48169 + panic("Unable to allocate grsecurity audit log format buffer");
48170 + return;
48171 + }
48172 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48173 + if (!gr_alert_log_buf) {
48174 + panic("Unable to allocate grsecurity alert log buffer");
48175 + return;
48176 + }
48177 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48178 + if (!gr_audit_log_buf) {
48179 + panic("Unable to allocate grsecurity audit log buffer");
48180 + return;
48181 + }
48182 +
48183 + /* allocate memory for authentication structure */
48184 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48185 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48186 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48187 +
48188 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48189 + panic("Unable to allocate grsecurity authentication structure");
48190 + return;
48191 + }
48192 +
48193 +
48194 +#ifdef CONFIG_GRKERNSEC_IO
48195 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48196 + grsec_disable_privio = 1;
48197 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48198 + grsec_disable_privio = 1;
48199 +#else
48200 + grsec_disable_privio = 0;
48201 +#endif
48202 +#endif
48203 +
48204 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48205 + /* for backward compatibility, tpe_invert always defaults to on if
48206 + enabled in the kernel
48207 + */
48208 + grsec_enable_tpe_invert = 1;
48209 +#endif
48210 +
48211 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48212 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48213 + grsec_lock = 1;
48214 +#endif
48215 +
48216 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48217 + grsec_enable_audit_textrel = 1;
48218 +#endif
48219 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48220 + grsec_enable_log_rwxmaps = 1;
48221 +#endif
48222 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48223 + grsec_enable_group = 1;
48224 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48225 +#endif
48226 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48227 + grsec_enable_chdir = 1;
48228 +#endif
48229 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48230 + grsec_enable_harden_ptrace = 1;
48231 +#endif
48232 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48233 + grsec_enable_mount = 1;
48234 +#endif
48235 +#ifdef CONFIG_GRKERNSEC_LINK
48236 + grsec_enable_link = 1;
48237 +#endif
48238 +#ifdef CONFIG_GRKERNSEC_BRUTE
48239 + grsec_enable_brute = 1;
48240 +#endif
48241 +#ifdef CONFIG_GRKERNSEC_DMESG
48242 + grsec_enable_dmesg = 1;
48243 +#endif
48244 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48245 + grsec_enable_blackhole = 1;
48246 + grsec_lastack_retries = 4;
48247 +#endif
48248 +#ifdef CONFIG_GRKERNSEC_FIFO
48249 + grsec_enable_fifo = 1;
48250 +#endif
48251 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48252 + grsec_enable_execlog = 1;
48253 +#endif
48254 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48255 + grsec_enable_signal = 1;
48256 +#endif
48257 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48258 + grsec_enable_forkfail = 1;
48259 +#endif
48260 +#ifdef CONFIG_GRKERNSEC_TIME
48261 + grsec_enable_time = 1;
48262 +#endif
48263 +#ifdef CONFIG_GRKERNSEC_RESLOG
48264 + grsec_resource_logging = 1;
48265 +#endif
48266 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48267 + grsec_enable_chroot_findtask = 1;
48268 +#endif
48269 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48270 + grsec_enable_chroot_unix = 1;
48271 +#endif
48272 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48273 + grsec_enable_chroot_mount = 1;
48274 +#endif
48275 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48276 + grsec_enable_chroot_fchdir = 1;
48277 +#endif
48278 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48279 + grsec_enable_chroot_shmat = 1;
48280 +#endif
48281 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48282 + grsec_enable_audit_ptrace = 1;
48283 +#endif
48284 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48285 + grsec_enable_chroot_double = 1;
48286 +#endif
48287 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48288 + grsec_enable_chroot_pivot = 1;
48289 +#endif
48290 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48291 + grsec_enable_chroot_chdir = 1;
48292 +#endif
48293 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48294 + grsec_enable_chroot_chmod = 1;
48295 +#endif
48296 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48297 + grsec_enable_chroot_mknod = 1;
48298 +#endif
48299 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48300 + grsec_enable_chroot_nice = 1;
48301 +#endif
48302 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48303 + grsec_enable_chroot_execlog = 1;
48304 +#endif
48305 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48306 + grsec_enable_chroot_caps = 1;
48307 +#endif
48308 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48309 + grsec_enable_chroot_sysctl = 1;
48310 +#endif
48311 +#ifdef CONFIG_GRKERNSEC_TPE
48312 + grsec_enable_tpe = 1;
48313 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48314 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48315 + grsec_enable_tpe_all = 1;
48316 +#endif
48317 +#endif
48318 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48319 + grsec_enable_socket_all = 1;
48320 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48321 +#endif
48322 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48323 + grsec_enable_socket_client = 1;
48324 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48325 +#endif
48326 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48327 + grsec_enable_socket_server = 1;
48328 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48329 +#endif
48330 +#endif
48331 +
48332 + return;
48333 +}
48334 diff -urNp linux-3.0.3/grsecurity/grsec_link.c linux-3.0.3/grsecurity/grsec_link.c
48335 --- linux-3.0.3/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48336 +++ linux-3.0.3/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
48337 @@ -0,0 +1,43 @@
48338 +#include <linux/kernel.h>
48339 +#include <linux/sched.h>
48340 +#include <linux/fs.h>
48341 +#include <linux/file.h>
48342 +#include <linux/grinternal.h>
48343 +
48344 +int
48345 +gr_handle_follow_link(const struct inode *parent,
48346 + const struct inode *inode,
48347 + const struct dentry *dentry, const struct vfsmount *mnt)
48348 +{
48349 +#ifdef CONFIG_GRKERNSEC_LINK
48350 + const struct cred *cred = current_cred();
48351 +
48352 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48353 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48354 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48355 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48356 + return -EACCES;
48357 + }
48358 +#endif
48359 + return 0;
48360 +}
48361 +
48362 +int
48363 +gr_handle_hardlink(const struct dentry *dentry,
48364 + const struct vfsmount *mnt,
48365 + struct inode *inode, const int mode, const char *to)
48366 +{
48367 +#ifdef CONFIG_GRKERNSEC_LINK
48368 + const struct cred *cred = current_cred();
48369 +
48370 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48371 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48372 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48373 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48374 + !capable(CAP_FOWNER) && cred->uid) {
48375 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48376 + return -EPERM;
48377 + }
48378 +#endif
48379 + return 0;
48380 +}
48381 diff -urNp linux-3.0.3/grsecurity/grsec_log.c linux-3.0.3/grsecurity/grsec_log.c
48382 --- linux-3.0.3/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48383 +++ linux-3.0.3/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
48384 @@ -0,0 +1,310 @@
48385 +#include <linux/kernel.h>
48386 +#include <linux/sched.h>
48387 +#include <linux/file.h>
48388 +#include <linux/tty.h>
48389 +#include <linux/fs.h>
48390 +#include <linux/grinternal.h>
48391 +
48392 +#ifdef CONFIG_TREE_PREEMPT_RCU
48393 +#define DISABLE_PREEMPT() preempt_disable()
48394 +#define ENABLE_PREEMPT() preempt_enable()
48395 +#else
48396 +#define DISABLE_PREEMPT()
48397 +#define ENABLE_PREEMPT()
48398 +#endif
48399 +
48400 +#define BEGIN_LOCKS(x) \
48401 + DISABLE_PREEMPT(); \
48402 + rcu_read_lock(); \
48403 + read_lock(&tasklist_lock); \
48404 + read_lock(&grsec_exec_file_lock); \
48405 + if (x != GR_DO_AUDIT) \
48406 + spin_lock(&grsec_alert_lock); \
48407 + else \
48408 + spin_lock(&grsec_audit_lock)
48409 +
48410 +#define END_LOCKS(x) \
48411 + if (x != GR_DO_AUDIT) \
48412 + spin_unlock(&grsec_alert_lock); \
48413 + else \
48414 + spin_unlock(&grsec_audit_lock); \
48415 + read_unlock(&grsec_exec_file_lock); \
48416 + read_unlock(&tasklist_lock); \
48417 + rcu_read_unlock(); \
48418 + ENABLE_PREEMPT(); \
48419 + if (x == GR_DONT_AUDIT) \
48420 + gr_handle_alertkill(current)
48421 +
48422 +enum {
48423 + FLOODING,
48424 + NO_FLOODING
48425 +};
48426 +
48427 +extern char *gr_alert_log_fmt;
48428 +extern char *gr_audit_log_fmt;
48429 +extern char *gr_alert_log_buf;
48430 +extern char *gr_audit_log_buf;
48431 +
48432 +static int gr_log_start(int audit)
48433 +{
48434 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48435 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48436 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48437 +
48438 + if (audit == GR_DO_AUDIT)
48439 + goto set_fmt;
48440 +
48441 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48442 + grsec_alert_wtime = jiffies;
48443 + grsec_alert_fyet = 0;
48444 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48445 + grsec_alert_fyet++;
48446 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48447 + grsec_alert_wtime = jiffies;
48448 + grsec_alert_fyet++;
48449 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48450 + return FLOODING;
48451 + } else return FLOODING;
48452 +
48453 +set_fmt:
48454 + memset(buf, 0, PAGE_SIZE);
48455 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48456 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48457 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48458 + } else if (current->signal->curr_ip) {
48459 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48460 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48461 + } else if (gr_acl_is_enabled()) {
48462 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48463 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48464 + } else {
48465 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48466 + strcpy(buf, fmt);
48467 + }
48468 +
48469 + return NO_FLOODING;
48470 +}
48471 +
48472 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48473 + __attribute__ ((format (printf, 2, 0)));
48474 +
48475 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48476 +{
48477 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48478 + unsigned int len = strlen(buf);
48479 +
48480 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48481 +
48482 + return;
48483 +}
48484 +
48485 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48486 + __attribute__ ((format (printf, 2, 3)));
48487 +
48488 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48489 +{
48490 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48491 + unsigned int len = strlen(buf);
48492 + va_list ap;
48493 +
48494 + va_start(ap, msg);
48495 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48496 + va_end(ap);
48497 +
48498 + return;
48499 +}
48500 +
48501 +static void gr_log_end(int audit)
48502 +{
48503 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48504 + unsigned int len = strlen(buf);
48505 +
48506 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48507 + printk("%s\n", buf);
48508 +
48509 + return;
48510 +}
48511 +
48512 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48513 +{
48514 + int logtype;
48515 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48516 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48517 + void *voidptr = NULL;
48518 + int num1 = 0, num2 = 0;
48519 + unsigned long ulong1 = 0, ulong2 = 0;
48520 + struct dentry *dentry = NULL;
48521 + struct vfsmount *mnt = NULL;
48522 + struct file *file = NULL;
48523 + struct task_struct *task = NULL;
48524 + const struct cred *cred, *pcred;
48525 + va_list ap;
48526 +
48527 + BEGIN_LOCKS(audit);
48528 + logtype = gr_log_start(audit);
48529 + if (logtype == FLOODING) {
48530 + END_LOCKS(audit);
48531 + return;
48532 + }
48533 + va_start(ap, argtypes);
48534 + switch (argtypes) {
48535 + case GR_TTYSNIFF:
48536 + task = va_arg(ap, struct task_struct *);
48537 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48538 + break;
48539 + case GR_SYSCTL_HIDDEN:
48540 + str1 = va_arg(ap, char *);
48541 + gr_log_middle_varargs(audit, msg, result, str1);
48542 + break;
48543 + case GR_RBAC:
48544 + dentry = va_arg(ap, struct dentry *);
48545 + mnt = va_arg(ap, struct vfsmount *);
48546 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48547 + break;
48548 + case GR_RBAC_STR:
48549 + dentry = va_arg(ap, struct dentry *);
48550 + mnt = va_arg(ap, struct vfsmount *);
48551 + str1 = va_arg(ap, char *);
48552 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48553 + break;
48554 + case GR_STR_RBAC:
48555 + str1 = va_arg(ap, char *);
48556 + dentry = va_arg(ap, struct dentry *);
48557 + mnt = va_arg(ap, struct vfsmount *);
48558 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48559 + break;
48560 + case GR_RBAC_MODE2:
48561 + dentry = va_arg(ap, struct dentry *);
48562 + mnt = va_arg(ap, struct vfsmount *);
48563 + str1 = va_arg(ap, char *);
48564 + str2 = va_arg(ap, char *);
48565 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48566 + break;
48567 + case GR_RBAC_MODE3:
48568 + dentry = va_arg(ap, struct dentry *);
48569 + mnt = va_arg(ap, struct vfsmount *);
48570 + str1 = va_arg(ap, char *);
48571 + str2 = va_arg(ap, char *);
48572 + str3 = va_arg(ap, char *);
48573 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48574 + break;
48575 + case GR_FILENAME:
48576 + dentry = va_arg(ap, struct dentry *);
48577 + mnt = va_arg(ap, struct vfsmount *);
48578 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48579 + break;
48580 + case GR_STR_FILENAME:
48581 + str1 = va_arg(ap, char *);
48582 + dentry = va_arg(ap, struct dentry *);
48583 + mnt = va_arg(ap, struct vfsmount *);
48584 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48585 + break;
48586 + case GR_FILENAME_STR:
48587 + dentry = va_arg(ap, struct dentry *);
48588 + mnt = va_arg(ap, struct vfsmount *);
48589 + str1 = va_arg(ap, char *);
48590 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48591 + break;
48592 + case GR_FILENAME_TWO_INT:
48593 + dentry = va_arg(ap, struct dentry *);
48594 + mnt = va_arg(ap, struct vfsmount *);
48595 + num1 = va_arg(ap, int);
48596 + num2 = va_arg(ap, int);
48597 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48598 + break;
48599 + case GR_FILENAME_TWO_INT_STR:
48600 + dentry = va_arg(ap, struct dentry *);
48601 + mnt = va_arg(ap, struct vfsmount *);
48602 + num1 = va_arg(ap, int);
48603 + num2 = va_arg(ap, int);
48604 + str1 = va_arg(ap, char *);
48605 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48606 + break;
48607 + case GR_TEXTREL:
48608 + file = va_arg(ap, struct file *);
48609 + ulong1 = va_arg(ap, unsigned long);
48610 + ulong2 = va_arg(ap, unsigned long);
48611 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48612 + break;
48613 + case GR_PTRACE:
48614 + task = va_arg(ap, struct task_struct *);
48615 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48616 + break;
48617 + case GR_RESOURCE:
48618 + task = va_arg(ap, struct task_struct *);
48619 + cred = __task_cred(task);
48620 + pcred = __task_cred(task->real_parent);
48621 + ulong1 = va_arg(ap, unsigned long);
48622 + str1 = va_arg(ap, char *);
48623 + ulong2 = va_arg(ap, unsigned long);
48624 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48625 + break;
48626 + case GR_CAP:
48627 + task = va_arg(ap, struct task_struct *);
48628 + cred = __task_cred(task);
48629 + pcred = __task_cred(task->real_parent);
48630 + str1 = va_arg(ap, char *);
48631 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48632 + break;
48633 + case GR_SIG:
48634 + str1 = va_arg(ap, char *);
48635 + voidptr = va_arg(ap, void *);
48636 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48637 + break;
48638 + case GR_SIG2:
48639 + task = va_arg(ap, struct task_struct *);
48640 + cred = __task_cred(task);
48641 + pcred = __task_cred(task->real_parent);
48642 + num1 = va_arg(ap, int);
48643 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48644 + break;
48645 + case GR_CRASH1:
48646 + task = va_arg(ap, struct task_struct *);
48647 + cred = __task_cred(task);
48648 + pcred = __task_cred(task->real_parent);
48649 + ulong1 = va_arg(ap, unsigned long);
48650 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48651 + break;
48652 + case GR_CRASH2:
48653 + task = va_arg(ap, struct task_struct *);
48654 + cred = __task_cred(task);
48655 + pcred = __task_cred(task->real_parent);
48656 + ulong1 = va_arg(ap, unsigned long);
48657 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48658 + break;
48659 + case GR_RWXMAP:
48660 + file = va_arg(ap, struct file *);
48661 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48662 + break;
48663 + case GR_PSACCT:
48664 + {
48665 + unsigned int wday, cday;
48666 + __u8 whr, chr;
48667 + __u8 wmin, cmin;
48668 + __u8 wsec, csec;
48669 + char cur_tty[64] = { 0 };
48670 + char parent_tty[64] = { 0 };
48671 +
48672 + task = va_arg(ap, struct task_struct *);
48673 + wday = va_arg(ap, unsigned int);
48674 + cday = va_arg(ap, unsigned int);
48675 + whr = va_arg(ap, int);
48676 + chr = va_arg(ap, int);
48677 + wmin = va_arg(ap, int);
48678 + cmin = va_arg(ap, int);
48679 + wsec = va_arg(ap, int);
48680 + csec = va_arg(ap, int);
48681 + ulong1 = va_arg(ap, unsigned long);
48682 + cred = __task_cred(task);
48683 + pcred = __task_cred(task->real_parent);
48684 +
48685 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48686 + }
48687 + break;
48688 + default:
48689 + gr_log_middle(audit, msg, ap);
48690 + }
48691 + va_end(ap);
48692 + gr_log_end(audit);
48693 + END_LOCKS(audit);
48694 +}
48695 diff -urNp linux-3.0.3/grsecurity/grsec_mem.c linux-3.0.3/grsecurity/grsec_mem.c
48696 --- linux-3.0.3/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48697 +++ linux-3.0.3/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
48698 @@ -0,0 +1,33 @@
48699 +#include <linux/kernel.h>
48700 +#include <linux/sched.h>
48701 +#include <linux/mm.h>
48702 +#include <linux/mman.h>
48703 +#include <linux/grinternal.h>
48704 +
48705 +void
48706 +gr_handle_ioperm(void)
48707 +{
48708 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48709 + return;
48710 +}
48711 +
48712 +void
48713 +gr_handle_iopl(void)
48714 +{
48715 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48716 + return;
48717 +}
48718 +
48719 +void
48720 +gr_handle_mem_readwrite(u64 from, u64 to)
48721 +{
48722 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48723 + return;
48724 +}
48725 +
48726 +void
48727 +gr_handle_vm86(void)
48728 +{
48729 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48730 + return;
48731 +}
48732 diff -urNp linux-3.0.3/grsecurity/grsec_mount.c linux-3.0.3/grsecurity/grsec_mount.c
48733 --- linux-3.0.3/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48734 +++ linux-3.0.3/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
48735 @@ -0,0 +1,62 @@
48736 +#include <linux/kernel.h>
48737 +#include <linux/sched.h>
48738 +#include <linux/mount.h>
48739 +#include <linux/grsecurity.h>
48740 +#include <linux/grinternal.h>
48741 +
48742 +void
48743 +gr_log_remount(const char *devname, const int retval)
48744 +{
48745 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48746 + if (grsec_enable_mount && (retval >= 0))
48747 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48748 +#endif
48749 + return;
48750 +}
48751 +
48752 +void
48753 +gr_log_unmount(const char *devname, const int retval)
48754 +{
48755 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48756 + if (grsec_enable_mount && (retval >= 0))
48757 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48758 +#endif
48759 + return;
48760 +}
48761 +
48762 +void
48763 +gr_log_mount(const char *from, const char *to, const int retval)
48764 +{
48765 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48766 + if (grsec_enable_mount && (retval >= 0))
48767 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48768 +#endif
48769 + return;
48770 +}
48771 +
48772 +int
48773 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48774 +{
48775 +#ifdef CONFIG_GRKERNSEC_ROFS
48776 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48777 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48778 + return -EPERM;
48779 + } else
48780 + return 0;
48781 +#endif
48782 + return 0;
48783 +}
48784 +
48785 +int
48786 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48787 +{
48788 +#ifdef CONFIG_GRKERNSEC_ROFS
48789 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48790 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48791 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48792 + return -EPERM;
48793 + } else
48794 + return 0;
48795 +#endif
48796 + return 0;
48797 +}
48798 diff -urNp linux-3.0.3/grsecurity/grsec_pax.c linux-3.0.3/grsecurity/grsec_pax.c
48799 --- linux-3.0.3/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48800 +++ linux-3.0.3/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
48801 @@ -0,0 +1,36 @@
48802 +#include <linux/kernel.h>
48803 +#include <linux/sched.h>
48804 +#include <linux/mm.h>
48805 +#include <linux/file.h>
48806 +#include <linux/grinternal.h>
48807 +#include <linux/grsecurity.h>
48808 +
48809 +void
48810 +gr_log_textrel(struct vm_area_struct * vma)
48811 +{
48812 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48813 + if (grsec_enable_audit_textrel)
48814 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48815 +#endif
48816 + return;
48817 +}
48818 +
48819 +void
48820 +gr_log_rwxmmap(struct file *file)
48821 +{
48822 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48823 + if (grsec_enable_log_rwxmaps)
48824 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48825 +#endif
48826 + return;
48827 +}
48828 +
48829 +void
48830 +gr_log_rwxmprotect(struct file *file)
48831 +{
48832 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48833 + if (grsec_enable_log_rwxmaps)
48834 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48835 +#endif
48836 + return;
48837 +}
48838 diff -urNp linux-3.0.3/grsecurity/grsec_ptrace.c linux-3.0.3/grsecurity/grsec_ptrace.c
48839 --- linux-3.0.3/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48840 +++ linux-3.0.3/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
48841 @@ -0,0 +1,14 @@
48842 +#include <linux/kernel.h>
48843 +#include <linux/sched.h>
48844 +#include <linux/grinternal.h>
48845 +#include <linux/grsecurity.h>
48846 +
48847 +void
48848 +gr_audit_ptrace(struct task_struct *task)
48849 +{
48850 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48851 + if (grsec_enable_audit_ptrace)
48852 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48853 +#endif
48854 + return;
48855 +}
48856 diff -urNp linux-3.0.3/grsecurity/grsec_sig.c linux-3.0.3/grsecurity/grsec_sig.c
48857 --- linux-3.0.3/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48858 +++ linux-3.0.3/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
48859 @@ -0,0 +1,206 @@
48860 +#include <linux/kernel.h>
48861 +#include <linux/sched.h>
48862 +#include <linux/delay.h>
48863 +#include <linux/grsecurity.h>
48864 +#include <linux/grinternal.h>
48865 +#include <linux/hardirq.h>
48866 +
48867 +char *signames[] = {
48868 + [SIGSEGV] = "Segmentation fault",
48869 + [SIGILL] = "Illegal instruction",
48870 + [SIGABRT] = "Abort",
48871 + [SIGBUS] = "Invalid alignment/Bus error"
48872 +};
48873 +
48874 +void
48875 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48876 +{
48877 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48878 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48879 + (sig == SIGABRT) || (sig == SIGBUS))) {
48880 + if (t->pid == current->pid) {
48881 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48882 + } else {
48883 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48884 + }
48885 + }
48886 +#endif
48887 + return;
48888 +}
48889 +
48890 +int
48891 +gr_handle_signal(const struct task_struct *p, const int sig)
48892 +{
48893 +#ifdef CONFIG_GRKERNSEC
48894 + if (current->pid > 1 && gr_check_protected_task(p)) {
48895 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48896 + return -EPERM;
48897 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48898 + return -EPERM;
48899 + }
48900 +#endif
48901 + return 0;
48902 +}
48903 +
48904 +#ifdef CONFIG_GRKERNSEC
48905 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48906 +
48907 +int gr_fake_force_sig(int sig, struct task_struct *t)
48908 +{
48909 + unsigned long int flags;
48910 + int ret, blocked, ignored;
48911 + struct k_sigaction *action;
48912 +
48913 + spin_lock_irqsave(&t->sighand->siglock, flags);
48914 + action = &t->sighand->action[sig-1];
48915 + ignored = action->sa.sa_handler == SIG_IGN;
48916 + blocked = sigismember(&t->blocked, sig);
48917 + if (blocked || ignored) {
48918 + action->sa.sa_handler = SIG_DFL;
48919 + if (blocked) {
48920 + sigdelset(&t->blocked, sig);
48921 + recalc_sigpending_and_wake(t);
48922 + }
48923 + }
48924 + if (action->sa.sa_handler == SIG_DFL)
48925 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
48926 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
48927 +
48928 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
48929 +
48930 + return ret;
48931 +}
48932 +#endif
48933 +
48934 +#ifdef CONFIG_GRKERNSEC_BRUTE
48935 +#define GR_USER_BAN_TIME (15 * 60)
48936 +
48937 +static int __get_dumpable(unsigned long mm_flags)
48938 +{
48939 + int ret;
48940 +
48941 + ret = mm_flags & MMF_DUMPABLE_MASK;
48942 + return (ret >= 2) ? 2 : ret;
48943 +}
48944 +#endif
48945 +
48946 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
48947 +{
48948 +#ifdef CONFIG_GRKERNSEC_BRUTE
48949 + uid_t uid = 0;
48950 +
48951 + if (!grsec_enable_brute)
48952 + return;
48953 +
48954 + rcu_read_lock();
48955 + read_lock(&tasklist_lock);
48956 + read_lock(&grsec_exec_file_lock);
48957 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
48958 + p->real_parent->brute = 1;
48959 + else {
48960 + const struct cred *cred = __task_cred(p), *cred2;
48961 + struct task_struct *tsk, *tsk2;
48962 +
48963 + if (!__get_dumpable(mm_flags) && cred->uid) {
48964 + struct user_struct *user;
48965 +
48966 + uid = cred->uid;
48967 +
48968 + /* this is put upon execution past expiration */
48969 + user = find_user(uid);
48970 + if (user == NULL)
48971 + goto unlock;
48972 + user->banned = 1;
48973 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
48974 + if (user->ban_expires == ~0UL)
48975 + user->ban_expires--;
48976 +
48977 + do_each_thread(tsk2, tsk) {
48978 + cred2 = __task_cred(tsk);
48979 + if (tsk != p && cred2->uid == uid)
48980 + gr_fake_force_sig(SIGKILL, tsk);
48981 + } while_each_thread(tsk2, tsk);
48982 + }
48983 + }
48984 +unlock:
48985 + read_unlock(&grsec_exec_file_lock);
48986 + read_unlock(&tasklist_lock);
48987 + rcu_read_unlock();
48988 +
48989 + if (uid)
48990 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
48991 +
48992 +#endif
48993 + return;
48994 +}
48995 +
48996 +void gr_handle_brute_check(void)
48997 +{
48998 +#ifdef CONFIG_GRKERNSEC_BRUTE
48999 + if (current->brute)
49000 + msleep(30 * 1000);
49001 +#endif
49002 + return;
49003 +}
49004 +
49005 +void gr_handle_kernel_exploit(void)
49006 +{
49007 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49008 + const struct cred *cred;
49009 + struct task_struct *tsk, *tsk2;
49010 + struct user_struct *user;
49011 + uid_t uid;
49012 +
49013 + if (in_irq() || in_serving_softirq() || in_nmi())
49014 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49015 +
49016 + uid = current_uid();
49017 +
49018 + if (uid == 0)
49019 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
49020 + else {
49021 + /* kill all the processes of this user, hold a reference
49022 + to their creds struct, and prevent them from creating
49023 + another process until system reset
49024 + */
49025 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49026 + /* we intentionally leak this ref */
49027 + user = get_uid(current->cred->user);
49028 + if (user) {
49029 + user->banned = 1;
49030 + user->ban_expires = ~0UL;
49031 + }
49032 +
49033 + read_lock(&tasklist_lock);
49034 + do_each_thread(tsk2, tsk) {
49035 + cred = __task_cred(tsk);
49036 + if (cred->uid == uid)
49037 + gr_fake_force_sig(SIGKILL, tsk);
49038 + } while_each_thread(tsk2, tsk);
49039 + read_unlock(&tasklist_lock);
49040 + }
49041 +#endif
49042 +}
49043 +
49044 +int __gr_process_user_ban(struct user_struct *user)
49045 +{
49046 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49047 + if (unlikely(user->banned)) {
49048 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49049 + user->banned = 0;
49050 + user->ban_expires = 0;
49051 + free_uid(user);
49052 + } else
49053 + return -EPERM;
49054 + }
49055 +#endif
49056 + return 0;
49057 +}
49058 +
49059 +int gr_process_user_ban(void)
49060 +{
49061 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49062 + return __gr_process_user_ban(current->cred->user);
49063 +#endif
49064 + return 0;
49065 +}
49066 diff -urNp linux-3.0.3/grsecurity/grsec_sock.c linux-3.0.3/grsecurity/grsec_sock.c
49067 --- linux-3.0.3/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49068 +++ linux-3.0.3/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
49069 @@ -0,0 +1,244 @@
49070 +#include <linux/kernel.h>
49071 +#include <linux/module.h>
49072 +#include <linux/sched.h>
49073 +#include <linux/file.h>
49074 +#include <linux/net.h>
49075 +#include <linux/in.h>
49076 +#include <linux/ip.h>
49077 +#include <net/sock.h>
49078 +#include <net/inet_sock.h>
49079 +#include <linux/grsecurity.h>
49080 +#include <linux/grinternal.h>
49081 +#include <linux/gracl.h>
49082 +
49083 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49084 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49085 +
49086 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
49087 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
49088 +
49089 +#ifdef CONFIG_UNIX_MODULE
49090 +EXPORT_SYMBOL(gr_acl_handle_unix);
49091 +EXPORT_SYMBOL(gr_acl_handle_mknod);
49092 +EXPORT_SYMBOL(gr_handle_chroot_unix);
49093 +EXPORT_SYMBOL(gr_handle_create);
49094 +#endif
49095 +
49096 +#ifdef CONFIG_GRKERNSEC
49097 +#define gr_conn_table_size 32749
49098 +struct conn_table_entry {
49099 + struct conn_table_entry *next;
49100 + struct signal_struct *sig;
49101 +};
49102 +
49103 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49104 +DEFINE_SPINLOCK(gr_conn_table_lock);
49105 +
49106 +extern const char * gr_socktype_to_name(unsigned char type);
49107 +extern const char * gr_proto_to_name(unsigned char proto);
49108 +extern const char * gr_sockfamily_to_name(unsigned char family);
49109 +
49110 +static __inline__ int
49111 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49112 +{
49113 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49114 +}
49115 +
49116 +static __inline__ int
49117 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49118 + __u16 sport, __u16 dport)
49119 +{
49120 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49121 + sig->gr_sport == sport && sig->gr_dport == dport))
49122 + return 1;
49123 + else
49124 + return 0;
49125 +}
49126 +
49127 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49128 +{
49129 + struct conn_table_entry **match;
49130 + unsigned int index;
49131 +
49132 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49133 + sig->gr_sport, sig->gr_dport,
49134 + gr_conn_table_size);
49135 +
49136 + newent->sig = sig;
49137 +
49138 + match = &gr_conn_table[index];
49139 + newent->next = *match;
49140 + *match = newent;
49141 +
49142 + return;
49143 +}
49144 +
49145 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49146 +{
49147 + struct conn_table_entry *match, *last = NULL;
49148 + unsigned int index;
49149 +
49150 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49151 + sig->gr_sport, sig->gr_dport,
49152 + gr_conn_table_size);
49153 +
49154 + match = gr_conn_table[index];
49155 + while (match && !conn_match(match->sig,
49156 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49157 + sig->gr_dport)) {
49158 + last = match;
49159 + match = match->next;
49160 + }
49161 +
49162 + if (match) {
49163 + if (last)
49164 + last->next = match->next;
49165 + else
49166 + gr_conn_table[index] = NULL;
49167 + kfree(match);
49168 + }
49169 +
49170 + return;
49171 +}
49172 +
49173 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49174 + __u16 sport, __u16 dport)
49175 +{
49176 + struct conn_table_entry *match;
49177 + unsigned int index;
49178 +
49179 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49180 +
49181 + match = gr_conn_table[index];
49182 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49183 + match = match->next;
49184 +
49185 + if (match)
49186 + return match->sig;
49187 + else
49188 + return NULL;
49189 +}
49190 +
49191 +#endif
49192 +
49193 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49194 +{
49195 +#ifdef CONFIG_GRKERNSEC
49196 + struct signal_struct *sig = task->signal;
49197 + struct conn_table_entry *newent;
49198 +
49199 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49200 + if (newent == NULL)
49201 + return;
49202 + /* no bh lock needed since we are called with bh disabled */
49203 + spin_lock(&gr_conn_table_lock);
49204 + gr_del_task_from_ip_table_nolock(sig);
49205 + sig->gr_saddr = inet->inet_rcv_saddr;
49206 + sig->gr_daddr = inet->inet_daddr;
49207 + sig->gr_sport = inet->inet_sport;
49208 + sig->gr_dport = inet->inet_dport;
49209 + gr_add_to_task_ip_table_nolock(sig, newent);
49210 + spin_unlock(&gr_conn_table_lock);
49211 +#endif
49212 + return;
49213 +}
49214 +
49215 +void gr_del_task_from_ip_table(struct task_struct *task)
49216 +{
49217 +#ifdef CONFIG_GRKERNSEC
49218 + spin_lock_bh(&gr_conn_table_lock);
49219 + gr_del_task_from_ip_table_nolock(task->signal);
49220 + spin_unlock_bh(&gr_conn_table_lock);
49221 +#endif
49222 + return;
49223 +}
49224 +
49225 +void
49226 +gr_attach_curr_ip(const struct sock *sk)
49227 +{
49228 +#ifdef CONFIG_GRKERNSEC
49229 + struct signal_struct *p, *set;
49230 + const struct inet_sock *inet = inet_sk(sk);
49231 +
49232 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49233 + return;
49234 +
49235 + set = current->signal;
49236 +
49237 + spin_lock_bh(&gr_conn_table_lock);
49238 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49239 + inet->inet_dport, inet->inet_sport);
49240 + if (unlikely(p != NULL)) {
49241 + set->curr_ip = p->curr_ip;
49242 + set->used_accept = 1;
49243 + gr_del_task_from_ip_table_nolock(p);
49244 + spin_unlock_bh(&gr_conn_table_lock);
49245 + return;
49246 + }
49247 + spin_unlock_bh(&gr_conn_table_lock);
49248 +
49249 + set->curr_ip = inet->inet_daddr;
49250 + set->used_accept = 1;
49251 +#endif
49252 + return;
49253 +}
49254 +
49255 +int
49256 +gr_handle_sock_all(const int family, const int type, const int protocol)
49257 +{
49258 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49259 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49260 + (family != AF_UNIX)) {
49261 + if (family == AF_INET)
49262 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49263 + else
49264 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49265 + return -EACCES;
49266 + }
49267 +#endif
49268 + return 0;
49269 +}
49270 +
49271 +int
49272 +gr_handle_sock_server(const struct sockaddr *sck)
49273 +{
49274 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49275 + if (grsec_enable_socket_server &&
49276 + in_group_p(grsec_socket_server_gid) &&
49277 + sck && (sck->sa_family != AF_UNIX) &&
49278 + (sck->sa_family != AF_LOCAL)) {
49279 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49280 + return -EACCES;
49281 + }
49282 +#endif
49283 + return 0;
49284 +}
49285 +
49286 +int
49287 +gr_handle_sock_server_other(const struct sock *sck)
49288 +{
49289 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49290 + if (grsec_enable_socket_server &&
49291 + in_group_p(grsec_socket_server_gid) &&
49292 + sck && (sck->sk_family != AF_UNIX) &&
49293 + (sck->sk_family != AF_LOCAL)) {
49294 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49295 + return -EACCES;
49296 + }
49297 +#endif
49298 + return 0;
49299 +}
49300 +
49301 +int
49302 +gr_handle_sock_client(const struct sockaddr *sck)
49303 +{
49304 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49305 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49306 + sck && (sck->sa_family != AF_UNIX) &&
49307 + (sck->sa_family != AF_LOCAL)) {
49308 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49309 + return -EACCES;
49310 + }
49311 +#endif
49312 + return 0;
49313 +}
49314 diff -urNp linux-3.0.3/grsecurity/grsec_sysctl.c linux-3.0.3/grsecurity/grsec_sysctl.c
49315 --- linux-3.0.3/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49316 +++ linux-3.0.3/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
49317 @@ -0,0 +1,433 @@
49318 +#include <linux/kernel.h>
49319 +#include <linux/sched.h>
49320 +#include <linux/sysctl.h>
49321 +#include <linux/grsecurity.h>
49322 +#include <linux/grinternal.h>
49323 +
49324 +int
49325 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49326 +{
49327 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49328 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49329 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49330 + return -EACCES;
49331 + }
49332 +#endif
49333 + return 0;
49334 +}
49335 +
49336 +#ifdef CONFIG_GRKERNSEC_ROFS
49337 +static int __maybe_unused one = 1;
49338 +#endif
49339 +
49340 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49341 +struct ctl_table grsecurity_table[] = {
49342 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49343 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49344 +#ifdef CONFIG_GRKERNSEC_IO
49345 + {
49346 + .procname = "disable_priv_io",
49347 + .data = &grsec_disable_privio,
49348 + .maxlen = sizeof(int),
49349 + .mode = 0600,
49350 + .proc_handler = &proc_dointvec,
49351 + },
49352 +#endif
49353 +#endif
49354 +#ifdef CONFIG_GRKERNSEC_LINK
49355 + {
49356 + .procname = "linking_restrictions",
49357 + .data = &grsec_enable_link,
49358 + .maxlen = sizeof(int),
49359 + .mode = 0600,
49360 + .proc_handler = &proc_dointvec,
49361 + },
49362 +#endif
49363 +#ifdef CONFIG_GRKERNSEC_BRUTE
49364 + {
49365 + .procname = "deter_bruteforce",
49366 + .data = &grsec_enable_brute,
49367 + .maxlen = sizeof(int),
49368 + .mode = 0600,
49369 + .proc_handler = &proc_dointvec,
49370 + },
49371 +#endif
49372 +#ifdef CONFIG_GRKERNSEC_FIFO
49373 + {
49374 + .procname = "fifo_restrictions",
49375 + .data = &grsec_enable_fifo,
49376 + .maxlen = sizeof(int),
49377 + .mode = 0600,
49378 + .proc_handler = &proc_dointvec,
49379 + },
49380 +#endif
49381 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49382 + {
49383 + .procname = "ip_blackhole",
49384 + .data = &grsec_enable_blackhole,
49385 + .maxlen = sizeof(int),
49386 + .mode = 0600,
49387 + .proc_handler = &proc_dointvec,
49388 + },
49389 + {
49390 + .procname = "lastack_retries",
49391 + .data = &grsec_lastack_retries,
49392 + .maxlen = sizeof(int),
49393 + .mode = 0600,
49394 + .proc_handler = &proc_dointvec,
49395 + },
49396 +#endif
49397 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49398 + {
49399 + .procname = "exec_logging",
49400 + .data = &grsec_enable_execlog,
49401 + .maxlen = sizeof(int),
49402 + .mode = 0600,
49403 + .proc_handler = &proc_dointvec,
49404 + },
49405 +#endif
49406 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49407 + {
49408 + .procname = "rwxmap_logging",
49409 + .data = &grsec_enable_log_rwxmaps,
49410 + .maxlen = sizeof(int),
49411 + .mode = 0600,
49412 + .proc_handler = &proc_dointvec,
49413 + },
49414 +#endif
49415 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49416 + {
49417 + .procname = "signal_logging",
49418 + .data = &grsec_enable_signal,
49419 + .maxlen = sizeof(int),
49420 + .mode = 0600,
49421 + .proc_handler = &proc_dointvec,
49422 + },
49423 +#endif
49424 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49425 + {
49426 + .procname = "forkfail_logging",
49427 + .data = &grsec_enable_forkfail,
49428 + .maxlen = sizeof(int),
49429 + .mode = 0600,
49430 + .proc_handler = &proc_dointvec,
49431 + },
49432 +#endif
49433 +#ifdef CONFIG_GRKERNSEC_TIME
49434 + {
49435 + .procname = "timechange_logging",
49436 + .data = &grsec_enable_time,
49437 + .maxlen = sizeof(int),
49438 + .mode = 0600,
49439 + .proc_handler = &proc_dointvec,
49440 + },
49441 +#endif
49442 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49443 + {
49444 + .procname = "chroot_deny_shmat",
49445 + .data = &grsec_enable_chroot_shmat,
49446 + .maxlen = sizeof(int),
49447 + .mode = 0600,
49448 + .proc_handler = &proc_dointvec,
49449 + },
49450 +#endif
49451 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49452 + {
49453 + .procname = "chroot_deny_unix",
49454 + .data = &grsec_enable_chroot_unix,
49455 + .maxlen = sizeof(int),
49456 + .mode = 0600,
49457 + .proc_handler = &proc_dointvec,
49458 + },
49459 +#endif
49460 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49461 + {
49462 + .procname = "chroot_deny_mount",
49463 + .data = &grsec_enable_chroot_mount,
49464 + .maxlen = sizeof(int),
49465 + .mode = 0600,
49466 + .proc_handler = &proc_dointvec,
49467 + },
49468 +#endif
49469 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49470 + {
49471 + .procname = "chroot_deny_fchdir",
49472 + .data = &grsec_enable_chroot_fchdir,
49473 + .maxlen = sizeof(int),
49474 + .mode = 0600,
49475 + .proc_handler = &proc_dointvec,
49476 + },
49477 +#endif
49478 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49479 + {
49480 + .procname = "chroot_deny_chroot",
49481 + .data = &grsec_enable_chroot_double,
49482 + .maxlen = sizeof(int),
49483 + .mode = 0600,
49484 + .proc_handler = &proc_dointvec,
49485 + },
49486 +#endif
49487 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49488 + {
49489 + .procname = "chroot_deny_pivot",
49490 + .data = &grsec_enable_chroot_pivot,
49491 + .maxlen = sizeof(int),
49492 + .mode = 0600,
49493 + .proc_handler = &proc_dointvec,
49494 + },
49495 +#endif
49496 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49497 + {
49498 + .procname = "chroot_enforce_chdir",
49499 + .data = &grsec_enable_chroot_chdir,
49500 + .maxlen = sizeof(int),
49501 + .mode = 0600,
49502 + .proc_handler = &proc_dointvec,
49503 + },
49504 +#endif
49505 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49506 + {
49507 + .procname = "chroot_deny_chmod",
49508 + .data = &grsec_enable_chroot_chmod,
49509 + .maxlen = sizeof(int),
49510 + .mode = 0600,
49511 + .proc_handler = &proc_dointvec,
49512 + },
49513 +#endif
49514 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49515 + {
49516 + .procname = "chroot_deny_mknod",
49517 + .data = &grsec_enable_chroot_mknod,
49518 + .maxlen = sizeof(int),
49519 + .mode = 0600,
49520 + .proc_handler = &proc_dointvec,
49521 + },
49522 +#endif
49523 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49524 + {
49525 + .procname = "chroot_restrict_nice",
49526 + .data = &grsec_enable_chroot_nice,
49527 + .maxlen = sizeof(int),
49528 + .mode = 0600,
49529 + .proc_handler = &proc_dointvec,
49530 + },
49531 +#endif
49532 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49533 + {
49534 + .procname = "chroot_execlog",
49535 + .data = &grsec_enable_chroot_execlog,
49536 + .maxlen = sizeof(int),
49537 + .mode = 0600,
49538 + .proc_handler = &proc_dointvec,
49539 + },
49540 +#endif
49541 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49542 + {
49543 + .procname = "chroot_caps",
49544 + .data = &grsec_enable_chroot_caps,
49545 + .maxlen = sizeof(int),
49546 + .mode = 0600,
49547 + .proc_handler = &proc_dointvec,
49548 + },
49549 +#endif
49550 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49551 + {
49552 + .procname = "chroot_deny_sysctl",
49553 + .data = &grsec_enable_chroot_sysctl,
49554 + .maxlen = sizeof(int),
49555 + .mode = 0600,
49556 + .proc_handler = &proc_dointvec,
49557 + },
49558 +#endif
49559 +#ifdef CONFIG_GRKERNSEC_TPE
49560 + {
49561 + .procname = "tpe",
49562 + .data = &grsec_enable_tpe,
49563 + .maxlen = sizeof(int),
49564 + .mode = 0600,
49565 + .proc_handler = &proc_dointvec,
49566 + },
49567 + {
49568 + .procname = "tpe_gid",
49569 + .data = &grsec_tpe_gid,
49570 + .maxlen = sizeof(int),
49571 + .mode = 0600,
49572 + .proc_handler = &proc_dointvec,
49573 + },
49574 +#endif
49575 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49576 + {
49577 + .procname = "tpe_invert",
49578 + .data = &grsec_enable_tpe_invert,
49579 + .maxlen = sizeof(int),
49580 + .mode = 0600,
49581 + .proc_handler = &proc_dointvec,
49582 + },
49583 +#endif
49584 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49585 + {
49586 + .procname = "tpe_restrict_all",
49587 + .data = &grsec_enable_tpe_all,
49588 + .maxlen = sizeof(int),
49589 + .mode = 0600,
49590 + .proc_handler = &proc_dointvec,
49591 + },
49592 +#endif
49593 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49594 + {
49595 + .procname = "socket_all",
49596 + .data = &grsec_enable_socket_all,
49597 + .maxlen = sizeof(int),
49598 + .mode = 0600,
49599 + .proc_handler = &proc_dointvec,
49600 + },
49601 + {
49602 + .procname = "socket_all_gid",
49603 + .data = &grsec_socket_all_gid,
49604 + .maxlen = sizeof(int),
49605 + .mode = 0600,
49606 + .proc_handler = &proc_dointvec,
49607 + },
49608 +#endif
49609 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49610 + {
49611 + .procname = "socket_client",
49612 + .data = &grsec_enable_socket_client,
49613 + .maxlen = sizeof(int),
49614 + .mode = 0600,
49615 + .proc_handler = &proc_dointvec,
49616 + },
49617 + {
49618 + .procname = "socket_client_gid",
49619 + .data = &grsec_socket_client_gid,
49620 + .maxlen = sizeof(int),
49621 + .mode = 0600,
49622 + .proc_handler = &proc_dointvec,
49623 + },
49624 +#endif
49625 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49626 + {
49627 + .procname = "socket_server",
49628 + .data = &grsec_enable_socket_server,
49629 + .maxlen = sizeof(int),
49630 + .mode = 0600,
49631 + .proc_handler = &proc_dointvec,
49632 + },
49633 + {
49634 + .procname = "socket_server_gid",
49635 + .data = &grsec_socket_server_gid,
49636 + .maxlen = sizeof(int),
49637 + .mode = 0600,
49638 + .proc_handler = &proc_dointvec,
49639 + },
49640 +#endif
49641 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49642 + {
49643 + .procname = "audit_group",
49644 + .data = &grsec_enable_group,
49645 + .maxlen = sizeof(int),
49646 + .mode = 0600,
49647 + .proc_handler = &proc_dointvec,
49648 + },
49649 + {
49650 + .procname = "audit_gid",
49651 + .data = &grsec_audit_gid,
49652 + .maxlen = sizeof(int),
49653 + .mode = 0600,
49654 + .proc_handler = &proc_dointvec,
49655 + },
49656 +#endif
49657 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49658 + {
49659 + .procname = "audit_chdir",
49660 + .data = &grsec_enable_chdir,
49661 + .maxlen = sizeof(int),
49662 + .mode = 0600,
49663 + .proc_handler = &proc_dointvec,
49664 + },
49665 +#endif
49666 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49667 + {
49668 + .procname = "audit_mount",
49669 + .data = &grsec_enable_mount,
49670 + .maxlen = sizeof(int),
49671 + .mode = 0600,
49672 + .proc_handler = &proc_dointvec,
49673 + },
49674 +#endif
49675 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49676 + {
49677 + .procname = "audit_textrel",
49678 + .data = &grsec_enable_audit_textrel,
49679 + .maxlen = sizeof(int),
49680 + .mode = 0600,
49681 + .proc_handler = &proc_dointvec,
49682 + },
49683 +#endif
49684 +#ifdef CONFIG_GRKERNSEC_DMESG
49685 + {
49686 + .procname = "dmesg",
49687 + .data = &grsec_enable_dmesg,
49688 + .maxlen = sizeof(int),
49689 + .mode = 0600,
49690 + .proc_handler = &proc_dointvec,
49691 + },
49692 +#endif
49693 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49694 + {
49695 + .procname = "chroot_findtask",
49696 + .data = &grsec_enable_chroot_findtask,
49697 + .maxlen = sizeof(int),
49698 + .mode = 0600,
49699 + .proc_handler = &proc_dointvec,
49700 + },
49701 +#endif
49702 +#ifdef CONFIG_GRKERNSEC_RESLOG
49703 + {
49704 + .procname = "resource_logging",
49705 + .data = &grsec_resource_logging,
49706 + .maxlen = sizeof(int),
49707 + .mode = 0600,
49708 + .proc_handler = &proc_dointvec,
49709 + },
49710 +#endif
49711 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49712 + {
49713 + .procname = "audit_ptrace",
49714 + .data = &grsec_enable_audit_ptrace,
49715 + .maxlen = sizeof(int),
49716 + .mode = 0600,
49717 + .proc_handler = &proc_dointvec,
49718 + },
49719 +#endif
49720 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49721 + {
49722 + .procname = "harden_ptrace",
49723 + .data = &grsec_enable_harden_ptrace,
49724 + .maxlen = sizeof(int),
49725 + .mode = 0600,
49726 + .proc_handler = &proc_dointvec,
49727 + },
49728 +#endif
49729 + {
49730 + .procname = "grsec_lock",
49731 + .data = &grsec_lock,
49732 + .maxlen = sizeof(int),
49733 + .mode = 0600,
49734 + .proc_handler = &proc_dointvec,
49735 + },
49736 +#endif
49737 +#ifdef CONFIG_GRKERNSEC_ROFS
49738 + {
49739 + .procname = "romount_protect",
49740 + .data = &grsec_enable_rofs,
49741 + .maxlen = sizeof(int),
49742 + .mode = 0600,
49743 + .proc_handler = &proc_dointvec_minmax,
49744 + .extra1 = &one,
49745 + .extra2 = &one,
49746 + },
49747 +#endif
49748 + { }
49749 +};
49750 +#endif
49751 diff -urNp linux-3.0.3/grsecurity/grsec_time.c linux-3.0.3/grsecurity/grsec_time.c
49752 --- linux-3.0.3/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49753 +++ linux-3.0.3/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
49754 @@ -0,0 +1,16 @@
49755 +#include <linux/kernel.h>
49756 +#include <linux/sched.h>
49757 +#include <linux/grinternal.h>
49758 +#include <linux/module.h>
49759 +
49760 +void
49761 +gr_log_timechange(void)
49762 +{
49763 +#ifdef CONFIG_GRKERNSEC_TIME
49764 + if (grsec_enable_time)
49765 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49766 +#endif
49767 + return;
49768 +}
49769 +
49770 +EXPORT_SYMBOL(gr_log_timechange);
49771 diff -urNp linux-3.0.3/grsecurity/grsec_tpe.c linux-3.0.3/grsecurity/grsec_tpe.c
49772 --- linux-3.0.3/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49773 +++ linux-3.0.3/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
49774 @@ -0,0 +1,39 @@
49775 +#include <linux/kernel.h>
49776 +#include <linux/sched.h>
49777 +#include <linux/file.h>
49778 +#include <linux/fs.h>
49779 +#include <linux/grinternal.h>
49780 +
49781 +extern int gr_acl_tpe_check(void);
49782 +
49783 +int
49784 +gr_tpe_allow(const struct file *file)
49785 +{
49786 +#ifdef CONFIG_GRKERNSEC
49787 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49788 + const struct cred *cred = current_cred();
49789 +
49790 + if (cred->uid && ((grsec_enable_tpe &&
49791 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49792 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49793 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49794 +#else
49795 + in_group_p(grsec_tpe_gid)
49796 +#endif
49797 + ) || gr_acl_tpe_check()) &&
49798 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49799 + (inode->i_mode & S_IWOTH))))) {
49800 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49801 + return 0;
49802 + }
49803 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49804 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49805 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49806 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49807 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49808 + return 0;
49809 + }
49810 +#endif
49811 +#endif
49812 + return 1;
49813 +}
49814 diff -urNp linux-3.0.3/grsecurity/grsum.c linux-3.0.3/grsecurity/grsum.c
49815 --- linux-3.0.3/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49816 +++ linux-3.0.3/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
49817 @@ -0,0 +1,61 @@
49818 +#include <linux/err.h>
49819 +#include <linux/kernel.h>
49820 +#include <linux/sched.h>
49821 +#include <linux/mm.h>
49822 +#include <linux/scatterlist.h>
49823 +#include <linux/crypto.h>
49824 +#include <linux/gracl.h>
49825 +
49826 +
49827 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49828 +#error "crypto and sha256 must be built into the kernel"
49829 +#endif
49830 +
49831 +int
49832 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49833 +{
49834 + char *p;
49835 + struct crypto_hash *tfm;
49836 + struct hash_desc desc;
49837 + struct scatterlist sg;
49838 + unsigned char temp_sum[GR_SHA_LEN];
49839 + volatile int retval = 0;
49840 + volatile int dummy = 0;
49841 + unsigned int i;
49842 +
49843 + sg_init_table(&sg, 1);
49844 +
49845 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49846 + if (IS_ERR(tfm)) {
49847 + /* should never happen, since sha256 should be built in */
49848 + return 1;
49849 + }
49850 +
49851 + desc.tfm = tfm;
49852 + desc.flags = 0;
49853 +
49854 + crypto_hash_init(&desc);
49855 +
49856 + p = salt;
49857 + sg_set_buf(&sg, p, GR_SALT_LEN);
49858 + crypto_hash_update(&desc, &sg, sg.length);
49859 +
49860 + p = entry->pw;
49861 + sg_set_buf(&sg, p, strlen(p));
49862 +
49863 + crypto_hash_update(&desc, &sg, sg.length);
49864 +
49865 + crypto_hash_final(&desc, temp_sum);
49866 +
49867 + memset(entry->pw, 0, GR_PW_LEN);
49868 +
49869 + for (i = 0; i < GR_SHA_LEN; i++)
49870 + if (sum[i] != temp_sum[i])
49871 + retval = 1;
49872 + else
49873 + dummy = 1; // waste a cycle
49874 +
49875 + crypto_free_hash(tfm);
49876 +
49877 + return retval;
49878 +}
49879 diff -urNp linux-3.0.3/grsecurity/Kconfig linux-3.0.3/grsecurity/Kconfig
49880 --- linux-3.0.3/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49881 +++ linux-3.0.3/grsecurity/Kconfig 2011-08-25 17:25:34.000000000 -0400
49882 @@ -0,0 +1,1038 @@
49883 +#
49884 +# grecurity configuration
49885 +#
49886 +
49887 +menu "Grsecurity"
49888 +
49889 +config GRKERNSEC
49890 + bool "Grsecurity"
49891 + select CRYPTO
49892 + select CRYPTO_SHA256
49893 + help
49894 + If you say Y here, you will be able to configure many features
49895 + that will enhance the security of your system. It is highly
49896 + recommended that you say Y here and read through the help
49897 + for each option so that you fully understand the features and
49898 + can evaluate their usefulness for your machine.
49899 +
49900 +choice
49901 + prompt "Security Level"
49902 + depends on GRKERNSEC
49903 + default GRKERNSEC_CUSTOM
49904 +
49905 +config GRKERNSEC_LOW
49906 + bool "Low"
49907 + select GRKERNSEC_LINK
49908 + select GRKERNSEC_FIFO
49909 + select GRKERNSEC_RANDNET
49910 + select GRKERNSEC_DMESG
49911 + select GRKERNSEC_CHROOT
49912 + select GRKERNSEC_CHROOT_CHDIR
49913 +
49914 + help
49915 + If you choose this option, several of the grsecurity options will
49916 + be enabled that will give you greater protection against a number
49917 + of attacks, while assuring that none of your software will have any
49918 + conflicts with the additional security measures. If you run a lot
49919 + of unusual software, or you are having problems with the higher
49920 + security levels, you should say Y here. With this option, the
49921 + following features are enabled:
49922 +
49923 + - Linking restrictions
49924 + - FIFO restrictions
49925 + - Restricted dmesg
49926 + - Enforced chdir("/") on chroot
49927 + - Runtime module disabling
49928 +
49929 +config GRKERNSEC_MEDIUM
49930 + bool "Medium"
49931 + select PAX
49932 + select PAX_EI_PAX
49933 + select PAX_PT_PAX_FLAGS
49934 + select PAX_HAVE_ACL_FLAGS
49935 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49936 + select GRKERNSEC_CHROOT
49937 + select GRKERNSEC_CHROOT_SYSCTL
49938 + select GRKERNSEC_LINK
49939 + select GRKERNSEC_FIFO
49940 + select GRKERNSEC_DMESG
49941 + select GRKERNSEC_RANDNET
49942 + select GRKERNSEC_FORKFAIL
49943 + select GRKERNSEC_TIME
49944 + select GRKERNSEC_SIGNAL
49945 + select GRKERNSEC_CHROOT
49946 + select GRKERNSEC_CHROOT_UNIX
49947 + select GRKERNSEC_CHROOT_MOUNT
49948 + select GRKERNSEC_CHROOT_PIVOT
49949 + select GRKERNSEC_CHROOT_DOUBLE
49950 + select GRKERNSEC_CHROOT_CHDIR
49951 + select GRKERNSEC_CHROOT_MKNOD
49952 + select GRKERNSEC_PROC
49953 + select GRKERNSEC_PROC_USERGROUP
49954 + select PAX_RANDUSTACK
49955 + select PAX_ASLR
49956 + select PAX_RANDMMAP
49957 + select PAX_REFCOUNT if (X86 || SPARC64)
49958 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49959 +
49960 + help
49961 + If you say Y here, several features in addition to those included
49962 + in the low additional security level will be enabled. These
49963 + features provide even more security to your system, though in rare
49964 + cases they may be incompatible with very old or poorly written
49965 + software. If you enable this option, make sure that your auth
49966 + service (identd) is running as gid 1001. With this option,
49967 + the following features (in addition to those provided in the
49968 + low additional security level) will be enabled:
49969 +
49970 + - Failed fork logging
49971 + - Time change logging
49972 + - Signal logging
49973 + - Deny mounts in chroot
49974 + - Deny double chrooting
49975 + - Deny sysctl writes in chroot
49976 + - Deny mknod in chroot
49977 + - Deny access to abstract AF_UNIX sockets out of chroot
49978 + - Deny pivot_root in chroot
49979 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
49980 + - /proc restrictions with special GID set to 10 (usually wheel)
49981 + - Address Space Layout Randomization (ASLR)
49982 + - Prevent exploitation of most refcount overflows
49983 + - Bounds checking of copying between the kernel and userland
49984 +
49985 +config GRKERNSEC_HIGH
49986 + bool "High"
49987 + select GRKERNSEC_LINK
49988 + select GRKERNSEC_FIFO
49989 + select GRKERNSEC_DMESG
49990 + select GRKERNSEC_FORKFAIL
49991 + select GRKERNSEC_TIME
49992 + select GRKERNSEC_SIGNAL
49993 + select GRKERNSEC_CHROOT
49994 + select GRKERNSEC_CHROOT_SHMAT
49995 + select GRKERNSEC_CHROOT_UNIX
49996 + select GRKERNSEC_CHROOT_MOUNT
49997 + select GRKERNSEC_CHROOT_FCHDIR
49998 + select GRKERNSEC_CHROOT_PIVOT
49999 + select GRKERNSEC_CHROOT_DOUBLE
50000 + select GRKERNSEC_CHROOT_CHDIR
50001 + select GRKERNSEC_CHROOT_MKNOD
50002 + select GRKERNSEC_CHROOT_CAPS
50003 + select GRKERNSEC_CHROOT_SYSCTL
50004 + select GRKERNSEC_CHROOT_FINDTASK
50005 + select GRKERNSEC_SYSFS_RESTRICT
50006 + select GRKERNSEC_PROC
50007 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50008 + select GRKERNSEC_HIDESYM
50009 + select GRKERNSEC_BRUTE
50010 + select GRKERNSEC_PROC_USERGROUP
50011 + select GRKERNSEC_KMEM
50012 + select GRKERNSEC_RESLOG
50013 + select GRKERNSEC_RANDNET
50014 + select GRKERNSEC_PROC_ADD
50015 + select GRKERNSEC_CHROOT_CHMOD
50016 + select GRKERNSEC_CHROOT_NICE
50017 + select GRKERNSEC_AUDIT_MOUNT
50018 + select GRKERNSEC_MODHARDEN if (MODULES)
50019 + select GRKERNSEC_HARDEN_PTRACE
50020 + select GRKERNSEC_VM86 if (X86_32)
50021 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50022 + select PAX
50023 + select PAX_RANDUSTACK
50024 + select PAX_ASLR
50025 + select PAX_RANDMMAP
50026 + select PAX_NOEXEC
50027 + select PAX_MPROTECT
50028 + select PAX_EI_PAX
50029 + select PAX_PT_PAX_FLAGS
50030 + select PAX_HAVE_ACL_FLAGS
50031 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50032 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50033 + select PAX_RANDKSTACK if (X86_TSC && X86)
50034 + select PAX_SEGMEXEC if (X86_32)
50035 + select PAX_PAGEEXEC
50036 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50037 + select PAX_EMUTRAMP if (PARISC)
50038 + select PAX_EMUSIGRT if (PARISC)
50039 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50040 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50041 + select PAX_REFCOUNT if (X86 || SPARC64)
50042 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50043 + help
50044 + If you say Y here, many of the features of grsecurity will be
50045 + enabled, which will protect you against many kinds of attacks
50046 + against your system. The heightened security comes at a cost
50047 + of an increased chance of incompatibilities with rare software
50048 + on your machine. Since this security level enables PaX, you should
50049 + view <http://pax.grsecurity.net> and read about the PaX
50050 + project. While you are there, download chpax and run it on
50051 + binaries that cause problems with PaX. Also remember that
50052 + since the /proc restrictions are enabled, you must run your
50053 + identd as gid 1001. This security level enables the following
50054 + features in addition to those listed in the low and medium
50055 + security levels:
50056 +
50057 + - Additional /proc restrictions
50058 + - Chmod restrictions in chroot
50059 + - No signals, ptrace, or viewing of processes outside of chroot
50060 + - Capability restrictions in chroot
50061 + - Deny fchdir out of chroot
50062 + - Priority restrictions in chroot
50063 + - Segmentation-based implementation of PaX
50064 + - Mprotect restrictions
50065 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50066 + - Kernel stack randomization
50067 + - Mount/unmount/remount logging
50068 + - Kernel symbol hiding
50069 + - Prevention of memory exhaustion-based exploits
50070 + - Hardening of module auto-loading
50071 + - Ptrace restrictions
50072 + - Restricted vm86 mode
50073 + - Restricted sysfs/debugfs
50074 + - Active kernel exploit response
50075 +
50076 +config GRKERNSEC_CUSTOM
50077 + bool "Custom"
50078 + help
50079 + If you say Y here, you will be able to configure every grsecurity
50080 + option, which allows you to enable many more features that aren't
50081 + covered in the basic security levels. These additional features
50082 + include TPE, socket restrictions, and the sysctl system for
50083 + grsecurity. It is advised that you read through the help for
50084 + each option to determine its usefulness in your situation.
50085 +
50086 +endchoice
50087 +
50088 +menu "Address Space Protection"
50089 +depends on GRKERNSEC
50090 +
50091 +config GRKERNSEC_KMEM
50092 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50093 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50094 + help
50095 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50096 + be written to via mmap or otherwise to modify the running kernel.
50097 + /dev/port will also not be allowed to be opened. If you have module
50098 + support disabled, enabling this will close up four ways that are
50099 + currently used to insert malicious code into the running kernel.
50100 + Even with all these features enabled, we still highly recommend that
50101 + you use the RBAC system, as it is still possible for an attacker to
50102 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50103 + If you are not using XFree86, you may be able to stop this additional
50104 + case by enabling the 'Disable privileged I/O' option. Though nothing
50105 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50106 + but only to video memory, which is the only writing we allow in this
50107 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50108 + not be allowed to mprotect it with PROT_WRITE later.
50109 + It is highly recommended that you say Y here if you meet all the
50110 + conditions above.
50111 +
50112 +config GRKERNSEC_VM86
50113 + bool "Restrict VM86 mode"
50114 + depends on X86_32
50115 +
50116 + help
50117 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50118 + make use of a special execution mode on 32bit x86 processors called
50119 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50120 + video cards and will still work with this option enabled. The purpose
50121 + of the option is to prevent exploitation of emulation errors in
50122 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50123 + Nearly all users should be able to enable this option.
50124 +
50125 +config GRKERNSEC_IO
50126 + bool "Disable privileged I/O"
50127 + depends on X86
50128 + select RTC_CLASS
50129 + select RTC_INTF_DEV
50130 + select RTC_DRV_CMOS
50131 +
50132 + help
50133 + If you say Y here, all ioperm and iopl calls will return an error.
50134 + Ioperm and iopl can be used to modify the running kernel.
50135 + Unfortunately, some programs need this access to operate properly,
50136 + the most notable of which are XFree86 and hwclock. hwclock can be
50137 + remedied by having RTC support in the kernel, so real-time
50138 + clock support is enabled if this option is enabled, to ensure
50139 + that hwclock operates correctly. XFree86 still will not
50140 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50141 + IF YOU USE XFree86. If you use XFree86 and you still want to
50142 + protect your kernel against modification, use the RBAC system.
50143 +
50144 +config GRKERNSEC_PROC_MEMMAP
50145 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50146 + default y if (PAX_NOEXEC || PAX_ASLR)
50147 + depends on PAX_NOEXEC || PAX_ASLR
50148 + help
50149 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50150 + give no information about the addresses of its mappings if
50151 + PaX features that rely on random addresses are enabled on the task.
50152 + If you use PaX it is greatly recommended that you say Y here as it
50153 + closes up a hole that makes the full ASLR useless for suid
50154 + binaries.
50155 +
50156 +config GRKERNSEC_BRUTE
50157 + bool "Deter exploit bruteforcing"
50158 + help
50159 + If you say Y here, attempts to bruteforce exploits against forking
50160 + daemons such as apache or sshd, as well as against suid/sgid binaries
50161 + will be deterred. When a child of a forking daemon is killed by PaX
50162 + or crashes due to an illegal instruction or other suspicious signal,
50163 + the parent process will be delayed 30 seconds upon every subsequent
50164 + fork until the administrator is able to assess the situation and
50165 + restart the daemon.
50166 + In the suid/sgid case, the attempt is logged, the user has all their
50167 + processes terminated, and they are prevented from executing any further
50168 + processes for 15 minutes.
50169 + It is recommended that you also enable signal logging in the auditing
50170 + section so that logs are generated when a process triggers a suspicious
50171 + signal.
50172 + If the sysctl option is enabled, a sysctl option with name
50173 + "deter_bruteforce" is created.
50174 +
50175 +
50176 +config GRKERNSEC_MODHARDEN
50177 + bool "Harden module auto-loading"
50178 + depends on MODULES
50179 + help
50180 + If you say Y here, module auto-loading in response to use of some
50181 + feature implemented by an unloaded module will be restricted to
50182 + root users. Enabling this option helps defend against attacks
50183 + by unprivileged users who abuse the auto-loading behavior to
50184 + cause a vulnerable module to load that is then exploited.
50185 +
50186 + If this option prevents a legitimate use of auto-loading for a
50187 + non-root user, the administrator can execute modprobe manually
50188 + with the exact name of the module mentioned in the alert log.
50189 + Alternatively, the administrator can add the module to the list
50190 + of modules loaded at boot by modifying init scripts.
50191 +
50192 + Modification of init scripts will most likely be needed on
50193 + Ubuntu servers with encrypted home directory support enabled,
50194 + as the first non-root user logging in will cause the ecb(aes),
50195 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50196 +
50197 +config GRKERNSEC_HIDESYM
50198 + bool "Hide kernel symbols"
50199 + help
50200 + If you say Y here, getting information on loaded modules, and
50201 + displaying all kernel symbols through a syscall will be restricted
50202 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50203 + /proc/kallsyms will be restricted to the root user. The RBAC
50204 + system can hide that entry even from root.
50205 +
50206 + This option also prevents leaking of kernel addresses through
50207 + several /proc entries.
50208 +
50209 + Note that this option is only effective provided the following
50210 + conditions are met:
50211 + 1) The kernel using grsecurity is not precompiled by some distribution
50212 + 2) You have also enabled GRKERNSEC_DMESG
50213 + 3) You are using the RBAC system and hiding other files such as your
50214 + kernel image and System.map. Alternatively, enabling this option
50215 + causes the permissions on /boot, /lib/modules, and the kernel
50216 + source directory to change at compile time to prevent
50217 + reading by non-root users.
50218 + If the above conditions are met, this option will aid in providing a
50219 + useful protection against local kernel exploitation of overflows
50220 + and arbitrary read/write vulnerabilities.
50221 +
50222 +config GRKERNSEC_KERN_LOCKOUT
50223 + bool "Active kernel exploit response"
50224 + depends on X86 || ARM || PPC || SPARC
50225 + help
50226 + If you say Y here, when a PaX alert is triggered due to suspicious
50227 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50228 + or an OOPs occurs due to bad memory accesses, instead of just
50229 + terminating the offending process (and potentially allowing
50230 + a subsequent exploit from the same user), we will take one of two
50231 + actions:
50232 + If the user was root, we will panic the system
50233 + If the user was non-root, we will log the attempt, terminate
50234 + all processes owned by the user, then prevent them from creating
50235 + any new processes until the system is restarted
50236 + This deters repeated kernel exploitation/bruteforcing attempts
50237 + and is useful for later forensics.
50238 +
50239 +endmenu
50240 +menu "Role Based Access Control Options"
50241 +depends on GRKERNSEC
50242 +
50243 +config GRKERNSEC_RBAC_DEBUG
50244 + bool
50245 +
50246 +config GRKERNSEC_NO_RBAC
50247 + bool "Disable RBAC system"
50248 + help
50249 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50250 + preventing the RBAC system from being enabled. You should only say Y
50251 + here if you have no intention of using the RBAC system, so as to prevent
50252 + an attacker with root access from misusing the RBAC system to hide files
50253 + and processes when loadable module support and /dev/[k]mem have been
50254 + locked down.
50255 +
50256 +config GRKERNSEC_ACL_HIDEKERN
50257 + bool "Hide kernel processes"
50258 + help
50259 + If you say Y here, all kernel threads will be hidden to all
50260 + processes but those whose subject has the "view hidden processes"
50261 + flag.
50262 +
50263 +config GRKERNSEC_ACL_MAXTRIES
50264 + int "Maximum tries before password lockout"
50265 + default 3
50266 + help
50267 + This option enforces the maximum number of times a user can attempt
50268 + to authorize themselves with the grsecurity RBAC system before being
50269 + denied the ability to attempt authorization again for a specified time.
50270 + The lower the number, the harder it will be to brute-force a password.
50271 +
50272 +config GRKERNSEC_ACL_TIMEOUT
50273 + int "Time to wait after max password tries, in seconds"
50274 + default 30
50275 + help
50276 + This option specifies the time the user must wait after attempting to
50277 + authorize to the RBAC system with the maximum number of invalid
50278 + passwords. The higher the number, the harder it will be to brute-force
50279 + a password.
50280 +
50281 +endmenu
50282 +menu "Filesystem Protections"
50283 +depends on GRKERNSEC
50284 +
50285 +config GRKERNSEC_PROC
50286 + bool "Proc restrictions"
50287 + help
50288 + If you say Y here, the permissions of the /proc filesystem
50289 + will be altered to enhance system security and privacy. You MUST
50290 + choose either a user only restriction or a user and group restriction.
50291 + Depending upon the option you choose, you can either restrict users to
50292 + see only the processes they themselves run, or choose a group that can
50293 + view all processes and files normally restricted to root if you choose
50294 + the "restrict to user only" option. NOTE: If you're running identd as
50295 + a non-root user, you will have to run it as the group you specify here.
50296 +
50297 +config GRKERNSEC_PROC_USER
50298 + bool "Restrict /proc to user only"
50299 + depends on GRKERNSEC_PROC
50300 + help
50301 + If you say Y here, non-root users will only be able to view their own
50302 + processes, and restricts them from viewing network-related information,
50303 + and viewing kernel symbol and module information.
50304 +
50305 +config GRKERNSEC_PROC_USERGROUP
50306 + bool "Allow special group"
50307 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50308 + help
50309 + If you say Y here, you will be able to select a group that will be
50310 + able to view all processes and network-related information. If you've
50311 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50312 + remain hidden. This option is useful if you want to run identd as
50313 + a non-root user.
50314 +
50315 +config GRKERNSEC_PROC_GID
50316 + int "GID for special group"
50317 + depends on GRKERNSEC_PROC_USERGROUP
50318 + default 1001
50319 +
50320 +config GRKERNSEC_PROC_ADD
50321 + bool "Additional restrictions"
50322 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50323 + help
50324 + If you say Y here, additional restrictions will be placed on
50325 + /proc that keep normal users from viewing device information and
50326 + slabinfo information that could be useful for exploits.
50327 +
50328 +config GRKERNSEC_LINK
50329 + bool "Linking restrictions"
50330 + help
50331 + If you say Y here, /tmp race exploits will be prevented, since users
50332 + will no longer be able to follow symlinks owned by other users in
50333 + world-writable +t directories (e.g. /tmp), unless the owner of the
50334 + symlink is the owner of the directory. Users will also not be
50335 + able to hardlink to files they do not own. If the sysctl option is
50336 + enabled, a sysctl option with name "linking_restrictions" is created.
50337 +
50338 +config GRKERNSEC_FIFO
50339 + bool "FIFO restrictions"
50340 + help
50341 + If you say Y here, users will not be able to write to FIFOs they don't
50342 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50343 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
50344 + option is enabled, a sysctl option with name "fifo_restrictions" is
50345 + created.
50346 +
50347 +config GRKERNSEC_SYSFS_RESTRICT
50348 + bool "Sysfs/debugfs restriction"
50349 + depends on SYSFS
50350 + help
50351 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50352 + any filesystem normally mounted under it (e.g. debugfs) will only
50353 + be accessible by root. These filesystems generally provide access
50354 + to hardware and debug information that isn't appropriate for unprivileged
50355 + users of the system. Sysfs and debugfs have also become a large source
50356 + of new vulnerabilities, ranging from infoleaks to local compromise.
50357 + There has been very little oversight with an eye toward security involved
50358 + in adding new exporters of information to these filesystems, so their
50359 + use is discouraged.
50360 + This option is equivalent to a chmod 0700 of the mount paths.
50361 +
50362 +config GRKERNSEC_ROFS
50363 + bool "Runtime read-only mount protection"
50364 + help
50365 + If you say Y here, a sysctl option with name "romount_protect" will
50366 + be created. By setting this option to 1 at runtime, filesystems
50367 + will be protected in the following ways:
50368 + * No new writable mounts will be allowed
50369 + * Existing read-only mounts won't be able to be remounted read/write
50370 + * Write operations will be denied on all block devices
50371 + This option acts independently of grsec_lock: once it is set to 1,
50372 + it cannot be turned off. Therefore, please be mindful of the resulting
50373 + behavior if this option is enabled in an init script on a read-only
50374 + filesystem. This feature is mainly intended for secure embedded systems.
50375 +
50376 +config GRKERNSEC_CHROOT
50377 + bool "Chroot jail restrictions"
50378 + help
50379 + If you say Y here, you will be able to choose several options that will
50380 + make breaking out of a chrooted jail much more difficult. If you
50381 + encounter no software incompatibilities with the following options, it
50382 + is recommended that you enable each one.
50383 +
50384 +config GRKERNSEC_CHROOT_MOUNT
50385 + bool "Deny mounts"
50386 + depends on GRKERNSEC_CHROOT
50387 + help
50388 + If you say Y here, processes inside a chroot will not be able to
50389 + mount or remount filesystems. If the sysctl option is enabled, a
50390 + sysctl option with name "chroot_deny_mount" is created.
50391 +
50392 +config GRKERNSEC_CHROOT_DOUBLE
50393 + bool "Deny double-chroots"
50394 + depends on GRKERNSEC_CHROOT
50395 + help
50396 + If you say Y here, processes inside a chroot will not be able to chroot
50397 + again outside the chroot. This is a widely used method of breaking
50398 + out of a chroot jail and should not be allowed. If the sysctl
50399 + option is enabled, a sysctl option with name
50400 + "chroot_deny_chroot" is created.
50401 +
50402 +config GRKERNSEC_CHROOT_PIVOT
50403 + bool "Deny pivot_root in chroot"
50404 + depends on GRKERNSEC_CHROOT
50405 + help
50406 + If you say Y here, processes inside a chroot will not be able to use
50407 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50408 + works similar to chroot in that it changes the root filesystem. This
50409 + function could be misused in a chrooted process to attempt to break out
50410 + of the chroot, and therefore should not be allowed. If the sysctl
50411 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50412 + created.
50413 +
50414 +config GRKERNSEC_CHROOT_CHDIR
50415 + bool "Enforce chdir(\"/\") on all chroots"
50416 + depends on GRKERNSEC_CHROOT
50417 + help
50418 + If you say Y here, the current working directory of all newly-chrooted
50419 + applications will be set to the root directory of the chroot.
50420 + The man page on chroot(2) states:
50421 + Note that this call does not change the current working
50422 + directory, so that `.' can be outside the tree rooted at
50423 + `/'. In particular, the super-user can escape from a
50424 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50425 +
50426 + It is recommended that you say Y here, since it's not known to break
50427 + any software. If the sysctl option is enabled, a sysctl option with
50428 + name "chroot_enforce_chdir" is created.
50429 +
50430 +config GRKERNSEC_CHROOT_CHMOD
50431 + bool "Deny (f)chmod +s"
50432 + depends on GRKERNSEC_CHROOT
50433 + help
50434 + If you say Y here, processes inside a chroot will not be able to chmod
50435 + or fchmod files to make them have suid or sgid bits. This protects
50436 + against another published method of breaking a chroot. If the sysctl
50437 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50438 + created.
50439 +
50440 +config GRKERNSEC_CHROOT_FCHDIR
50441 + bool "Deny fchdir out of chroot"
50442 + depends on GRKERNSEC_CHROOT
50443 + help
50444 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50445 + to a file descriptor of the chrooting process that points to a directory
50446 + outside the filesystem will be stopped. If the sysctl option
50447 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50448 +
50449 +config GRKERNSEC_CHROOT_MKNOD
50450 + bool "Deny mknod"
50451 + depends on GRKERNSEC_CHROOT
50452 + help
50453 + If you say Y here, processes inside a chroot will not be allowed to
50454 + mknod. The problem with using mknod inside a chroot is that it
50455 + would allow an attacker to create a device entry that is the same
50456 + as one on the physical root of your system, which could range from
50457 + anything from the console device to a device for your harddrive (which
50458 + they could then use to wipe the drive or steal data). It is recommended
50459 + that you say Y here, unless you run into software incompatibilities.
50460 + If the sysctl option is enabled, a sysctl option with name
50461 + "chroot_deny_mknod" is created.
50462 +
50463 +config GRKERNSEC_CHROOT_SHMAT
50464 + bool "Deny shmat() out of chroot"
50465 + depends on GRKERNSEC_CHROOT
50466 + help
50467 + If you say Y here, processes inside a chroot will not be able to attach
50468 + to shared memory segments that were created outside of the chroot jail.
50469 + It is recommended that you say Y here. If the sysctl option is enabled,
50470 + a sysctl option with name "chroot_deny_shmat" is created.
50471 +
50472 +config GRKERNSEC_CHROOT_UNIX
50473 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50474 + depends on GRKERNSEC_CHROOT
50475 + help
50476 + If you say Y here, processes inside a chroot will not be able to
50477 + connect to abstract (meaning not belonging to a filesystem) Unix
50478 + domain sockets that were bound outside of a chroot. It is recommended
50479 + that you say Y here. If the sysctl option is enabled, a sysctl option
50480 + with name "chroot_deny_unix" is created.
50481 +
50482 +config GRKERNSEC_CHROOT_FINDTASK
50483 + bool "Protect outside processes"
50484 + depends on GRKERNSEC_CHROOT
50485 + help
50486 + If you say Y here, processes inside a chroot will not be able to
50487 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50488 + getsid, or view any process outside of the chroot. If the sysctl
50489 + option is enabled, a sysctl option with name "chroot_findtask" is
50490 + created.
50491 +
50492 +config GRKERNSEC_CHROOT_NICE
50493 + bool "Restrict priority changes"
50494 + depends on GRKERNSEC_CHROOT
50495 + help
50496 + If you say Y here, processes inside a chroot will not be able to raise
50497 + the priority of processes in the chroot, or alter the priority of
50498 + processes outside the chroot. This provides more security than simply
50499 + removing CAP_SYS_NICE from the process' capability set. If the
50500 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50501 + is created.
50502 +
50503 +config GRKERNSEC_CHROOT_SYSCTL
50504 + bool "Deny sysctl writes"
50505 + depends on GRKERNSEC_CHROOT
50506 + help
50507 + If you say Y here, an attacker in a chroot will not be able to
50508 + write to sysctl entries, either by sysctl(2) or through a /proc
50509 + interface. It is strongly recommended that you say Y here. If the
50510 + sysctl option is enabled, a sysctl option with name
50511 + "chroot_deny_sysctl" is created.
50512 +
50513 +config GRKERNSEC_CHROOT_CAPS
50514 + bool "Capability restrictions"
50515 + depends on GRKERNSEC_CHROOT
50516 + help
50517 + If you say Y here, the capabilities on all root processes within a
50518 + chroot jail will be lowered to stop module insertion, raw i/o,
50519 + system and net admin tasks, rebooting the system, modifying immutable
50520 + files, modifying IPC owned by another, and changing the system time.
50521 + This is left an option because it can break some apps. Disable this
50522 + if your chrooted apps are having problems performing those kinds of
50523 + tasks. If the sysctl option is enabled, a sysctl option with
50524 + name "chroot_caps" is created.
50525 +
50526 +endmenu
50527 +menu "Kernel Auditing"
50528 +depends on GRKERNSEC
50529 +
50530 +config GRKERNSEC_AUDIT_GROUP
50531 + bool "Single group for auditing"
50532 + help
50533 + If you say Y here, the exec, chdir, and (un)mount logging features
50534 + will only operate on a group you specify. This option is recommended
50535 + if you only want to watch certain users instead of having a large
50536 + amount of logs from the entire system. If the sysctl option is enabled,
50537 + a sysctl option with name "audit_group" is created.
50538 +
50539 +config GRKERNSEC_AUDIT_GID
50540 + int "GID for auditing"
50541 + depends on GRKERNSEC_AUDIT_GROUP
50542 + default 1007
50543 +
50544 +config GRKERNSEC_EXECLOG
50545 + bool "Exec logging"
50546 + help
50547 + If you say Y here, all execve() calls will be logged (since the
50548 + other exec*() calls are frontends to execve(), all execution
50549 + will be logged). Useful for shell-servers that like to keep track
50550 + of their users. If the sysctl option is enabled, a sysctl option with
50551 + name "exec_logging" is created.
50552 + WARNING: This option when enabled will produce a LOT of logs, especially
50553 + on an active system.
50554 +
50555 +config GRKERNSEC_RESLOG
50556 + bool "Resource logging"
50557 + help
50558 + If you say Y here, all attempts to overstep resource limits will
50559 + be logged with the resource name, the requested size, and the current
50560 + limit. It is highly recommended that you say Y here. If the sysctl
50561 + option is enabled, a sysctl option with name "resource_logging" is
50562 + created. If the RBAC system is enabled, the sysctl value is ignored.
50563 +
50564 +config GRKERNSEC_CHROOT_EXECLOG
50565 + bool "Log execs within chroot"
50566 + help
50567 + If you say Y here, all executions inside a chroot jail will be logged
50568 + to syslog. This can cause a large amount of logs if certain
50569 + applications (eg. djb's daemontools) are installed on the system, and
50570 + is therefore left as an option. If the sysctl option is enabled, a
50571 + sysctl option with name "chroot_execlog" is created.
50572 +
50573 +config GRKERNSEC_AUDIT_PTRACE
50574 + bool "Ptrace logging"
50575 + help
50576 + If you say Y here, all attempts to attach to a process via ptrace
50577 + will be logged. If the sysctl option is enabled, a sysctl option
50578 + with name "audit_ptrace" is created.
50579 +
50580 +config GRKERNSEC_AUDIT_CHDIR
50581 + bool "Chdir logging"
50582 + help
50583 + If you say Y here, all chdir() calls will be logged. If the sysctl
50584 + option is enabled, a sysctl option with name "audit_chdir" is created.
50585 +
50586 +config GRKERNSEC_AUDIT_MOUNT
50587 + bool "(Un)Mount logging"
50588 + help
50589 + If you say Y here, all mounts and unmounts will be logged. If the
50590 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50591 + created.
50592 +
50593 +config GRKERNSEC_SIGNAL
50594 + bool "Signal logging"
50595 + help
50596 + If you say Y here, certain important signals will be logged, such as
50597 + SIGSEGV, which will as a result inform you of when an error in a program
50598 + occurred, which in some cases could mean a possible exploit attempt.
50599 + If the sysctl option is enabled, a sysctl option with name
50600 + "signal_logging" is created.
50601 +
50602 +config GRKERNSEC_FORKFAIL
50603 + bool "Fork failure logging"
50604 + help
50605 + If you say Y here, all failed fork() attempts will be logged.
50606 + This could suggest a fork bomb, or someone attempting to overstep
50607 + their process limit. If the sysctl option is enabled, a sysctl option
50608 + with name "forkfail_logging" is created.
50609 +
50610 +config GRKERNSEC_TIME
50611 + bool "Time change logging"
50612 + help
50613 + If you say Y here, any changes of the system clock will be logged.
50614 + If the sysctl option is enabled, a sysctl option with name
50615 + "timechange_logging" is created.
50616 +
50617 +config GRKERNSEC_PROC_IPADDR
50618 + bool "/proc/<pid>/ipaddr support"
50619 + help
50620 + If you say Y here, a new entry will be added to each /proc/<pid>
50621 + directory that contains the IP address of the person using the task.
50622 + The IP is carried across local TCP and AF_UNIX stream sockets.
50623 + This information can be useful for IDS/IPSes to perform remote response
50624 + to a local attack. The entry is readable by only the owner of the
50625 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50626 + the RBAC system), and thus does not create privacy concerns.
50627 +
50628 +config GRKERNSEC_RWXMAP_LOG
50629 + bool 'Denied RWX mmap/mprotect logging'
50630 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50631 + help
50632 + If you say Y here, calls to mmap() and mprotect() with explicit
50633 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50634 + denied by the PAX_MPROTECT feature. If the sysctl option is
50635 + enabled, a sysctl option with name "rwxmap_logging" is created.
50636 +
50637 +config GRKERNSEC_AUDIT_TEXTREL
50638 + bool 'ELF text relocations logging (READ HELP)'
50639 + depends on PAX_MPROTECT
50640 + help
50641 + If you say Y here, text relocations will be logged with the filename
50642 + of the offending library or binary. The purpose of the feature is
50643 + to help Linux distribution developers get rid of libraries and
50644 + binaries that need text relocations which hinder the future progress
50645 + of PaX. Only Linux distribution developers should say Y here, and
50646 + never on a production machine, as this option creates an information
50647 + leak that could aid an attacker in defeating the randomization of
50648 + a single memory region. If the sysctl option is enabled, a sysctl
50649 + option with name "audit_textrel" is created.
50650 +
50651 +endmenu
50652 +
50653 +menu "Executable Protections"
50654 +depends on GRKERNSEC
50655 +
50656 +config GRKERNSEC_DMESG
50657 + bool "Dmesg(8) restriction"
50658 + help
50659 + If you say Y here, non-root users will not be able to use dmesg(8)
50660 + to view up to the last 4kb of messages in the kernel's log buffer.
50661 + The kernel's log buffer often contains kernel addresses and other
50662 + identifying information useful to an attacker in fingerprinting a
50663 + system for a targeted exploit.
50664 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50665 + created.
50666 +
50667 +config GRKERNSEC_HARDEN_PTRACE
50668 + bool "Deter ptrace-based process snooping"
50669 + help
50670 + If you say Y here, TTY sniffers and other malicious monitoring
50671 + programs implemented through ptrace will be defeated. If you
50672 + have been using the RBAC system, this option has already been
50673 + enabled for several years for all users, with the ability to make
50674 + fine-grained exceptions.
50675 +
50676 + This option only affects the ability of non-root users to ptrace
50677 + processes that are not a descendent of the ptracing process.
50678 + This means that strace ./binary and gdb ./binary will still work,
50679 + but attaching to arbitrary processes will not. If the sysctl
50680 + option is enabled, a sysctl option with name "harden_ptrace" is
50681 + created.
50682 +
50683 +config GRKERNSEC_TPE
50684 + bool "Trusted Path Execution (TPE)"
50685 + help
50686 + If you say Y here, you will be able to choose a gid to add to the
50687 + supplementary groups of users you want to mark as "untrusted."
50688 + These users will not be able to execute any files that are not in
50689 + root-owned directories writable only by root. If the sysctl option
50690 + is enabled, a sysctl option with name "tpe" is created.
50691 +
50692 +config GRKERNSEC_TPE_ALL
50693 + bool "Partially restrict all non-root users"
50694 + depends on GRKERNSEC_TPE
50695 + help
50696 + If you say Y here, all non-root users will be covered under
50697 + a weaker TPE restriction. This is separate from, and in addition to,
50698 + the main TPE options that you have selected elsewhere. Thus, if a
50699 + "trusted" GID is chosen, this restriction applies to even that GID.
50700 + Under this restriction, all non-root users will only be allowed to
50701 + execute files in directories they own that are not group or
50702 + world-writable, or in directories owned by root and writable only by
50703 + root. If the sysctl option is enabled, a sysctl option with name
50704 + "tpe_restrict_all" is created.
50705 +
50706 +config GRKERNSEC_TPE_INVERT
50707 + bool "Invert GID option"
50708 + depends on GRKERNSEC_TPE
50709 + help
50710 + If you say Y here, the group you specify in the TPE configuration will
50711 + decide what group TPE restrictions will be *disabled* for. This
50712 + option is useful if you want TPE restrictions to be applied to most
50713 + users on the system. If the sysctl option is enabled, a sysctl option
50714 + with name "tpe_invert" is created. Unlike other sysctl options, this
50715 + entry will default to on for backward-compatibility.
50716 +
50717 +config GRKERNSEC_TPE_GID
50718 + int "GID for untrusted users"
50719 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50720 + default 1005
50721 + help
50722 + Setting this GID determines what group TPE restrictions will be
50723 + *enabled* for. If the sysctl option is enabled, a sysctl option
50724 + with name "tpe_gid" is created.
50725 +
50726 +config GRKERNSEC_TPE_GID
50727 + int "GID for trusted users"
50728 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50729 + default 1005
50730 + help
50731 + Setting this GID determines what group TPE restrictions will be
50732 + *disabled* for. If the sysctl option is enabled, a sysctl option
50733 + with name "tpe_gid" is created.
50734 +
50735 +endmenu
50736 +menu "Network Protections"
50737 +depends on GRKERNSEC
50738 +
50739 +config GRKERNSEC_RANDNET
50740 + bool "Larger entropy pools"
50741 + help
50742 + If you say Y here, the entropy pools used for many features of Linux
50743 + and grsecurity will be doubled in size. Since several grsecurity
50744 + features use additional randomness, it is recommended that you say Y
50745 + here. Saying Y here has a similar effect as modifying
50746 + /proc/sys/kernel/random/poolsize.
50747 +
50748 +config GRKERNSEC_BLACKHOLE
50749 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50750 + depends on NET
50751 + help
50752 + If you say Y here, neither TCP resets nor ICMP
50753 + destination-unreachable packets will be sent in response to packets
50754 + sent to ports for which no associated listening process exists.
50755 + This feature supports both IPV4 and IPV6 and exempts the
50756 + loopback interface from blackholing. Enabling this feature
50757 + makes a host more resilient to DoS attacks and reduces network
50758 + visibility against scanners.
50759 +
50760 + The blackhole feature as-implemented is equivalent to the FreeBSD
50761 + blackhole feature, as it prevents RST responses to all packets, not
50762 + just SYNs. Under most application behavior this causes no
50763 + problems, but applications (like haproxy) may not close certain
50764 + connections in a way that cleanly terminates them on the remote
50765 + end, leaving the remote host in LAST_ACK state. Because of this
50766 + side-effect and to prevent intentional LAST_ACK DoSes, this
50767 + feature also adds automatic mitigation against such attacks.
50768 + The mitigation drastically reduces the amount of time a socket
50769 + can spend in LAST_ACK state. If you're using haproxy and not
50770 + all servers it connects to have this option enabled, consider
50771 + disabling this feature on the haproxy host.
50772 +
50773 + If the sysctl option is enabled, two sysctl options with names
50774 + "ip_blackhole" and "lastack_retries" will be created.
50775 + While "ip_blackhole" takes the standard zero/non-zero on/off
50776 + toggle, "lastack_retries" uses the same kinds of values as
50777 + "tcp_retries1" and "tcp_retries2". The default value of 4
50778 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50779 + state.
50780 +
50781 +config GRKERNSEC_SOCKET
50782 + bool "Socket restrictions"
50783 + depends on NET
50784 + help
50785 + If you say Y here, you will be able to choose from several options.
50786 + If you assign a GID on your system and add it to the supplementary
50787 + groups of users you want to restrict socket access to, this patch
50788 + will perform up to three things, based on the option(s) you choose.
50789 +
50790 +config GRKERNSEC_SOCKET_ALL
50791 + bool "Deny any sockets to group"
50792 + depends on GRKERNSEC_SOCKET
50793 + help
50794 + If you say Y here, you will be able to choose a GID of whose users will
50795 + be unable to connect to other hosts from your machine or run server
50796 + applications from your machine. If the sysctl option is enabled, a
50797 + sysctl option with name "socket_all" is created.
50798 +
50799 +config GRKERNSEC_SOCKET_ALL_GID
50800 + int "GID to deny all sockets for"
50801 + depends on GRKERNSEC_SOCKET_ALL
50802 + default 1004
50803 + help
50804 + Here you can choose the GID to disable socket access for. Remember to
50805 + add the users you want socket access disabled for to the GID
50806 + specified here. If the sysctl option is enabled, a sysctl option
50807 + with name "socket_all_gid" is created.
50808 +
50809 +config GRKERNSEC_SOCKET_CLIENT
50810 + bool "Deny client sockets to group"
50811 + depends on GRKERNSEC_SOCKET
50812 + help
50813 + If you say Y here, you will be able to choose a GID of whose users will
50814 + be unable to connect to other hosts from your machine, but will be
50815 + able to run servers. If this option is enabled, all users in the group
50816 + you specify will have to use passive mode when initiating ftp transfers
50817 + from the shell on your machine. If the sysctl option is enabled, a
50818 + sysctl option with name "socket_client" is created.
50819 +
50820 +config GRKERNSEC_SOCKET_CLIENT_GID
50821 + int "GID to deny client sockets for"
50822 + depends on GRKERNSEC_SOCKET_CLIENT
50823 + default 1003
50824 + help
50825 + Here you can choose the GID to disable client socket access for.
50826 + Remember to add the users you want client socket access disabled for to
50827 + the GID specified here. If the sysctl option is enabled, a sysctl
50828 + option with name "socket_client_gid" is created.
50829 +
50830 +config GRKERNSEC_SOCKET_SERVER
50831 + bool "Deny server sockets to group"
50832 + depends on GRKERNSEC_SOCKET
50833 + help
50834 + If you say Y here, you will be able to choose a GID of whose users will
50835 + be unable to run server applications from your machine. If the sysctl
50836 + option is enabled, a sysctl option with name "socket_server" is created.
50837 +
50838 +config GRKERNSEC_SOCKET_SERVER_GID
50839 + int "GID to deny server sockets for"
50840 + depends on GRKERNSEC_SOCKET_SERVER
50841 + default 1002
50842 + help
50843 + Here you can choose the GID to disable server socket access for.
50844 + Remember to add the users you want server socket access disabled for to
50845 + the GID specified here. If the sysctl option is enabled, a sysctl
50846 + option with name "socket_server_gid" is created.
50847 +
50848 +endmenu
50849 +menu "Sysctl support"
50850 +depends on GRKERNSEC && SYSCTL
50851 +
50852 +config GRKERNSEC_SYSCTL
50853 + bool "Sysctl support"
50854 + help
50855 + If you say Y here, you will be able to change the options that
50856 + grsecurity runs with at bootup, without having to recompile your
50857 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50858 + to enable (1) or disable (0) various features. All the sysctl entries
50859 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50860 + All features enabled in the kernel configuration are disabled at boot
50861 + if you do not say Y to the "Turn on features by default" option.
50862 + All options should be set at startup, and the grsec_lock entry should
50863 + be set to a non-zero value after all the options are set.
50864 + *THIS IS EXTREMELY IMPORTANT*
50865 +
50866 +config GRKERNSEC_SYSCTL_DISTRO
50867 + bool "Extra sysctl support for distro makers (READ HELP)"
50868 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50869 + help
50870 + If you say Y here, additional sysctl options will be created
50871 + for features that affect processes running as root. Therefore,
50872 + it is critical when using this option that the grsec_lock entry be
50873 + enabled after boot. Only distros with prebuilt kernel packages
50874 + with this option enabled that can ensure grsec_lock is enabled
50875 + after boot should use this option.
50876 + *Failure to set grsec_lock after boot makes all grsec features
50877 + this option covers useless*
50878 +
50879 + Currently this option creates the following sysctl entries:
50880 + "Disable Privileged I/O": "disable_priv_io"
50881 +
50882 +config GRKERNSEC_SYSCTL_ON
50883 + bool "Turn on features by default"
50884 + depends on GRKERNSEC_SYSCTL
50885 + help
50886 + If you say Y here, instead of having all features enabled in the
50887 + kernel configuration disabled at boot time, the features will be
50888 + enabled at boot time. It is recommended you say Y here unless
50889 + there is some reason you would want all sysctl-tunable features to
50890 + be disabled by default. As mentioned elsewhere, it is important
50891 + to enable the grsec_lock entry once you have finished modifying
50892 + the sysctl entries.
50893 +
50894 +endmenu
50895 +menu "Logging Options"
50896 +depends on GRKERNSEC
50897 +
50898 +config GRKERNSEC_FLOODTIME
50899 + int "Seconds in between log messages (minimum)"
50900 + default 10
50901 + help
50902 + This option allows you to enforce the number of seconds between
50903 + grsecurity log messages. The default should be suitable for most
50904 + people, however, if you choose to change it, choose a value small enough
50905 + to allow informative logs to be produced, but large enough to
50906 + prevent flooding.
50907 +
50908 +config GRKERNSEC_FLOODBURST
50909 + int "Number of messages in a burst (maximum)"
50910 + default 4
50911 + help
50912 + This option allows you to choose the maximum number of messages allowed
50913 + within the flood time interval you chose in a separate option. The
50914 + default should be suitable for most people, however if you find that
50915 + many of your logs are being interpreted as flooding, you may want to
50916 + raise this value.
50917 +
50918 +endmenu
50919 +
50920 +endmenu
50921 diff -urNp linux-3.0.3/grsecurity/Makefile linux-3.0.3/grsecurity/Makefile
50922 --- linux-3.0.3/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
50923 +++ linux-3.0.3/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
50924 @@ -0,0 +1,34 @@
50925 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50926 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50927 +# into an RBAC system
50928 +#
50929 +# All code in this directory and various hooks inserted throughout the kernel
50930 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50931 +# under the GPL v2 or higher
50932 +
50933 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50934 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50935 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50936 +
50937 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50938 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50939 + gracl_learn.o grsec_log.o
50940 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50941 +
50942 +ifdef CONFIG_NET
50943 +obj-y += grsec_sock.o
50944 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50945 +endif
50946 +
50947 +ifndef CONFIG_GRKERNSEC
50948 +obj-y += grsec_disabled.o
50949 +endif
50950 +
50951 +ifdef CONFIG_GRKERNSEC_HIDESYM
50952 +extra-y := grsec_hidesym.o
50953 +$(obj)/grsec_hidesym.o:
50954 + @-chmod -f 500 /boot
50955 + @-chmod -f 500 /lib/modules
50956 + @-chmod -f 700 .
50957 + @echo ' grsec: protected kernel image paths'
50958 +endif
50959 diff -urNp linux-3.0.3/include/acpi/acpi_bus.h linux-3.0.3/include/acpi/acpi_bus.h
50960 --- linux-3.0.3/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
50961 +++ linux-3.0.3/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
50962 @@ -107,7 +107,7 @@ struct acpi_device_ops {
50963 acpi_op_bind bind;
50964 acpi_op_unbind unbind;
50965 acpi_op_notify notify;
50966 -};
50967 +} __no_const;
50968
50969 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
50970
50971 diff -urNp linux-3.0.3/include/asm-generic/atomic-long.h linux-3.0.3/include/asm-generic/atomic-long.h
50972 --- linux-3.0.3/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
50973 +++ linux-3.0.3/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
50974 @@ -22,6 +22,12 @@
50975
50976 typedef atomic64_t atomic_long_t;
50977
50978 +#ifdef CONFIG_PAX_REFCOUNT
50979 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
50980 +#else
50981 +typedef atomic64_t atomic_long_unchecked_t;
50982 +#endif
50983 +
50984 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
50985
50986 static inline long atomic_long_read(atomic_long_t *l)
50987 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
50988 return (long)atomic64_read(v);
50989 }
50990
50991 +#ifdef CONFIG_PAX_REFCOUNT
50992 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
50993 +{
50994 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50995 +
50996 + return (long)atomic64_read_unchecked(v);
50997 +}
50998 +#endif
50999 +
51000 static inline void atomic_long_set(atomic_long_t *l, long i)
51001 {
51002 atomic64_t *v = (atomic64_t *)l;
51003 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51004 atomic64_set(v, i);
51005 }
51006
51007 +#ifdef CONFIG_PAX_REFCOUNT
51008 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51009 +{
51010 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51011 +
51012 + atomic64_set_unchecked(v, i);
51013 +}
51014 +#endif
51015 +
51016 static inline void atomic_long_inc(atomic_long_t *l)
51017 {
51018 atomic64_t *v = (atomic64_t *)l;
51019 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51020 atomic64_inc(v);
51021 }
51022
51023 +#ifdef CONFIG_PAX_REFCOUNT
51024 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51025 +{
51026 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51027 +
51028 + atomic64_inc_unchecked(v);
51029 +}
51030 +#endif
51031 +
51032 static inline void atomic_long_dec(atomic_long_t *l)
51033 {
51034 atomic64_t *v = (atomic64_t *)l;
51035 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51036 atomic64_dec(v);
51037 }
51038
51039 +#ifdef CONFIG_PAX_REFCOUNT
51040 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51041 +{
51042 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51043 +
51044 + atomic64_dec_unchecked(v);
51045 +}
51046 +#endif
51047 +
51048 static inline void atomic_long_add(long i, atomic_long_t *l)
51049 {
51050 atomic64_t *v = (atomic64_t *)l;
51051 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51052 atomic64_add(i, v);
51053 }
51054
51055 +#ifdef CONFIG_PAX_REFCOUNT
51056 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51057 +{
51058 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51059 +
51060 + atomic64_add_unchecked(i, v);
51061 +}
51062 +#endif
51063 +
51064 static inline void atomic_long_sub(long i, atomic_long_t *l)
51065 {
51066 atomic64_t *v = (atomic64_t *)l;
51067 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51068 atomic64_sub(i, v);
51069 }
51070
51071 +#ifdef CONFIG_PAX_REFCOUNT
51072 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51073 +{
51074 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51075 +
51076 + atomic64_sub_unchecked(i, v);
51077 +}
51078 +#endif
51079 +
51080 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51081 {
51082 atomic64_t *v = (atomic64_t *)l;
51083 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51084 return (long)atomic64_inc_return(v);
51085 }
51086
51087 +#ifdef CONFIG_PAX_REFCOUNT
51088 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51089 +{
51090 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51091 +
51092 + return (long)atomic64_inc_return_unchecked(v);
51093 +}
51094 +#endif
51095 +
51096 static inline long atomic_long_dec_return(atomic_long_t *l)
51097 {
51098 atomic64_t *v = (atomic64_t *)l;
51099 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51100
51101 typedef atomic_t atomic_long_t;
51102
51103 +#ifdef CONFIG_PAX_REFCOUNT
51104 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51105 +#else
51106 +typedef atomic_t atomic_long_unchecked_t;
51107 +#endif
51108 +
51109 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51110 static inline long atomic_long_read(atomic_long_t *l)
51111 {
51112 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51113 return (long)atomic_read(v);
51114 }
51115
51116 +#ifdef CONFIG_PAX_REFCOUNT
51117 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51118 +{
51119 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51120 +
51121 + return (long)atomic_read_unchecked(v);
51122 +}
51123 +#endif
51124 +
51125 static inline void atomic_long_set(atomic_long_t *l, long i)
51126 {
51127 atomic_t *v = (atomic_t *)l;
51128 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51129 atomic_set(v, i);
51130 }
51131
51132 +#ifdef CONFIG_PAX_REFCOUNT
51133 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51134 +{
51135 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51136 +
51137 + atomic_set_unchecked(v, i);
51138 +}
51139 +#endif
51140 +
51141 static inline void atomic_long_inc(atomic_long_t *l)
51142 {
51143 atomic_t *v = (atomic_t *)l;
51144 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51145 atomic_inc(v);
51146 }
51147
51148 +#ifdef CONFIG_PAX_REFCOUNT
51149 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51150 +{
51151 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51152 +
51153 + atomic_inc_unchecked(v);
51154 +}
51155 +#endif
51156 +
51157 static inline void atomic_long_dec(atomic_long_t *l)
51158 {
51159 atomic_t *v = (atomic_t *)l;
51160 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51161 atomic_dec(v);
51162 }
51163
51164 +#ifdef CONFIG_PAX_REFCOUNT
51165 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51166 +{
51167 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51168 +
51169 + atomic_dec_unchecked(v);
51170 +}
51171 +#endif
51172 +
51173 static inline void atomic_long_add(long i, atomic_long_t *l)
51174 {
51175 atomic_t *v = (atomic_t *)l;
51176 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51177 atomic_add(i, v);
51178 }
51179
51180 +#ifdef CONFIG_PAX_REFCOUNT
51181 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51182 +{
51183 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51184 +
51185 + atomic_add_unchecked(i, v);
51186 +}
51187 +#endif
51188 +
51189 static inline void atomic_long_sub(long i, atomic_long_t *l)
51190 {
51191 atomic_t *v = (atomic_t *)l;
51192 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51193 atomic_sub(i, v);
51194 }
51195
51196 +#ifdef CONFIG_PAX_REFCOUNT
51197 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51198 +{
51199 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51200 +
51201 + atomic_sub_unchecked(i, v);
51202 +}
51203 +#endif
51204 +
51205 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51206 {
51207 atomic_t *v = (atomic_t *)l;
51208 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51209 return (long)atomic_inc_return(v);
51210 }
51211
51212 +#ifdef CONFIG_PAX_REFCOUNT
51213 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51214 +{
51215 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51216 +
51217 + return (long)atomic_inc_return_unchecked(v);
51218 +}
51219 +#endif
51220 +
51221 static inline long atomic_long_dec_return(atomic_long_t *l)
51222 {
51223 atomic_t *v = (atomic_t *)l;
51224 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51225
51226 #endif /* BITS_PER_LONG == 64 */
51227
51228 +#ifdef CONFIG_PAX_REFCOUNT
51229 +static inline void pax_refcount_needs_these_functions(void)
51230 +{
51231 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51232 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51233 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51234 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51235 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51236 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51237 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51238 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51239 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51240 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51241 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51242 +
51243 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51244 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51245 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51246 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51247 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51248 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51249 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51250 +}
51251 +#else
51252 +#define atomic_read_unchecked(v) atomic_read(v)
51253 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51254 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51255 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51256 +#define atomic_inc_unchecked(v) atomic_inc(v)
51257 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51258 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51259 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51260 +#define atomic_dec_unchecked(v) atomic_dec(v)
51261 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51262 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51263 +
51264 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51265 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51266 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51267 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51268 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51269 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51270 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51271 +#endif
51272 +
51273 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51274 diff -urNp linux-3.0.3/include/asm-generic/cache.h linux-3.0.3/include/asm-generic/cache.h
51275 --- linux-3.0.3/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
51276 +++ linux-3.0.3/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
51277 @@ -6,7 +6,7 @@
51278 * cache lines need to provide their own cache.h.
51279 */
51280
51281 -#define L1_CACHE_SHIFT 5
51282 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51283 +#define L1_CACHE_SHIFT 5UL
51284 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51285
51286 #endif /* __ASM_GENERIC_CACHE_H */
51287 diff -urNp linux-3.0.3/include/asm-generic/int-l64.h linux-3.0.3/include/asm-generic/int-l64.h
51288 --- linux-3.0.3/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
51289 +++ linux-3.0.3/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
51290 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51291 typedef signed long s64;
51292 typedef unsigned long u64;
51293
51294 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51295 +
51296 #define S8_C(x) x
51297 #define U8_C(x) x ## U
51298 #define S16_C(x) x
51299 diff -urNp linux-3.0.3/include/asm-generic/int-ll64.h linux-3.0.3/include/asm-generic/int-ll64.h
51300 --- linux-3.0.3/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
51301 +++ linux-3.0.3/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
51302 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51303 typedef signed long long s64;
51304 typedef unsigned long long u64;
51305
51306 +typedef unsigned long long intoverflow_t;
51307 +
51308 #define S8_C(x) x
51309 #define U8_C(x) x ## U
51310 #define S16_C(x) x
51311 diff -urNp linux-3.0.3/include/asm-generic/kmap_types.h linux-3.0.3/include/asm-generic/kmap_types.h
51312 --- linux-3.0.3/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
51313 +++ linux-3.0.3/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
51314 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51315 KMAP_D(17) KM_NMI,
51316 KMAP_D(18) KM_NMI_PTE,
51317 KMAP_D(19) KM_KDB,
51318 +KMAP_D(20) KM_CLEARPAGE,
51319 /*
51320 * Remember to update debug_kmap_atomic() when adding new kmap types!
51321 */
51322 -KMAP_D(20) KM_TYPE_NR
51323 +KMAP_D(21) KM_TYPE_NR
51324 };
51325
51326 #undef KMAP_D
51327 diff -urNp linux-3.0.3/include/asm-generic/pgtable.h linux-3.0.3/include/asm-generic/pgtable.h
51328 --- linux-3.0.3/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
51329 +++ linux-3.0.3/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
51330 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
51331 #endif /* __HAVE_ARCH_PMD_WRITE */
51332 #endif
51333
51334 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51335 +static inline unsigned long pax_open_kernel(void) { return 0; }
51336 +#endif
51337 +
51338 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51339 +static inline unsigned long pax_close_kernel(void) { return 0; }
51340 +#endif
51341 +
51342 #endif /* !__ASSEMBLY__ */
51343
51344 #endif /* _ASM_GENERIC_PGTABLE_H */
51345 diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopmd.h linux-3.0.3/include/asm-generic/pgtable-nopmd.h
51346 --- linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
51347 +++ linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
51348 @@ -1,14 +1,19 @@
51349 #ifndef _PGTABLE_NOPMD_H
51350 #define _PGTABLE_NOPMD_H
51351
51352 -#ifndef __ASSEMBLY__
51353 -
51354 #include <asm-generic/pgtable-nopud.h>
51355
51356 -struct mm_struct;
51357 -
51358 #define __PAGETABLE_PMD_FOLDED
51359
51360 +#define PMD_SHIFT PUD_SHIFT
51361 +#define PTRS_PER_PMD 1
51362 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51363 +#define PMD_MASK (~(PMD_SIZE-1))
51364 +
51365 +#ifndef __ASSEMBLY__
51366 +
51367 +struct mm_struct;
51368 +
51369 /*
51370 * Having the pmd type consist of a pud gets the size right, and allows
51371 * us to conceptually access the pud entry that this pmd is folded into
51372 @@ -16,11 +21,6 @@ struct mm_struct;
51373 */
51374 typedef struct { pud_t pud; } pmd_t;
51375
51376 -#define PMD_SHIFT PUD_SHIFT
51377 -#define PTRS_PER_PMD 1
51378 -#define PMD_SIZE (1UL << PMD_SHIFT)
51379 -#define PMD_MASK (~(PMD_SIZE-1))
51380 -
51381 /*
51382 * The "pud_xxx()" functions here are trivial for a folded two-level
51383 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51384 diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopud.h linux-3.0.3/include/asm-generic/pgtable-nopud.h
51385 --- linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
51386 +++ linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
51387 @@ -1,10 +1,15 @@
51388 #ifndef _PGTABLE_NOPUD_H
51389 #define _PGTABLE_NOPUD_H
51390
51391 -#ifndef __ASSEMBLY__
51392 -
51393 #define __PAGETABLE_PUD_FOLDED
51394
51395 +#define PUD_SHIFT PGDIR_SHIFT
51396 +#define PTRS_PER_PUD 1
51397 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51398 +#define PUD_MASK (~(PUD_SIZE-1))
51399 +
51400 +#ifndef __ASSEMBLY__
51401 +
51402 /*
51403 * Having the pud type consist of a pgd gets the size right, and allows
51404 * us to conceptually access the pgd entry that this pud is folded into
51405 @@ -12,11 +17,6 @@
51406 */
51407 typedef struct { pgd_t pgd; } pud_t;
51408
51409 -#define PUD_SHIFT PGDIR_SHIFT
51410 -#define PTRS_PER_PUD 1
51411 -#define PUD_SIZE (1UL << PUD_SHIFT)
51412 -#define PUD_MASK (~(PUD_SIZE-1))
51413 -
51414 /*
51415 * The "pgd_xxx()" functions here are trivial for a folded two-level
51416 * setup: the pud is never bad, and a pud always exists (as it's folded
51417 diff -urNp linux-3.0.3/include/asm-generic/vmlinux.lds.h linux-3.0.3/include/asm-generic/vmlinux.lds.h
51418 --- linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
51419 +++ linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
51420 @@ -217,6 +217,7 @@
51421 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51422 VMLINUX_SYMBOL(__start_rodata) = .; \
51423 *(.rodata) *(.rodata.*) \
51424 + *(.data..read_only) \
51425 *(__vermagic) /* Kernel version magic */ \
51426 . = ALIGN(8); \
51427 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51428 @@ -723,17 +724,18 @@
51429 * section in the linker script will go there too. @phdr should have
51430 * a leading colon.
51431 *
51432 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51433 + * Note that this macros defines per_cpu_load as an absolute symbol.
51434 * If there is no need to put the percpu section at a predetermined
51435 * address, use PERCPU_SECTION.
51436 */
51437 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51438 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51439 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51440 + per_cpu_load = .; \
51441 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51442 - LOAD_OFFSET) { \
51443 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51444 PERCPU_INPUT(cacheline) \
51445 } phdr \
51446 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51447 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51448
51449 /**
51450 * PERCPU_SECTION - define output section for percpu area, simple version
51451 diff -urNp linux-3.0.3/include/drm/drm_crtc_helper.h linux-3.0.3/include/drm/drm_crtc_helper.h
51452 --- linux-3.0.3/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
51453 +++ linux-3.0.3/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
51454 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51455
51456 /* disable crtc when not in use - more explicit than dpms off */
51457 void (*disable)(struct drm_crtc *crtc);
51458 -};
51459 +} __no_const;
51460
51461 struct drm_encoder_helper_funcs {
51462 void (*dpms)(struct drm_encoder *encoder, int mode);
51463 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51464 struct drm_connector *connector);
51465 /* disable encoder when not in use - more explicit than dpms off */
51466 void (*disable)(struct drm_encoder *encoder);
51467 -};
51468 +} __no_const;
51469
51470 struct drm_connector_helper_funcs {
51471 int (*get_modes)(struct drm_connector *connector);
51472 diff -urNp linux-3.0.3/include/drm/drmP.h linux-3.0.3/include/drm/drmP.h
51473 --- linux-3.0.3/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
51474 +++ linux-3.0.3/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
51475 @@ -73,6 +73,7 @@
51476 #include <linux/workqueue.h>
51477 #include <linux/poll.h>
51478 #include <asm/pgalloc.h>
51479 +#include <asm/local.h>
51480 #include "drm.h"
51481
51482 #include <linux/idr.h>
51483 @@ -1033,7 +1034,7 @@ struct drm_device {
51484
51485 /** \name Usage Counters */
51486 /*@{ */
51487 - int open_count; /**< Outstanding files open */
51488 + local_t open_count; /**< Outstanding files open */
51489 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51490 atomic_t vma_count; /**< Outstanding vma areas open */
51491 int buf_use; /**< Buffers in use -- cannot alloc */
51492 @@ -1044,7 +1045,7 @@ struct drm_device {
51493 /*@{ */
51494 unsigned long counters;
51495 enum drm_stat_type types[15];
51496 - atomic_t counts[15];
51497 + atomic_unchecked_t counts[15];
51498 /*@} */
51499
51500 struct list_head filelist;
51501 diff -urNp linux-3.0.3/include/drm/ttm/ttm_memory.h linux-3.0.3/include/drm/ttm/ttm_memory.h
51502 --- linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
51503 +++ linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
51504 @@ -47,7 +47,7 @@
51505
51506 struct ttm_mem_shrink {
51507 int (*do_shrink) (struct ttm_mem_shrink *);
51508 -};
51509 +} __no_const;
51510
51511 /**
51512 * struct ttm_mem_global - Global memory accounting structure.
51513 diff -urNp linux-3.0.3/include/linux/a.out.h linux-3.0.3/include/linux/a.out.h
51514 --- linux-3.0.3/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
51515 +++ linux-3.0.3/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
51516 @@ -39,6 +39,14 @@ enum machine_type {
51517 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51518 };
51519
51520 +/* Constants for the N_FLAGS field */
51521 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51522 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51523 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51524 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51525 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51526 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51527 +
51528 #if !defined (N_MAGIC)
51529 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51530 #endif
51531 diff -urNp linux-3.0.3/include/linux/atmdev.h linux-3.0.3/include/linux/atmdev.h
51532 --- linux-3.0.3/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
51533 +++ linux-3.0.3/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
51534 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51535 #endif
51536
51537 struct k_atm_aal_stats {
51538 -#define __HANDLE_ITEM(i) atomic_t i
51539 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51540 __AAL_STAT_ITEMS
51541 #undef __HANDLE_ITEM
51542 };
51543 diff -urNp linux-3.0.3/include/linux/binfmts.h linux-3.0.3/include/linux/binfmts.h
51544 --- linux-3.0.3/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
51545 +++ linux-3.0.3/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
51546 @@ -88,6 +88,7 @@ struct linux_binfmt {
51547 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51548 int (*load_shlib)(struct file *);
51549 int (*core_dump)(struct coredump_params *cprm);
51550 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51551 unsigned long min_coredump; /* minimal dump size */
51552 };
51553
51554 diff -urNp linux-3.0.3/include/linux/blkdev.h linux-3.0.3/include/linux/blkdev.h
51555 --- linux-3.0.3/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
51556 +++ linux-3.0.3/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
51557 @@ -1308,7 +1308,7 @@ struct block_device_operations {
51558 /* this callback is with swap_lock and sometimes page table lock held */
51559 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51560 struct module *owner;
51561 -};
51562 +} __do_const;
51563
51564 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51565 unsigned long);
51566 diff -urNp linux-3.0.3/include/linux/blktrace_api.h linux-3.0.3/include/linux/blktrace_api.h
51567 --- linux-3.0.3/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
51568 +++ linux-3.0.3/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
51569 @@ -161,7 +161,7 @@ struct blk_trace {
51570 struct dentry *dir;
51571 struct dentry *dropped_file;
51572 struct dentry *msg_file;
51573 - atomic_t dropped;
51574 + atomic_unchecked_t dropped;
51575 };
51576
51577 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51578 diff -urNp linux-3.0.3/include/linux/byteorder/little_endian.h linux-3.0.3/include/linux/byteorder/little_endian.h
51579 --- linux-3.0.3/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
51580 +++ linux-3.0.3/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
51581 @@ -42,51 +42,51 @@
51582
51583 static inline __le64 __cpu_to_le64p(const __u64 *p)
51584 {
51585 - return (__force __le64)*p;
51586 + return (__force const __le64)*p;
51587 }
51588 static inline __u64 __le64_to_cpup(const __le64 *p)
51589 {
51590 - return (__force __u64)*p;
51591 + return (__force const __u64)*p;
51592 }
51593 static inline __le32 __cpu_to_le32p(const __u32 *p)
51594 {
51595 - return (__force __le32)*p;
51596 + return (__force const __le32)*p;
51597 }
51598 static inline __u32 __le32_to_cpup(const __le32 *p)
51599 {
51600 - return (__force __u32)*p;
51601 + return (__force const __u32)*p;
51602 }
51603 static inline __le16 __cpu_to_le16p(const __u16 *p)
51604 {
51605 - return (__force __le16)*p;
51606 + return (__force const __le16)*p;
51607 }
51608 static inline __u16 __le16_to_cpup(const __le16 *p)
51609 {
51610 - return (__force __u16)*p;
51611 + return (__force const __u16)*p;
51612 }
51613 static inline __be64 __cpu_to_be64p(const __u64 *p)
51614 {
51615 - return (__force __be64)__swab64p(p);
51616 + return (__force const __be64)__swab64p(p);
51617 }
51618 static inline __u64 __be64_to_cpup(const __be64 *p)
51619 {
51620 - return __swab64p((__u64 *)p);
51621 + return __swab64p((const __u64 *)p);
51622 }
51623 static inline __be32 __cpu_to_be32p(const __u32 *p)
51624 {
51625 - return (__force __be32)__swab32p(p);
51626 + return (__force const __be32)__swab32p(p);
51627 }
51628 static inline __u32 __be32_to_cpup(const __be32 *p)
51629 {
51630 - return __swab32p((__u32 *)p);
51631 + return __swab32p((const __u32 *)p);
51632 }
51633 static inline __be16 __cpu_to_be16p(const __u16 *p)
51634 {
51635 - return (__force __be16)__swab16p(p);
51636 + return (__force const __be16)__swab16p(p);
51637 }
51638 static inline __u16 __be16_to_cpup(const __be16 *p)
51639 {
51640 - return __swab16p((__u16 *)p);
51641 + return __swab16p((const __u16 *)p);
51642 }
51643 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51644 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51645 diff -urNp linux-3.0.3/include/linux/cache.h linux-3.0.3/include/linux/cache.h
51646 --- linux-3.0.3/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
51647 +++ linux-3.0.3/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
51648 @@ -16,6 +16,10 @@
51649 #define __read_mostly
51650 #endif
51651
51652 +#ifndef __read_only
51653 +#define __read_only __read_mostly
51654 +#endif
51655 +
51656 #ifndef ____cacheline_aligned
51657 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51658 #endif
51659 diff -urNp linux-3.0.3/include/linux/capability.h linux-3.0.3/include/linux/capability.h
51660 --- linux-3.0.3/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
51661 +++ linux-3.0.3/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
51662 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51663 extern bool ns_capable(struct user_namespace *ns, int cap);
51664 extern bool task_ns_capable(struct task_struct *t, int cap);
51665 extern bool nsown_capable(int cap);
51666 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51667 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51668 +extern bool capable_nolog(int cap);
51669
51670 /* audit system wants to get cap info from files as well */
51671 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51672 diff -urNp linux-3.0.3/include/linux/cleancache.h linux-3.0.3/include/linux/cleancache.h
51673 --- linux-3.0.3/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
51674 +++ linux-3.0.3/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
51675 @@ -31,7 +31,7 @@ struct cleancache_ops {
51676 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
51677 void (*flush_inode)(int, struct cleancache_filekey);
51678 void (*flush_fs)(int);
51679 -};
51680 +} __no_const;
51681
51682 extern struct cleancache_ops
51683 cleancache_register_ops(struct cleancache_ops *ops);
51684 diff -urNp linux-3.0.3/include/linux/compiler-gcc4.h linux-3.0.3/include/linux/compiler-gcc4.h
51685 --- linux-3.0.3/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
51686 +++ linux-3.0.3/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
51687 @@ -31,6 +31,12 @@
51688
51689
51690 #if __GNUC_MINOR__ >= 5
51691 +
51692 +#ifdef CONSTIFY_PLUGIN
51693 +#define __no_const __attribute__((no_const))
51694 +#define __do_const __attribute__((do_const))
51695 +#endif
51696 +
51697 /*
51698 * Mark a position in code as unreachable. This can be used to
51699 * suppress control flow warnings after asm blocks that transfer
51700 @@ -46,6 +52,11 @@
51701 #define __noclone __attribute__((__noclone__))
51702
51703 #endif
51704 +
51705 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51706 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51707 +#define __bos0(ptr) __bos((ptr), 0)
51708 +#define __bos1(ptr) __bos((ptr), 1)
51709 #endif
51710
51711 #if __GNUC_MINOR__ > 0
51712 diff -urNp linux-3.0.3/include/linux/compiler.h linux-3.0.3/include/linux/compiler.h
51713 --- linux-3.0.3/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
51714 +++ linux-3.0.3/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
51715 @@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
51716 # define __attribute_const__ /* unimplemented */
51717 #endif
51718
51719 +#ifndef __no_const
51720 +# define __no_const
51721 +#endif
51722 +
51723 +#ifndef __do_const
51724 +# define __do_const
51725 +#endif
51726 +
51727 /*
51728 * Tell gcc if a function is cold. The compiler will assume any path
51729 * directly leading to the call is unlikely.
51730 @@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
51731 #define __cold
51732 #endif
51733
51734 +#ifndef __alloc_size
51735 +#define __alloc_size(...)
51736 +#endif
51737 +
51738 +#ifndef __bos
51739 +#define __bos(ptr, arg)
51740 +#endif
51741 +
51742 +#ifndef __bos0
51743 +#define __bos0(ptr)
51744 +#endif
51745 +
51746 +#ifndef __bos1
51747 +#define __bos1(ptr)
51748 +#endif
51749 +
51750 /* Simple shorthand for a section definition */
51751 #ifndef __section
51752 # define __section(S) __attribute__ ((__section__(#S)))
51753 @@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
51754 * use is to mediate communication between process-level code and irq/NMI
51755 * handlers, all running on the same CPU.
51756 */
51757 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51758 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51759 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51760
51761 #endif /* __LINUX_COMPILER_H */
51762 diff -urNp linux-3.0.3/include/linux/cpuset.h linux-3.0.3/include/linux/cpuset.h
51763 --- linux-3.0.3/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
51764 +++ linux-3.0.3/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
51765 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51766 * nodemask.
51767 */
51768 smp_mb();
51769 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51770 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51771 }
51772
51773 static inline void set_mems_allowed(nodemask_t nodemask)
51774 diff -urNp linux-3.0.3/include/linux/crypto.h linux-3.0.3/include/linux/crypto.h
51775 --- linux-3.0.3/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
51776 +++ linux-3.0.3/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
51777 @@ -361,7 +361,7 @@ struct cipher_tfm {
51778 const u8 *key, unsigned int keylen);
51779 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51780 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51781 -};
51782 +} __no_const;
51783
51784 struct hash_tfm {
51785 int (*init)(struct hash_desc *desc);
51786 @@ -382,13 +382,13 @@ struct compress_tfm {
51787 int (*cot_decompress)(struct crypto_tfm *tfm,
51788 const u8 *src, unsigned int slen,
51789 u8 *dst, unsigned int *dlen);
51790 -};
51791 +} __no_const;
51792
51793 struct rng_tfm {
51794 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51795 unsigned int dlen);
51796 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51797 -};
51798 +} __no_const;
51799
51800 #define crt_ablkcipher crt_u.ablkcipher
51801 #define crt_aead crt_u.aead
51802 diff -urNp linux-3.0.3/include/linux/decompress/mm.h linux-3.0.3/include/linux/decompress/mm.h
51803 --- linux-3.0.3/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
51804 +++ linux-3.0.3/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
51805 @@ -77,7 +77,7 @@ static void free(void *where)
51806 * warnings when not needed (indeed large_malloc / large_free are not
51807 * needed by inflate */
51808
51809 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51810 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51811 #define free(a) kfree(a)
51812
51813 #define large_malloc(a) vmalloc(a)
51814 diff -urNp linux-3.0.3/include/linux/dma-mapping.h linux-3.0.3/include/linux/dma-mapping.h
51815 --- linux-3.0.3/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
51816 +++ linux-3.0.3/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
51817 @@ -50,7 +50,7 @@ struct dma_map_ops {
51818 int (*dma_supported)(struct device *dev, u64 mask);
51819 int (*set_dma_mask)(struct device *dev, u64 mask);
51820 int is_phys;
51821 -};
51822 +} __do_const;
51823
51824 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51825
51826 diff -urNp linux-3.0.3/include/linux/efi.h linux-3.0.3/include/linux/efi.h
51827 --- linux-3.0.3/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
51828 +++ linux-3.0.3/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
51829 @@ -410,7 +410,7 @@ struct efivar_operations {
51830 efi_get_variable_t *get_variable;
51831 efi_get_next_variable_t *get_next_variable;
51832 efi_set_variable_t *set_variable;
51833 -};
51834 +} __no_const;
51835
51836 struct efivars {
51837 /*
51838 diff -urNp linux-3.0.3/include/linux/elf.h linux-3.0.3/include/linux/elf.h
51839 --- linux-3.0.3/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
51840 +++ linux-3.0.3/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
51841 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51842 #define PT_GNU_EH_FRAME 0x6474e550
51843
51844 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51845 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51846 +
51847 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51848 +
51849 +/* Constants for the e_flags field */
51850 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51851 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51852 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51853 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51854 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51855 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51856
51857 /*
51858 * Extended Numbering
51859 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51860 #define DT_DEBUG 21
51861 #define DT_TEXTREL 22
51862 #define DT_JMPREL 23
51863 +#define DT_FLAGS 30
51864 + #define DF_TEXTREL 0x00000004
51865 #define DT_ENCODING 32
51866 #define OLD_DT_LOOS 0x60000000
51867 #define DT_LOOS 0x6000000d
51868 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51869 #define PF_W 0x2
51870 #define PF_X 0x1
51871
51872 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51873 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51874 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51875 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51876 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51877 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51878 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51879 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51880 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51881 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51882 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51883 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51884 +
51885 typedef struct elf32_phdr{
51886 Elf32_Word p_type;
51887 Elf32_Off p_offset;
51888 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51889 #define EI_OSABI 7
51890 #define EI_PAD 8
51891
51892 +#define EI_PAX 14
51893 +
51894 #define ELFMAG0 0x7f /* EI_MAG */
51895 #define ELFMAG1 'E'
51896 #define ELFMAG2 'L'
51897 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
51898 #define elf_note elf32_note
51899 #define elf_addr_t Elf32_Off
51900 #define Elf_Half Elf32_Half
51901 +#define elf_dyn Elf32_Dyn
51902
51903 #else
51904
51905 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
51906 #define elf_note elf64_note
51907 #define elf_addr_t Elf64_Off
51908 #define Elf_Half Elf64_Half
51909 +#define elf_dyn Elf64_Dyn
51910
51911 #endif
51912
51913 diff -urNp linux-3.0.3/include/linux/firewire.h linux-3.0.3/include/linux/firewire.h
51914 --- linux-3.0.3/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
51915 +++ linux-3.0.3/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
51916 @@ -428,7 +428,7 @@ struct fw_iso_context {
51917 union {
51918 fw_iso_callback_t sc;
51919 fw_iso_mc_callback_t mc;
51920 - } callback;
51921 + } __no_const callback;
51922 void *callback_data;
51923 };
51924
51925 diff -urNp linux-3.0.3/include/linux/fscache-cache.h linux-3.0.3/include/linux/fscache-cache.h
51926 --- linux-3.0.3/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
51927 +++ linux-3.0.3/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
51928 @@ -102,7 +102,7 @@ struct fscache_operation {
51929 fscache_operation_release_t release;
51930 };
51931
51932 -extern atomic_t fscache_op_debug_id;
51933 +extern atomic_unchecked_t fscache_op_debug_id;
51934 extern void fscache_op_work_func(struct work_struct *work);
51935
51936 extern void fscache_enqueue_operation(struct fscache_operation *);
51937 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
51938 {
51939 INIT_WORK(&op->work, fscache_op_work_func);
51940 atomic_set(&op->usage, 1);
51941 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
51942 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51943 op->processor = processor;
51944 op->release = release;
51945 INIT_LIST_HEAD(&op->pend_link);
51946 diff -urNp linux-3.0.3/include/linux/fs.h linux-3.0.3/include/linux/fs.h
51947 --- linux-3.0.3/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
51948 +++ linux-3.0.3/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
51949 @@ -109,6 +109,11 @@ struct inodes_stat_t {
51950 /* File was opened by fanotify and shouldn't generate fanotify events */
51951 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
51952
51953 +/* Hack for grsec so as not to require read permission simply to execute
51954 + * a binary
51955 + */
51956 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
51957 +
51958 /*
51959 * The below are the various read and write types that we support. Some of
51960 * them include behavioral modifiers that send information down to the
51961 @@ -1571,7 +1576,8 @@ struct file_operations {
51962 int (*setlease)(struct file *, long, struct file_lock **);
51963 long (*fallocate)(struct file *file, int mode, loff_t offset,
51964 loff_t len);
51965 -};
51966 +} __do_const;
51967 +typedef struct file_operations __no_const file_operations_no_const;
51968
51969 #define IPERM_FLAG_RCU 0x0001
51970
51971 diff -urNp linux-3.0.3/include/linux/fsnotify.h linux-3.0.3/include/linux/fsnotify.h
51972 --- linux-3.0.3/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
51973 +++ linux-3.0.3/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
51974 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
51975 */
51976 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
51977 {
51978 - return kstrdup(name, GFP_KERNEL);
51979 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
51980 }
51981
51982 /*
51983 diff -urNp linux-3.0.3/include/linux/fs_struct.h linux-3.0.3/include/linux/fs_struct.h
51984 --- linux-3.0.3/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
51985 +++ linux-3.0.3/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
51986 @@ -6,7 +6,7 @@
51987 #include <linux/seqlock.h>
51988
51989 struct fs_struct {
51990 - int users;
51991 + atomic_t users;
51992 spinlock_t lock;
51993 seqcount_t seq;
51994 int umask;
51995 diff -urNp linux-3.0.3/include/linux/ftrace_event.h linux-3.0.3/include/linux/ftrace_event.h
51996 --- linux-3.0.3/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
51997 +++ linux-3.0.3/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
51998 @@ -96,7 +96,7 @@ struct trace_event_functions {
51999 trace_print_func raw;
52000 trace_print_func hex;
52001 trace_print_func binary;
52002 -};
52003 +} __no_const;
52004
52005 struct trace_event {
52006 struct hlist_node node;
52007 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
52008 extern int trace_add_event_call(struct ftrace_event_call *call);
52009 extern void trace_remove_event_call(struct ftrace_event_call *call);
52010
52011 -#define is_signed_type(type) (((type)(-1)) < 0)
52012 +#define is_signed_type(type) (((type)(-1)) < (type)1)
52013
52014 int trace_set_clr_event(const char *system, const char *event, int set);
52015
52016 diff -urNp linux-3.0.3/include/linux/genhd.h linux-3.0.3/include/linux/genhd.h
52017 --- linux-3.0.3/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
52018 +++ linux-3.0.3/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
52019 @@ -184,7 +184,7 @@ struct gendisk {
52020 struct kobject *slave_dir;
52021
52022 struct timer_rand_state *random;
52023 - atomic_t sync_io; /* RAID */
52024 + atomic_unchecked_t sync_io; /* RAID */
52025 struct disk_events *ev;
52026 #ifdef CONFIG_BLK_DEV_INTEGRITY
52027 struct blk_integrity *integrity;
52028 diff -urNp linux-3.0.3/include/linux/gracl.h linux-3.0.3/include/linux/gracl.h
52029 --- linux-3.0.3/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52030 +++ linux-3.0.3/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
52031 @@ -0,0 +1,317 @@
52032 +#ifndef GR_ACL_H
52033 +#define GR_ACL_H
52034 +
52035 +#include <linux/grdefs.h>
52036 +#include <linux/resource.h>
52037 +#include <linux/capability.h>
52038 +#include <linux/dcache.h>
52039 +#include <asm/resource.h>
52040 +
52041 +/* Major status information */
52042 +
52043 +#define GR_VERSION "grsecurity 2.2.2"
52044 +#define GRSECURITY_VERSION 0x2202
52045 +
52046 +enum {
52047 + GR_SHUTDOWN = 0,
52048 + GR_ENABLE = 1,
52049 + GR_SPROLE = 2,
52050 + GR_RELOAD = 3,
52051 + GR_SEGVMOD = 4,
52052 + GR_STATUS = 5,
52053 + GR_UNSPROLE = 6,
52054 + GR_PASSSET = 7,
52055 + GR_SPROLEPAM = 8,
52056 +};
52057 +
52058 +/* Password setup definitions
52059 + * kernel/grhash.c */
52060 +enum {
52061 + GR_PW_LEN = 128,
52062 + GR_SALT_LEN = 16,
52063 + GR_SHA_LEN = 32,
52064 +};
52065 +
52066 +enum {
52067 + GR_SPROLE_LEN = 64,
52068 +};
52069 +
52070 +enum {
52071 + GR_NO_GLOB = 0,
52072 + GR_REG_GLOB,
52073 + GR_CREATE_GLOB
52074 +};
52075 +
52076 +#define GR_NLIMITS 32
52077 +
52078 +/* Begin Data Structures */
52079 +
52080 +struct sprole_pw {
52081 + unsigned char *rolename;
52082 + unsigned char salt[GR_SALT_LEN];
52083 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52084 +};
52085 +
52086 +struct name_entry {
52087 + __u32 key;
52088 + ino_t inode;
52089 + dev_t device;
52090 + char *name;
52091 + __u16 len;
52092 + __u8 deleted;
52093 + struct name_entry *prev;
52094 + struct name_entry *next;
52095 +};
52096 +
52097 +struct inodev_entry {
52098 + struct name_entry *nentry;
52099 + struct inodev_entry *prev;
52100 + struct inodev_entry *next;
52101 +};
52102 +
52103 +struct acl_role_db {
52104 + struct acl_role_label **r_hash;
52105 + __u32 r_size;
52106 +};
52107 +
52108 +struct inodev_db {
52109 + struct inodev_entry **i_hash;
52110 + __u32 i_size;
52111 +};
52112 +
52113 +struct name_db {
52114 + struct name_entry **n_hash;
52115 + __u32 n_size;
52116 +};
52117 +
52118 +struct crash_uid {
52119 + uid_t uid;
52120 + unsigned long expires;
52121 +};
52122 +
52123 +struct gr_hash_struct {
52124 + void **table;
52125 + void **nametable;
52126 + void *first;
52127 + __u32 table_size;
52128 + __u32 used_size;
52129 + int type;
52130 +};
52131 +
52132 +/* Userspace Grsecurity ACL data structures */
52133 +
52134 +struct acl_subject_label {
52135 + char *filename;
52136 + ino_t inode;
52137 + dev_t device;
52138 + __u32 mode;
52139 + kernel_cap_t cap_mask;
52140 + kernel_cap_t cap_lower;
52141 + kernel_cap_t cap_invert_audit;
52142 +
52143 + struct rlimit res[GR_NLIMITS];
52144 + __u32 resmask;
52145 +
52146 + __u8 user_trans_type;
52147 + __u8 group_trans_type;
52148 + uid_t *user_transitions;
52149 + gid_t *group_transitions;
52150 + __u16 user_trans_num;
52151 + __u16 group_trans_num;
52152 +
52153 + __u32 sock_families[2];
52154 + __u32 ip_proto[8];
52155 + __u32 ip_type;
52156 + struct acl_ip_label **ips;
52157 + __u32 ip_num;
52158 + __u32 inaddr_any_override;
52159 +
52160 + __u32 crashes;
52161 + unsigned long expires;
52162 +
52163 + struct acl_subject_label *parent_subject;
52164 + struct gr_hash_struct *hash;
52165 + struct acl_subject_label *prev;
52166 + struct acl_subject_label *next;
52167 +
52168 + struct acl_object_label **obj_hash;
52169 + __u32 obj_hash_size;
52170 + __u16 pax_flags;
52171 +};
52172 +
52173 +struct role_allowed_ip {
52174 + __u32 addr;
52175 + __u32 netmask;
52176 +
52177 + struct role_allowed_ip *prev;
52178 + struct role_allowed_ip *next;
52179 +};
52180 +
52181 +struct role_transition {
52182 + char *rolename;
52183 +
52184 + struct role_transition *prev;
52185 + struct role_transition *next;
52186 +};
52187 +
52188 +struct acl_role_label {
52189 + char *rolename;
52190 + uid_t uidgid;
52191 + __u16 roletype;
52192 +
52193 + __u16 auth_attempts;
52194 + unsigned long expires;
52195 +
52196 + struct acl_subject_label *root_label;
52197 + struct gr_hash_struct *hash;
52198 +
52199 + struct acl_role_label *prev;
52200 + struct acl_role_label *next;
52201 +
52202 + struct role_transition *transitions;
52203 + struct role_allowed_ip *allowed_ips;
52204 + uid_t *domain_children;
52205 + __u16 domain_child_num;
52206 +
52207 + struct acl_subject_label **subj_hash;
52208 + __u32 subj_hash_size;
52209 +};
52210 +
52211 +struct user_acl_role_db {
52212 + struct acl_role_label **r_table;
52213 + __u32 num_pointers; /* Number of allocations to track */
52214 + __u32 num_roles; /* Number of roles */
52215 + __u32 num_domain_children; /* Number of domain children */
52216 + __u32 num_subjects; /* Number of subjects */
52217 + __u32 num_objects; /* Number of objects */
52218 +};
52219 +
52220 +struct acl_object_label {
52221 + char *filename;
52222 + ino_t inode;
52223 + dev_t device;
52224 + __u32 mode;
52225 +
52226 + struct acl_subject_label *nested;
52227 + struct acl_object_label *globbed;
52228 +
52229 + /* next two structures not used */
52230 +
52231 + struct acl_object_label *prev;
52232 + struct acl_object_label *next;
52233 +};
52234 +
52235 +struct acl_ip_label {
52236 + char *iface;
52237 + __u32 addr;
52238 + __u32 netmask;
52239 + __u16 low, high;
52240 + __u8 mode;
52241 + __u32 type;
52242 + __u32 proto[8];
52243 +
52244 + /* next two structures not used */
52245 +
52246 + struct acl_ip_label *prev;
52247 + struct acl_ip_label *next;
52248 +};
52249 +
52250 +struct gr_arg {
52251 + struct user_acl_role_db role_db;
52252 + unsigned char pw[GR_PW_LEN];
52253 + unsigned char salt[GR_SALT_LEN];
52254 + unsigned char sum[GR_SHA_LEN];
52255 + unsigned char sp_role[GR_SPROLE_LEN];
52256 + struct sprole_pw *sprole_pws;
52257 + dev_t segv_device;
52258 + ino_t segv_inode;
52259 + uid_t segv_uid;
52260 + __u16 num_sprole_pws;
52261 + __u16 mode;
52262 +};
52263 +
52264 +struct gr_arg_wrapper {
52265 + struct gr_arg *arg;
52266 + __u32 version;
52267 + __u32 size;
52268 +};
52269 +
52270 +struct subject_map {
52271 + struct acl_subject_label *user;
52272 + struct acl_subject_label *kernel;
52273 + struct subject_map *prev;
52274 + struct subject_map *next;
52275 +};
52276 +
52277 +struct acl_subj_map_db {
52278 + struct subject_map **s_hash;
52279 + __u32 s_size;
52280 +};
52281 +
52282 +/* End Data Structures Section */
52283 +
52284 +/* Hash functions generated by empirical testing by Brad Spengler
52285 + Makes good use of the low bits of the inode. Generally 0-1 times
52286 + in loop for successful match. 0-3 for unsuccessful match.
52287 + Shift/add algorithm with modulus of table size and an XOR*/
52288 +
52289 +static __inline__ unsigned int
52290 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52291 +{
52292 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52293 +}
52294 +
52295 + static __inline__ unsigned int
52296 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52297 +{
52298 + return ((const unsigned long)userp % sz);
52299 +}
52300 +
52301 +static __inline__ unsigned int
52302 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52303 +{
52304 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52305 +}
52306 +
52307 +static __inline__ unsigned int
52308 +nhash(const char *name, const __u16 len, const unsigned int sz)
52309 +{
52310 + return full_name_hash((const unsigned char *)name, len) % sz;
52311 +}
52312 +
52313 +#define FOR_EACH_ROLE_START(role) \
52314 + role = role_list; \
52315 + while (role) {
52316 +
52317 +#define FOR_EACH_ROLE_END(role) \
52318 + role = role->prev; \
52319 + }
52320 +
52321 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52322 + subj = NULL; \
52323 + iter = 0; \
52324 + while (iter < role->subj_hash_size) { \
52325 + if (subj == NULL) \
52326 + subj = role->subj_hash[iter]; \
52327 + if (subj == NULL) { \
52328 + iter++; \
52329 + continue; \
52330 + }
52331 +
52332 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52333 + subj = subj->next; \
52334 + if (subj == NULL) \
52335 + iter++; \
52336 + }
52337 +
52338 +
52339 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52340 + subj = role->hash->first; \
52341 + while (subj != NULL) {
52342 +
52343 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52344 + subj = subj->next; \
52345 + }
52346 +
52347 +#endif
52348 +
52349 diff -urNp linux-3.0.3/include/linux/gralloc.h linux-3.0.3/include/linux/gralloc.h
52350 --- linux-3.0.3/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52351 +++ linux-3.0.3/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
52352 @@ -0,0 +1,9 @@
52353 +#ifndef __GRALLOC_H
52354 +#define __GRALLOC_H
52355 +
52356 +void acl_free_all(void);
52357 +int acl_alloc_stack_init(unsigned long size);
52358 +void *acl_alloc(unsigned long len);
52359 +void *acl_alloc_num(unsigned long num, unsigned long len);
52360 +
52361 +#endif
52362 diff -urNp linux-3.0.3/include/linux/grdefs.h linux-3.0.3/include/linux/grdefs.h
52363 --- linux-3.0.3/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52364 +++ linux-3.0.3/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
52365 @@ -0,0 +1,140 @@
52366 +#ifndef GRDEFS_H
52367 +#define GRDEFS_H
52368 +
52369 +/* Begin grsecurity status declarations */
52370 +
52371 +enum {
52372 + GR_READY = 0x01,
52373 + GR_STATUS_INIT = 0x00 // disabled state
52374 +};
52375 +
52376 +/* Begin ACL declarations */
52377 +
52378 +/* Role flags */
52379 +
52380 +enum {
52381 + GR_ROLE_USER = 0x0001,
52382 + GR_ROLE_GROUP = 0x0002,
52383 + GR_ROLE_DEFAULT = 0x0004,
52384 + GR_ROLE_SPECIAL = 0x0008,
52385 + GR_ROLE_AUTH = 0x0010,
52386 + GR_ROLE_NOPW = 0x0020,
52387 + GR_ROLE_GOD = 0x0040,
52388 + GR_ROLE_LEARN = 0x0080,
52389 + GR_ROLE_TPE = 0x0100,
52390 + GR_ROLE_DOMAIN = 0x0200,
52391 + GR_ROLE_PAM = 0x0400,
52392 + GR_ROLE_PERSIST = 0x0800
52393 +};
52394 +
52395 +/* ACL Subject and Object mode flags */
52396 +enum {
52397 + GR_DELETED = 0x80000000
52398 +};
52399 +
52400 +/* ACL Object-only mode flags */
52401 +enum {
52402 + GR_READ = 0x00000001,
52403 + GR_APPEND = 0x00000002,
52404 + GR_WRITE = 0x00000004,
52405 + GR_EXEC = 0x00000008,
52406 + GR_FIND = 0x00000010,
52407 + GR_INHERIT = 0x00000020,
52408 + GR_SETID = 0x00000040,
52409 + GR_CREATE = 0x00000080,
52410 + GR_DELETE = 0x00000100,
52411 + GR_LINK = 0x00000200,
52412 + GR_AUDIT_READ = 0x00000400,
52413 + GR_AUDIT_APPEND = 0x00000800,
52414 + GR_AUDIT_WRITE = 0x00001000,
52415 + GR_AUDIT_EXEC = 0x00002000,
52416 + GR_AUDIT_FIND = 0x00004000,
52417 + GR_AUDIT_INHERIT= 0x00008000,
52418 + GR_AUDIT_SETID = 0x00010000,
52419 + GR_AUDIT_CREATE = 0x00020000,
52420 + GR_AUDIT_DELETE = 0x00040000,
52421 + GR_AUDIT_LINK = 0x00080000,
52422 + GR_PTRACERD = 0x00100000,
52423 + GR_NOPTRACE = 0x00200000,
52424 + GR_SUPPRESS = 0x00400000,
52425 + GR_NOLEARN = 0x00800000,
52426 + GR_INIT_TRANSFER= 0x01000000
52427 +};
52428 +
52429 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52430 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52431 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52432 +
52433 +/* ACL subject-only mode flags */
52434 +enum {
52435 + GR_KILL = 0x00000001,
52436 + GR_VIEW = 0x00000002,
52437 + GR_PROTECTED = 0x00000004,
52438 + GR_LEARN = 0x00000008,
52439 + GR_OVERRIDE = 0x00000010,
52440 + /* just a placeholder, this mode is only used in userspace */
52441 + GR_DUMMY = 0x00000020,
52442 + GR_PROTSHM = 0x00000040,
52443 + GR_KILLPROC = 0x00000080,
52444 + GR_KILLIPPROC = 0x00000100,
52445 + /* just a placeholder, this mode is only used in userspace */
52446 + GR_NOTROJAN = 0x00000200,
52447 + GR_PROTPROCFD = 0x00000400,
52448 + GR_PROCACCT = 0x00000800,
52449 + GR_RELAXPTRACE = 0x00001000,
52450 + GR_NESTED = 0x00002000,
52451 + GR_INHERITLEARN = 0x00004000,
52452 + GR_PROCFIND = 0x00008000,
52453 + GR_POVERRIDE = 0x00010000,
52454 + GR_KERNELAUTH = 0x00020000,
52455 + GR_ATSECURE = 0x00040000,
52456 + GR_SHMEXEC = 0x00080000
52457 +};
52458 +
52459 +enum {
52460 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52461 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52462 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52463 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52464 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52465 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52466 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52467 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52468 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52469 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52470 +};
52471 +
52472 +enum {
52473 + GR_ID_USER = 0x01,
52474 + GR_ID_GROUP = 0x02,
52475 +};
52476 +
52477 +enum {
52478 + GR_ID_ALLOW = 0x01,
52479 + GR_ID_DENY = 0x02,
52480 +};
52481 +
52482 +#define GR_CRASH_RES 31
52483 +#define GR_UIDTABLE_MAX 500
52484 +
52485 +/* begin resource learning section */
52486 +enum {
52487 + GR_RLIM_CPU_BUMP = 60,
52488 + GR_RLIM_FSIZE_BUMP = 50000,
52489 + GR_RLIM_DATA_BUMP = 10000,
52490 + GR_RLIM_STACK_BUMP = 1000,
52491 + GR_RLIM_CORE_BUMP = 10000,
52492 + GR_RLIM_RSS_BUMP = 500000,
52493 + GR_RLIM_NPROC_BUMP = 1,
52494 + GR_RLIM_NOFILE_BUMP = 5,
52495 + GR_RLIM_MEMLOCK_BUMP = 50000,
52496 + GR_RLIM_AS_BUMP = 500000,
52497 + GR_RLIM_LOCKS_BUMP = 2,
52498 + GR_RLIM_SIGPENDING_BUMP = 5,
52499 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52500 + GR_RLIM_NICE_BUMP = 1,
52501 + GR_RLIM_RTPRIO_BUMP = 1,
52502 + GR_RLIM_RTTIME_BUMP = 1000000
52503 +};
52504 +
52505 +#endif
52506 diff -urNp linux-3.0.3/include/linux/grinternal.h linux-3.0.3/include/linux/grinternal.h
52507 --- linux-3.0.3/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52508 +++ linux-3.0.3/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
52509 @@ -0,0 +1,219 @@
52510 +#ifndef __GRINTERNAL_H
52511 +#define __GRINTERNAL_H
52512 +
52513 +#ifdef CONFIG_GRKERNSEC
52514 +
52515 +#include <linux/fs.h>
52516 +#include <linux/mnt_namespace.h>
52517 +#include <linux/nsproxy.h>
52518 +#include <linux/gracl.h>
52519 +#include <linux/grdefs.h>
52520 +#include <linux/grmsg.h>
52521 +
52522 +void gr_add_learn_entry(const char *fmt, ...)
52523 + __attribute__ ((format (printf, 1, 2)));
52524 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52525 + const struct vfsmount *mnt);
52526 +__u32 gr_check_create(const struct dentry *new_dentry,
52527 + const struct dentry *parent,
52528 + const struct vfsmount *mnt, const __u32 mode);
52529 +int gr_check_protected_task(const struct task_struct *task);
52530 +__u32 to_gr_audit(const __u32 reqmode);
52531 +int gr_set_acls(const int type);
52532 +int gr_apply_subject_to_task(struct task_struct *task);
52533 +int gr_acl_is_enabled(void);
52534 +char gr_roletype_to_char(void);
52535 +
52536 +void gr_handle_alertkill(struct task_struct *task);
52537 +char *gr_to_filename(const struct dentry *dentry,
52538 + const struct vfsmount *mnt);
52539 +char *gr_to_filename1(const struct dentry *dentry,
52540 + const struct vfsmount *mnt);
52541 +char *gr_to_filename2(const struct dentry *dentry,
52542 + const struct vfsmount *mnt);
52543 +char *gr_to_filename3(const struct dentry *dentry,
52544 + const struct vfsmount *mnt);
52545 +
52546 +extern int grsec_enable_harden_ptrace;
52547 +extern int grsec_enable_link;
52548 +extern int grsec_enable_fifo;
52549 +extern int grsec_enable_execve;
52550 +extern int grsec_enable_shm;
52551 +extern int grsec_enable_execlog;
52552 +extern int grsec_enable_signal;
52553 +extern int grsec_enable_audit_ptrace;
52554 +extern int grsec_enable_forkfail;
52555 +extern int grsec_enable_time;
52556 +extern int grsec_enable_rofs;
52557 +extern int grsec_enable_chroot_shmat;
52558 +extern int grsec_enable_chroot_mount;
52559 +extern int grsec_enable_chroot_double;
52560 +extern int grsec_enable_chroot_pivot;
52561 +extern int grsec_enable_chroot_chdir;
52562 +extern int grsec_enable_chroot_chmod;
52563 +extern int grsec_enable_chroot_mknod;
52564 +extern int grsec_enable_chroot_fchdir;
52565 +extern int grsec_enable_chroot_nice;
52566 +extern int grsec_enable_chroot_execlog;
52567 +extern int grsec_enable_chroot_caps;
52568 +extern int grsec_enable_chroot_sysctl;
52569 +extern int grsec_enable_chroot_unix;
52570 +extern int grsec_enable_tpe;
52571 +extern int grsec_tpe_gid;
52572 +extern int grsec_enable_tpe_all;
52573 +extern int grsec_enable_tpe_invert;
52574 +extern int grsec_enable_socket_all;
52575 +extern int grsec_socket_all_gid;
52576 +extern int grsec_enable_socket_client;
52577 +extern int grsec_socket_client_gid;
52578 +extern int grsec_enable_socket_server;
52579 +extern int grsec_socket_server_gid;
52580 +extern int grsec_audit_gid;
52581 +extern int grsec_enable_group;
52582 +extern int grsec_enable_audit_textrel;
52583 +extern int grsec_enable_log_rwxmaps;
52584 +extern int grsec_enable_mount;
52585 +extern int grsec_enable_chdir;
52586 +extern int grsec_resource_logging;
52587 +extern int grsec_enable_blackhole;
52588 +extern int grsec_lastack_retries;
52589 +extern int grsec_enable_brute;
52590 +extern int grsec_lock;
52591 +
52592 +extern spinlock_t grsec_alert_lock;
52593 +extern unsigned long grsec_alert_wtime;
52594 +extern unsigned long grsec_alert_fyet;
52595 +
52596 +extern spinlock_t grsec_audit_lock;
52597 +
52598 +extern rwlock_t grsec_exec_file_lock;
52599 +
52600 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52601 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52602 + (tsk)->exec_file->f_vfsmnt) : "/")
52603 +
52604 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52605 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52606 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52607 +
52608 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52609 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52610 + (tsk)->exec_file->f_vfsmnt) : "/")
52611 +
52612 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52613 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52614 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52615 +
52616 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52617 +
52618 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52619 +
52620 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52621 + (task)->pid, (cred)->uid, \
52622 + (cred)->euid, (cred)->gid, (cred)->egid, \
52623 + gr_parent_task_fullpath(task), \
52624 + (task)->real_parent->comm, (task)->real_parent->pid, \
52625 + (pcred)->uid, (pcred)->euid, \
52626 + (pcred)->gid, (pcred)->egid
52627 +
52628 +#define GR_CHROOT_CAPS {{ \
52629 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52630 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52631 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52632 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52633 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52634 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52635 +
52636 +#define security_learn(normal_msg,args...) \
52637 +({ \
52638 + read_lock(&grsec_exec_file_lock); \
52639 + gr_add_learn_entry(normal_msg "\n", ## args); \
52640 + read_unlock(&grsec_exec_file_lock); \
52641 +})
52642 +
52643 +enum {
52644 + GR_DO_AUDIT,
52645 + GR_DONT_AUDIT,
52646 + /* used for non-audit messages that we shouldn't kill the task on */
52647 + GR_DONT_AUDIT_GOOD
52648 +};
52649 +
52650 +enum {
52651 + GR_TTYSNIFF,
52652 + GR_RBAC,
52653 + GR_RBAC_STR,
52654 + GR_STR_RBAC,
52655 + GR_RBAC_MODE2,
52656 + GR_RBAC_MODE3,
52657 + GR_FILENAME,
52658 + GR_SYSCTL_HIDDEN,
52659 + GR_NOARGS,
52660 + GR_ONE_INT,
52661 + GR_ONE_INT_TWO_STR,
52662 + GR_ONE_STR,
52663 + GR_STR_INT,
52664 + GR_TWO_STR_INT,
52665 + GR_TWO_INT,
52666 + GR_TWO_U64,
52667 + GR_THREE_INT,
52668 + GR_FIVE_INT_TWO_STR,
52669 + GR_TWO_STR,
52670 + GR_THREE_STR,
52671 + GR_FOUR_STR,
52672 + GR_STR_FILENAME,
52673 + GR_FILENAME_STR,
52674 + GR_FILENAME_TWO_INT,
52675 + GR_FILENAME_TWO_INT_STR,
52676 + GR_TEXTREL,
52677 + GR_PTRACE,
52678 + GR_RESOURCE,
52679 + GR_CAP,
52680 + GR_SIG,
52681 + GR_SIG2,
52682 + GR_CRASH1,
52683 + GR_CRASH2,
52684 + GR_PSACCT,
52685 + GR_RWXMAP
52686 +};
52687 +
52688 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52689 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52690 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52691 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52692 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52693 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52694 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52695 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52696 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52697 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52698 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52699 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52700 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52701 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52702 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52703 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52704 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52705 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52706 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52707 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52708 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52709 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52710 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52711 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52712 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52713 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52714 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52715 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52716 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52717 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52718 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52719 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52720 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52721 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52722 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52723 +
52724 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52725 +
52726 +#endif
52727 +
52728 +#endif
52729 diff -urNp linux-3.0.3/include/linux/grmsg.h linux-3.0.3/include/linux/grmsg.h
52730 --- linux-3.0.3/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52731 +++ linux-3.0.3/include/linux/grmsg.h 2011-08-25 17:27:26.000000000 -0400
52732 @@ -0,0 +1,107 @@
52733 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52734 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52735 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52736 +#define GR_STOPMOD_MSG "denied modification of module state by "
52737 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52738 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52739 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52740 +#define GR_IOPL_MSG "denied use of iopl() by "
52741 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52742 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52743 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52744 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52745 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52746 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52747 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52748 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52749 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52750 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52751 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52752 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52753 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52754 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52755 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52756 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52757 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52758 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52759 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52760 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52761 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52762 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52763 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52764 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52765 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52766 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52767 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52768 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52769 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52770 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52771 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52772 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52773 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52774 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52775 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52776 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52777 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52778 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52779 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52780 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52781 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52782 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52783 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52784 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52785 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52786 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52787 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52788 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52789 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52790 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52791 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52792 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52793 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52794 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52795 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52796 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52797 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52798 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52799 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52800 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52801 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52802 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52803 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52804 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52805 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52806 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52807 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52808 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52809 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52810 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52811 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52812 +#define GR_TIME_MSG "time set by "
52813 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52814 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52815 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52816 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52817 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52818 +#define GR_BIND_MSG "denied bind() by "
52819 +#define GR_CONNECT_MSG "denied connect() by "
52820 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52821 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52822 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52823 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52824 +#define GR_CAP_ACL_MSG "use of %s denied for "
52825 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52826 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52827 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52828 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52829 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52830 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52831 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52832 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52833 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52834 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52835 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52836 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52837 +#define GR_VM86_MSG "denied use of vm86 by "
52838 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52839 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52840 diff -urNp linux-3.0.3/include/linux/grsecurity.h linux-3.0.3/include/linux/grsecurity.h
52841 --- linux-3.0.3/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52842 +++ linux-3.0.3/include/linux/grsecurity.h 2011-08-25 17:27:36.000000000 -0400
52843 @@ -0,0 +1,227 @@
52844 +#ifndef GR_SECURITY_H
52845 +#define GR_SECURITY_H
52846 +#include <linux/fs.h>
52847 +#include <linux/fs_struct.h>
52848 +#include <linux/binfmts.h>
52849 +#include <linux/gracl.h>
52850 +
52851 +/* notify of brain-dead configs */
52852 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52853 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52854 +#endif
52855 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52856 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52857 +#endif
52858 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52859 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52860 +#endif
52861 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52862 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52863 +#endif
52864 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52865 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52866 +#endif
52867 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52868 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
52869 +#endif
52870 +
52871 +#include <linux/compat.h>
52872 +
52873 +struct user_arg_ptr {
52874 +#ifdef CONFIG_COMPAT
52875 + bool is_compat;
52876 +#endif
52877 + union {
52878 + const char __user *const __user *native;
52879 +#ifdef CONFIG_COMPAT
52880 + compat_uptr_t __user *compat;
52881 +#endif
52882 + } ptr;
52883 +};
52884 +
52885 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52886 +void gr_handle_brute_check(void);
52887 +void gr_handle_kernel_exploit(void);
52888 +int gr_process_user_ban(void);
52889 +
52890 +char gr_roletype_to_char(void);
52891 +
52892 +int gr_acl_enable_at_secure(void);
52893 +
52894 +int gr_check_user_change(int real, int effective, int fs);
52895 +int gr_check_group_change(int real, int effective, int fs);
52896 +
52897 +void gr_del_task_from_ip_table(struct task_struct *p);
52898 +
52899 +int gr_pid_is_chrooted(struct task_struct *p);
52900 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52901 +int gr_handle_chroot_nice(void);
52902 +int gr_handle_chroot_sysctl(const int op);
52903 +int gr_handle_chroot_setpriority(struct task_struct *p,
52904 + const int niceval);
52905 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52906 +int gr_handle_chroot_chroot(const struct dentry *dentry,
52907 + const struct vfsmount *mnt);
52908 +int gr_handle_chroot_caps(struct path *path);
52909 +void gr_handle_chroot_chdir(struct path *path);
52910 +int gr_handle_chroot_chmod(const struct dentry *dentry,
52911 + const struct vfsmount *mnt, const int mode);
52912 +int gr_handle_chroot_mknod(const struct dentry *dentry,
52913 + const struct vfsmount *mnt, const int mode);
52914 +int gr_handle_chroot_mount(const struct dentry *dentry,
52915 + const struct vfsmount *mnt,
52916 + const char *dev_name);
52917 +int gr_handle_chroot_pivot(void);
52918 +int gr_handle_chroot_unix(const pid_t pid);
52919 +
52920 +int gr_handle_rawio(const struct inode *inode);
52921 +
52922 +void gr_handle_ioperm(void);
52923 +void gr_handle_iopl(void);
52924 +
52925 +int gr_tpe_allow(const struct file *file);
52926 +
52927 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
52928 +void gr_clear_chroot_entries(struct task_struct *task);
52929 +
52930 +void gr_log_forkfail(const int retval);
52931 +void gr_log_timechange(void);
52932 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
52933 +void gr_log_chdir(const struct dentry *dentry,
52934 + const struct vfsmount *mnt);
52935 +void gr_log_chroot_exec(const struct dentry *dentry,
52936 + const struct vfsmount *mnt);
52937 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
52938 +void gr_log_remount(const char *devname, const int retval);
52939 +void gr_log_unmount(const char *devname, const int retval);
52940 +void gr_log_mount(const char *from, const char *to, const int retval);
52941 +void gr_log_textrel(struct vm_area_struct *vma);
52942 +void gr_log_rwxmmap(struct file *file);
52943 +void gr_log_rwxmprotect(struct file *file);
52944 +
52945 +int gr_handle_follow_link(const struct inode *parent,
52946 + const struct inode *inode,
52947 + const struct dentry *dentry,
52948 + const struct vfsmount *mnt);
52949 +int gr_handle_fifo(const struct dentry *dentry,
52950 + const struct vfsmount *mnt,
52951 + const struct dentry *dir, const int flag,
52952 + const int acc_mode);
52953 +int gr_handle_hardlink(const struct dentry *dentry,
52954 + const struct vfsmount *mnt,
52955 + struct inode *inode,
52956 + const int mode, const char *to);
52957 +
52958 +int gr_is_capable(const int cap);
52959 +int gr_is_capable_nolog(const int cap);
52960 +void gr_learn_resource(const struct task_struct *task, const int limit,
52961 + const unsigned long wanted, const int gt);
52962 +void gr_copy_label(struct task_struct *tsk);
52963 +void gr_handle_crash(struct task_struct *task, const int sig);
52964 +int gr_handle_signal(const struct task_struct *p, const int sig);
52965 +int gr_check_crash_uid(const uid_t uid);
52966 +int gr_check_protected_task(const struct task_struct *task);
52967 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
52968 +int gr_acl_handle_mmap(const struct file *file,
52969 + const unsigned long prot);
52970 +int gr_acl_handle_mprotect(const struct file *file,
52971 + const unsigned long prot);
52972 +int gr_check_hidden_task(const struct task_struct *tsk);
52973 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
52974 + const struct vfsmount *mnt);
52975 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
52976 + const struct vfsmount *mnt);
52977 +__u32 gr_acl_handle_access(const struct dentry *dentry,
52978 + const struct vfsmount *mnt, const int fmode);
52979 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
52980 + const struct vfsmount *mnt, mode_t mode);
52981 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
52982 + const struct vfsmount *mnt, mode_t mode);
52983 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
52984 + const struct vfsmount *mnt);
52985 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
52986 + const struct vfsmount *mnt);
52987 +int gr_handle_ptrace(struct task_struct *task, const long request);
52988 +int gr_handle_proc_ptrace(struct task_struct *task);
52989 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
52990 + const struct vfsmount *mnt);
52991 +int gr_check_crash_exec(const struct file *filp);
52992 +int gr_acl_is_enabled(void);
52993 +void gr_set_kernel_label(struct task_struct *task);
52994 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
52995 + const gid_t gid);
52996 +int gr_set_proc_label(const struct dentry *dentry,
52997 + const struct vfsmount *mnt,
52998 + const int unsafe_share);
52999 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53000 + const struct vfsmount *mnt);
53001 +__u32 gr_acl_handle_open(const struct dentry *dentry,
53002 + const struct vfsmount *mnt, const int fmode);
53003 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
53004 + const struct dentry *p_dentry,
53005 + const struct vfsmount *p_mnt, const int fmode,
53006 + const int imode);
53007 +void gr_handle_create(const struct dentry *dentry,
53008 + const struct vfsmount *mnt);
53009 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53010 + const struct dentry *parent_dentry,
53011 + const struct vfsmount *parent_mnt,
53012 + const int mode);
53013 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53014 + const struct dentry *parent_dentry,
53015 + const struct vfsmount *parent_mnt);
53016 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53017 + const struct vfsmount *mnt);
53018 +void gr_handle_delete(const ino_t ino, const dev_t dev);
53019 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53020 + const struct vfsmount *mnt);
53021 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53022 + const struct dentry *parent_dentry,
53023 + const struct vfsmount *parent_mnt,
53024 + const char *from);
53025 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53026 + const struct dentry *parent_dentry,
53027 + const struct vfsmount *parent_mnt,
53028 + const struct dentry *old_dentry,
53029 + const struct vfsmount *old_mnt, const char *to);
53030 +int gr_acl_handle_rename(struct dentry *new_dentry,
53031 + struct dentry *parent_dentry,
53032 + const struct vfsmount *parent_mnt,
53033 + struct dentry *old_dentry,
53034 + struct inode *old_parent_inode,
53035 + struct vfsmount *old_mnt, const char *newname);
53036 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53037 + struct dentry *old_dentry,
53038 + struct dentry *new_dentry,
53039 + struct vfsmount *mnt, const __u8 replace);
53040 +__u32 gr_check_link(const struct dentry *new_dentry,
53041 + const struct dentry *parent_dentry,
53042 + const struct vfsmount *parent_mnt,
53043 + const struct dentry *old_dentry,
53044 + const struct vfsmount *old_mnt);
53045 +int gr_acl_handle_filldir(const struct file *file, const char *name,
53046 + const unsigned int namelen, const ino_t ino);
53047 +
53048 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
53049 + const struct vfsmount *mnt);
53050 +void gr_acl_handle_exit(void);
53051 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
53052 +int gr_acl_handle_procpidmem(const struct task_struct *task);
53053 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53054 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53055 +void gr_audit_ptrace(struct task_struct *task);
53056 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53057 +
53058 +#ifdef CONFIG_GRKERNSEC
53059 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53060 +void gr_handle_vm86(void);
53061 +void gr_handle_mem_readwrite(u64 from, u64 to);
53062 +
53063 +extern int grsec_enable_dmesg;
53064 +extern int grsec_disable_privio;
53065 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53066 +extern int grsec_enable_chroot_findtask;
53067 +#endif
53068 +#endif
53069 +
53070 +#endif
53071 diff -urNp linux-3.0.3/include/linux/grsock.h linux-3.0.3/include/linux/grsock.h
53072 --- linux-3.0.3/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53073 +++ linux-3.0.3/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
53074 @@ -0,0 +1,19 @@
53075 +#ifndef __GRSOCK_H
53076 +#define __GRSOCK_H
53077 +
53078 +extern void gr_attach_curr_ip(const struct sock *sk);
53079 +extern int gr_handle_sock_all(const int family, const int type,
53080 + const int protocol);
53081 +extern int gr_handle_sock_server(const struct sockaddr *sck);
53082 +extern int gr_handle_sock_server_other(const struct sock *sck);
53083 +extern int gr_handle_sock_client(const struct sockaddr *sck);
53084 +extern int gr_search_connect(struct socket * sock,
53085 + struct sockaddr_in * addr);
53086 +extern int gr_search_bind(struct socket * sock,
53087 + struct sockaddr_in * addr);
53088 +extern int gr_search_listen(struct socket * sock);
53089 +extern int gr_search_accept(struct socket * sock);
53090 +extern int gr_search_socket(const int domain, const int type,
53091 + const int protocol);
53092 +
53093 +#endif
53094 diff -urNp linux-3.0.3/include/linux/hid.h linux-3.0.3/include/linux/hid.h
53095 --- linux-3.0.3/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
53096 +++ linux-3.0.3/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
53097 @@ -675,7 +675,7 @@ struct hid_ll_driver {
53098 unsigned int code, int value);
53099
53100 int (*parse)(struct hid_device *hdev);
53101 -};
53102 +} __no_const;
53103
53104 #define PM_HINT_FULLON 1<<5
53105 #define PM_HINT_NORMAL 1<<1
53106 diff -urNp linux-3.0.3/include/linux/highmem.h linux-3.0.3/include/linux/highmem.h
53107 --- linux-3.0.3/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
53108 +++ linux-3.0.3/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
53109 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53110 kunmap_atomic(kaddr, KM_USER0);
53111 }
53112
53113 +static inline void sanitize_highpage(struct page *page)
53114 +{
53115 + void *kaddr;
53116 + unsigned long flags;
53117 +
53118 + local_irq_save(flags);
53119 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53120 + clear_page(kaddr);
53121 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53122 + local_irq_restore(flags);
53123 +}
53124 +
53125 static inline void zero_user_segments(struct page *page,
53126 unsigned start1, unsigned end1,
53127 unsigned start2, unsigned end2)
53128 diff -urNp linux-3.0.3/include/linux/i2c.h linux-3.0.3/include/linux/i2c.h
53129 --- linux-3.0.3/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
53130 +++ linux-3.0.3/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
53131 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53132 /* To determine what the adapter supports */
53133 u32 (*functionality) (struct i2c_adapter *);
53134 };
53135 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53136
53137 /*
53138 * i2c_adapter is the structure used to identify a physical i2c bus along
53139 diff -urNp linux-3.0.3/include/linux/i2o.h linux-3.0.3/include/linux/i2o.h
53140 --- linux-3.0.3/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
53141 +++ linux-3.0.3/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
53142 @@ -564,7 +564,7 @@ struct i2o_controller {
53143 struct i2o_device *exec; /* Executive */
53144 #if BITS_PER_LONG == 64
53145 spinlock_t context_list_lock; /* lock for context_list */
53146 - atomic_t context_list_counter; /* needed for unique contexts */
53147 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53148 struct list_head context_list; /* list of context id's
53149 and pointers */
53150 #endif
53151 diff -urNp linux-3.0.3/include/linux/init.h linux-3.0.3/include/linux/init.h
53152 --- linux-3.0.3/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
53153 +++ linux-3.0.3/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
53154 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53155
53156 /* Each module must use one module_init(). */
53157 #define module_init(initfn) \
53158 - static inline initcall_t __inittest(void) \
53159 + static inline __used initcall_t __inittest(void) \
53160 { return initfn; } \
53161 int init_module(void) __attribute__((alias(#initfn)));
53162
53163 /* This is only required if you want to be unloadable. */
53164 #define module_exit(exitfn) \
53165 - static inline exitcall_t __exittest(void) \
53166 + static inline __used exitcall_t __exittest(void) \
53167 { return exitfn; } \
53168 void cleanup_module(void) __attribute__((alias(#exitfn)));
53169
53170 diff -urNp linux-3.0.3/include/linux/init_task.h linux-3.0.3/include/linux/init_task.h
53171 --- linux-3.0.3/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
53172 +++ linux-3.0.3/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
53173 @@ -126,6 +126,12 @@ extern struct cred init_cred;
53174 # define INIT_PERF_EVENTS(tsk)
53175 #endif
53176
53177 +#ifdef CONFIG_X86
53178 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53179 +#else
53180 +#define INIT_TASK_THREAD_INFO
53181 +#endif
53182 +
53183 /*
53184 * INIT_TASK is used to set up the first task table, touch at
53185 * your own risk!. Base=0, limit=0x1fffff (=2MB)
53186 @@ -164,6 +170,7 @@ extern struct cred init_cred;
53187 RCU_INIT_POINTER(.cred, &init_cred), \
53188 .comm = "swapper", \
53189 .thread = INIT_THREAD, \
53190 + INIT_TASK_THREAD_INFO \
53191 .fs = &init_fs, \
53192 .files = &init_files, \
53193 .signal = &init_signals, \
53194 diff -urNp linux-3.0.3/include/linux/intel-iommu.h linux-3.0.3/include/linux/intel-iommu.h
53195 --- linux-3.0.3/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
53196 +++ linux-3.0.3/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
53197 @@ -296,7 +296,7 @@ struct iommu_flush {
53198 u8 fm, u64 type);
53199 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53200 unsigned int size_order, u64 type);
53201 -};
53202 +} __no_const;
53203
53204 enum {
53205 SR_DMAR_FECTL_REG,
53206 diff -urNp linux-3.0.3/include/linux/interrupt.h linux-3.0.3/include/linux/interrupt.h
53207 --- linux-3.0.3/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
53208 +++ linux-3.0.3/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
53209 @@ -422,7 +422,7 @@ enum
53210 /* map softirq index to softirq name. update 'softirq_to_name' in
53211 * kernel/softirq.c when adding a new softirq.
53212 */
53213 -extern char *softirq_to_name[NR_SOFTIRQS];
53214 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53215
53216 /* softirq mask and active fields moved to irq_cpustat_t in
53217 * asm/hardirq.h to get better cache usage. KAO
53218 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53219
53220 struct softirq_action
53221 {
53222 - void (*action)(struct softirq_action *);
53223 + void (*action)(void);
53224 };
53225
53226 asmlinkage void do_softirq(void);
53227 asmlinkage void __do_softirq(void);
53228 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53229 +extern void open_softirq(int nr, void (*action)(void));
53230 extern void softirq_init(void);
53231 static inline void __raise_softirq_irqoff(unsigned int nr)
53232 {
53233 diff -urNp linux-3.0.3/include/linux/kallsyms.h linux-3.0.3/include/linux/kallsyms.h
53234 --- linux-3.0.3/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
53235 +++ linux-3.0.3/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
53236 @@ -15,7 +15,8 @@
53237
53238 struct module;
53239
53240 -#ifdef CONFIG_KALLSYMS
53241 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53242 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53243 /* Lookup the address for a symbol. Returns 0 if not found. */
53244 unsigned long kallsyms_lookup_name(const char *name);
53245
53246 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53247 /* Stupid that this does nothing, but I didn't create this mess. */
53248 #define __print_symbol(fmt, addr)
53249 #endif /*CONFIG_KALLSYMS*/
53250 +#else /* when included by kallsyms.c, vsnprintf.c, or
53251 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53252 +extern void __print_symbol(const char *fmt, unsigned long address);
53253 +extern int sprint_backtrace(char *buffer, unsigned long address);
53254 +extern int sprint_symbol(char *buffer, unsigned long address);
53255 +const char *kallsyms_lookup(unsigned long addr,
53256 + unsigned long *symbolsize,
53257 + unsigned long *offset,
53258 + char **modname, char *namebuf);
53259 +#endif
53260
53261 /* This macro allows us to keep printk typechecking */
53262 static void __check_printsym_format(const char *fmt, ...)
53263 diff -urNp linux-3.0.3/include/linux/kgdb.h linux-3.0.3/include/linux/kgdb.h
53264 --- linux-3.0.3/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
53265 +++ linux-3.0.3/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
53266 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53267 extern int kgdb_io_module_registered;
53268
53269 extern atomic_t kgdb_setting_breakpoint;
53270 -extern atomic_t kgdb_cpu_doing_single_step;
53271 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53272
53273 extern struct task_struct *kgdb_usethread;
53274 extern struct task_struct *kgdb_contthread;
53275 @@ -251,7 +251,7 @@ struct kgdb_arch {
53276 void (*disable_hw_break)(struct pt_regs *regs);
53277 void (*remove_all_hw_break)(void);
53278 void (*correct_hw_break)(void);
53279 -};
53280 +} __do_const;
53281
53282 /**
53283 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
53284 @@ -276,7 +276,7 @@ struct kgdb_io {
53285 void (*pre_exception) (void);
53286 void (*post_exception) (void);
53287 int is_console;
53288 -};
53289 +} __do_const;
53290
53291 extern struct kgdb_arch arch_kgdb_ops;
53292
53293 diff -urNp linux-3.0.3/include/linux/kmod.h linux-3.0.3/include/linux/kmod.h
53294 --- linux-3.0.3/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
53295 +++ linux-3.0.3/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
53296 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
53297 * usually useless though. */
53298 extern int __request_module(bool wait, const char *name, ...) \
53299 __attribute__((format(printf, 2, 3)));
53300 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53301 + __attribute__((format(printf, 3, 4)));
53302 #define request_module(mod...) __request_module(true, mod)
53303 #define request_module_nowait(mod...) __request_module(false, mod)
53304 #define try_then_request_module(x, mod...) \
53305 diff -urNp linux-3.0.3/include/linux/kvm_host.h linux-3.0.3/include/linux/kvm_host.h
53306 --- linux-3.0.3/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
53307 +++ linux-3.0.3/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
53308 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53309 void vcpu_load(struct kvm_vcpu *vcpu);
53310 void vcpu_put(struct kvm_vcpu *vcpu);
53311
53312 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53313 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53314 struct module *module);
53315 void kvm_exit(void);
53316
53317 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53318 struct kvm_guest_debug *dbg);
53319 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53320
53321 -int kvm_arch_init(void *opaque);
53322 +int kvm_arch_init(const void *opaque);
53323 void kvm_arch_exit(void);
53324
53325 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53326 diff -urNp linux-3.0.3/include/linux/libata.h linux-3.0.3/include/linux/libata.h
53327 --- linux-3.0.3/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
53328 +++ linux-3.0.3/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
53329 @@ -899,7 +899,7 @@ struct ata_port_operations {
53330 * fields must be pointers.
53331 */
53332 const struct ata_port_operations *inherits;
53333 -};
53334 +} __do_const;
53335
53336 struct ata_port_info {
53337 unsigned long flags;
53338 diff -urNp linux-3.0.3/include/linux/mca.h linux-3.0.3/include/linux/mca.h
53339 --- linux-3.0.3/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
53340 +++ linux-3.0.3/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
53341 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53342 int region);
53343 void * (*mca_transform_memory)(struct mca_device *,
53344 void *memory);
53345 -};
53346 +} __no_const;
53347
53348 struct mca_bus {
53349 u64 default_dma_mask;
53350 diff -urNp linux-3.0.3/include/linux/memory.h linux-3.0.3/include/linux/memory.h
53351 --- linux-3.0.3/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
53352 +++ linux-3.0.3/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
53353 @@ -144,7 +144,7 @@ struct memory_accessor {
53354 size_t count);
53355 ssize_t (*write)(struct memory_accessor *, const char *buf,
53356 off_t offset, size_t count);
53357 -};
53358 +} __no_const;
53359
53360 /*
53361 * Kernel text modification mutex, used for code patching. Users of this lock
53362 diff -urNp linux-3.0.3/include/linux/mfd/abx500.h linux-3.0.3/include/linux/mfd/abx500.h
53363 --- linux-3.0.3/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
53364 +++ linux-3.0.3/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
53365 @@ -234,6 +234,7 @@ struct abx500_ops {
53366 int (*event_registers_startup_state_get) (struct device *, u8 *);
53367 int (*startup_irq_enabled) (struct device *, unsigned int);
53368 };
53369 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53370
53371 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53372 void abx500_remove_ops(struct device *dev);
53373 diff -urNp linux-3.0.3/include/linux/mm.h linux-3.0.3/include/linux/mm.h
53374 --- linux-3.0.3/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
53375 +++ linux-3.0.3/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
53376 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53377
53378 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53379 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53380 +
53381 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53382 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53383 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53384 +#else
53385 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53386 +#endif
53387 +
53388 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53389 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53390
53391 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
53392 int set_page_dirty_lock(struct page *page);
53393 int clear_page_dirty_for_io(struct page *page);
53394
53395 -/* Is the vma a continuation of the stack vma above it? */
53396 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53397 -{
53398 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53399 -}
53400 -
53401 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53402 - unsigned long addr)
53403 -{
53404 - return (vma->vm_flags & VM_GROWSDOWN) &&
53405 - (vma->vm_start == addr) &&
53406 - !vma_growsdown(vma->vm_prev, addr);
53407 -}
53408 -
53409 -/* Is the vma a continuation of the stack vma below it? */
53410 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53411 -{
53412 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53413 -}
53414 -
53415 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53416 - unsigned long addr)
53417 -{
53418 - return (vma->vm_flags & VM_GROWSUP) &&
53419 - (vma->vm_end == addr) &&
53420 - !vma_growsup(vma->vm_next, addr);
53421 -}
53422 -
53423 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53424 unsigned long old_addr, struct vm_area_struct *new_vma,
53425 unsigned long new_addr, unsigned long len);
53426 @@ -1169,6 +1148,15 @@ struct shrinker {
53427 extern void register_shrinker(struct shrinker *);
53428 extern void unregister_shrinker(struct shrinker *);
53429
53430 +#ifdef CONFIG_MMU
53431 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
53432 +#else
53433 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
53434 +{
53435 + return __pgprot(0);
53436 +}
53437 +#endif
53438 +
53439 int vma_wants_writenotify(struct vm_area_struct *vma);
53440
53441 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53442 @@ -1452,6 +1440,7 @@ out:
53443 }
53444
53445 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53446 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53447
53448 extern unsigned long do_brk(unsigned long, unsigned long);
53449
53450 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
53451 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53452 struct vm_area_struct **pprev);
53453
53454 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53455 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53456 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53457 +
53458 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53459 NULL if none. Assume start_addr < end_addr. */
53460 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53461 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
53462 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53463 }
53464
53465 -#ifdef CONFIG_MMU
53466 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53467 -#else
53468 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53469 -{
53470 - return __pgprot(0);
53471 -}
53472 -#endif
53473 -
53474 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53475 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53476 unsigned long pfn, unsigned long size, pgprot_t);
53477 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
53478 extern int sysctl_memory_failure_early_kill;
53479 extern int sysctl_memory_failure_recovery;
53480 extern void shake_page(struct page *p, int access);
53481 -extern atomic_long_t mce_bad_pages;
53482 +extern atomic_long_unchecked_t mce_bad_pages;
53483 extern int soft_offline_page(struct page *page, int flags);
53484
53485 extern void dump_page(struct page *page);
53486 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
53487 unsigned int pages_per_huge_page);
53488 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53489
53490 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53491 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53492 +#else
53493 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53494 +#endif
53495 +
53496 #endif /* __KERNEL__ */
53497 #endif /* _LINUX_MM_H */
53498 diff -urNp linux-3.0.3/include/linux/mm_types.h linux-3.0.3/include/linux/mm_types.h
53499 --- linux-3.0.3/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
53500 +++ linux-3.0.3/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
53501 @@ -184,6 +184,8 @@ struct vm_area_struct {
53502 #ifdef CONFIG_NUMA
53503 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53504 #endif
53505 +
53506 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53507 };
53508
53509 struct core_thread {
53510 @@ -316,6 +318,24 @@ struct mm_struct {
53511 #ifdef CONFIG_CPUMASK_OFFSTACK
53512 struct cpumask cpumask_allocation;
53513 #endif
53514 +
53515 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53516 + unsigned long pax_flags;
53517 +#endif
53518 +
53519 +#ifdef CONFIG_PAX_DLRESOLVE
53520 + unsigned long call_dl_resolve;
53521 +#endif
53522 +
53523 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53524 + unsigned long call_syscall;
53525 +#endif
53526 +
53527 +#ifdef CONFIG_PAX_ASLR
53528 + unsigned long delta_mmap; /* randomized offset */
53529 + unsigned long delta_stack; /* randomized offset */
53530 +#endif
53531 +
53532 };
53533
53534 static inline void mm_init_cpumask(struct mm_struct *mm)
53535 diff -urNp linux-3.0.3/include/linux/mmu_notifier.h linux-3.0.3/include/linux/mmu_notifier.h
53536 --- linux-3.0.3/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
53537 +++ linux-3.0.3/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
53538 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53539 */
53540 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53541 ({ \
53542 - pte_t __pte; \
53543 + pte_t ___pte; \
53544 struct vm_area_struct *___vma = __vma; \
53545 unsigned long ___address = __address; \
53546 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53547 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53548 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53549 - __pte; \
53550 + ___pte; \
53551 })
53552
53553 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53554 diff -urNp linux-3.0.3/include/linux/mmzone.h linux-3.0.3/include/linux/mmzone.h
53555 --- linux-3.0.3/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
53556 +++ linux-3.0.3/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
53557 @@ -350,7 +350,7 @@ struct zone {
53558 unsigned long flags; /* zone flags, see below */
53559
53560 /* Zone statistics */
53561 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53562 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53563
53564 /*
53565 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53566 diff -urNp linux-3.0.3/include/linux/mod_devicetable.h linux-3.0.3/include/linux/mod_devicetable.h
53567 --- linux-3.0.3/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
53568 +++ linux-3.0.3/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
53569 @@ -12,7 +12,7 @@
53570 typedef unsigned long kernel_ulong_t;
53571 #endif
53572
53573 -#define PCI_ANY_ID (~0)
53574 +#define PCI_ANY_ID ((__u16)~0)
53575
53576 struct pci_device_id {
53577 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53578 @@ -131,7 +131,7 @@ struct usb_device_id {
53579 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53580 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53581
53582 -#define HID_ANY_ID (~0)
53583 +#define HID_ANY_ID (~0U)
53584
53585 struct hid_device_id {
53586 __u16 bus;
53587 diff -urNp linux-3.0.3/include/linux/module.h linux-3.0.3/include/linux/module.h
53588 --- linux-3.0.3/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
53589 +++ linux-3.0.3/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
53590 @@ -16,6 +16,7 @@
53591 #include <linux/kobject.h>
53592 #include <linux/moduleparam.h>
53593 #include <linux/tracepoint.h>
53594 +#include <linux/fs.h>
53595
53596 #include <linux/percpu.h>
53597 #include <asm/module.h>
53598 @@ -325,19 +326,16 @@ struct module
53599 int (*init)(void);
53600
53601 /* If this is non-NULL, vfree after init() returns */
53602 - void *module_init;
53603 + void *module_init_rx, *module_init_rw;
53604
53605 /* Here is the actual code + data, vfree'd on unload. */
53606 - void *module_core;
53607 + void *module_core_rx, *module_core_rw;
53608
53609 /* Here are the sizes of the init and core sections */
53610 - unsigned int init_size, core_size;
53611 + unsigned int init_size_rw, core_size_rw;
53612
53613 /* The size of the executable code in each section. */
53614 - unsigned int init_text_size, core_text_size;
53615 -
53616 - /* Size of RO sections of the module (text+rodata) */
53617 - unsigned int init_ro_size, core_ro_size;
53618 + unsigned int init_size_rx, core_size_rx;
53619
53620 /* Arch-specific module values */
53621 struct mod_arch_specific arch;
53622 @@ -393,6 +391,10 @@ struct module
53623 #ifdef CONFIG_EVENT_TRACING
53624 struct ftrace_event_call **trace_events;
53625 unsigned int num_trace_events;
53626 + struct file_operations trace_id;
53627 + struct file_operations trace_enable;
53628 + struct file_operations trace_format;
53629 + struct file_operations trace_filter;
53630 #endif
53631 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53632 unsigned int num_ftrace_callsites;
53633 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
53634 bool is_module_percpu_address(unsigned long addr);
53635 bool is_module_text_address(unsigned long addr);
53636
53637 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53638 +{
53639 +
53640 +#ifdef CONFIG_PAX_KERNEXEC
53641 + if (ktla_ktva(addr) >= (unsigned long)start &&
53642 + ktla_ktva(addr) < (unsigned long)start + size)
53643 + return 1;
53644 +#endif
53645 +
53646 + return ((void *)addr >= start && (void *)addr < start + size);
53647 +}
53648 +
53649 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53650 +{
53651 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53652 +}
53653 +
53654 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53655 +{
53656 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53657 +}
53658 +
53659 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53660 +{
53661 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53662 +}
53663 +
53664 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53665 +{
53666 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53667 +}
53668 +
53669 static inline int within_module_core(unsigned long addr, struct module *mod)
53670 {
53671 - return (unsigned long)mod->module_core <= addr &&
53672 - addr < (unsigned long)mod->module_core + mod->core_size;
53673 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53674 }
53675
53676 static inline int within_module_init(unsigned long addr, struct module *mod)
53677 {
53678 - return (unsigned long)mod->module_init <= addr &&
53679 - addr < (unsigned long)mod->module_init + mod->init_size;
53680 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53681 }
53682
53683 /* Search for module by name: must hold module_mutex. */
53684 diff -urNp linux-3.0.3/include/linux/moduleloader.h linux-3.0.3/include/linux/moduleloader.h
53685 --- linux-3.0.3/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
53686 +++ linux-3.0.3/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
53687 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53688 sections. Returns NULL on failure. */
53689 void *module_alloc(unsigned long size);
53690
53691 +#ifdef CONFIG_PAX_KERNEXEC
53692 +void *module_alloc_exec(unsigned long size);
53693 +#else
53694 +#define module_alloc_exec(x) module_alloc(x)
53695 +#endif
53696 +
53697 /* Free memory returned from module_alloc. */
53698 void module_free(struct module *mod, void *module_region);
53699
53700 +#ifdef CONFIG_PAX_KERNEXEC
53701 +void module_free_exec(struct module *mod, void *module_region);
53702 +#else
53703 +#define module_free_exec(x, y) module_free((x), (y))
53704 +#endif
53705 +
53706 /* Apply the given relocation to the (simplified) ELF. Return -error
53707 or 0. */
53708 int apply_relocate(Elf_Shdr *sechdrs,
53709 diff -urNp linux-3.0.3/include/linux/moduleparam.h linux-3.0.3/include/linux/moduleparam.h
53710 --- linux-3.0.3/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
53711 +++ linux-3.0.3/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
53712 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53713 * @len is usually just sizeof(string).
53714 */
53715 #define module_param_string(name, string, len, perm) \
53716 - static const struct kparam_string __param_string_##name \
53717 + static const struct kparam_string __param_string_##name __used \
53718 = { len, string }; \
53719 __module_param_call(MODULE_PARAM_PREFIX, name, \
53720 &param_ops_string, \
53721 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53722 * module_param_named() for why this might be necessary.
53723 */
53724 #define module_param_array_named(name, array, type, nump, perm) \
53725 - static const struct kparam_array __param_arr_##name \
53726 + static const struct kparam_array __param_arr_##name __used \
53727 = { .max = ARRAY_SIZE(array), .num = nump, \
53728 .ops = &param_ops_##type, \
53729 .elemsize = sizeof(array[0]), .elem = array }; \
53730 diff -urNp linux-3.0.3/include/linux/namei.h linux-3.0.3/include/linux/namei.h
53731 --- linux-3.0.3/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
53732 +++ linux-3.0.3/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
53733 @@ -24,7 +24,7 @@ struct nameidata {
53734 unsigned seq;
53735 int last_type;
53736 unsigned depth;
53737 - char *saved_names[MAX_NESTED_LINKS + 1];
53738 + const char *saved_names[MAX_NESTED_LINKS + 1];
53739
53740 /* Intent data */
53741 union {
53742 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53743 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53744 extern void unlock_rename(struct dentry *, struct dentry *);
53745
53746 -static inline void nd_set_link(struct nameidata *nd, char *path)
53747 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53748 {
53749 nd->saved_names[nd->depth] = path;
53750 }
53751
53752 -static inline char *nd_get_link(struct nameidata *nd)
53753 +static inline const char *nd_get_link(const struct nameidata *nd)
53754 {
53755 return nd->saved_names[nd->depth];
53756 }
53757 diff -urNp linux-3.0.3/include/linux/netdevice.h linux-3.0.3/include/linux/netdevice.h
53758 --- linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
53759 +++ linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
53760 @@ -979,6 +979,7 @@ struct net_device_ops {
53761 int (*ndo_set_features)(struct net_device *dev,
53762 u32 features);
53763 };
53764 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53765
53766 /*
53767 * The DEVICE structure.
53768 diff -urNp linux-3.0.3/include/linux/netfilter/xt_gradm.h linux-3.0.3/include/linux/netfilter/xt_gradm.h
53769 --- linux-3.0.3/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53770 +++ linux-3.0.3/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
53771 @@ -0,0 +1,9 @@
53772 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53773 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53774 +
53775 +struct xt_gradm_mtinfo {
53776 + __u16 flags;
53777 + __u16 invflags;
53778 +};
53779 +
53780 +#endif
53781 diff -urNp linux-3.0.3/include/linux/oprofile.h linux-3.0.3/include/linux/oprofile.h
53782 --- linux-3.0.3/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
53783 +++ linux-3.0.3/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
53784 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53785 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53786 char const * name, ulong * val);
53787
53788 -/** Create a file for read-only access to an atomic_t. */
53789 +/** Create a file for read-only access to an atomic_unchecked_t. */
53790 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53791 - char const * name, atomic_t * val);
53792 + char const * name, atomic_unchecked_t * val);
53793
53794 /** create a directory */
53795 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53796 diff -urNp linux-3.0.3/include/linux/padata.h linux-3.0.3/include/linux/padata.h
53797 --- linux-3.0.3/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
53798 +++ linux-3.0.3/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
53799 @@ -129,7 +129,7 @@ struct parallel_data {
53800 struct padata_instance *pinst;
53801 struct padata_parallel_queue __percpu *pqueue;
53802 struct padata_serial_queue __percpu *squeue;
53803 - atomic_t seq_nr;
53804 + atomic_unchecked_t seq_nr;
53805 atomic_t reorder_objects;
53806 atomic_t refcnt;
53807 unsigned int max_seq_nr;
53808 diff -urNp linux-3.0.3/include/linux/perf_event.h linux-3.0.3/include/linux/perf_event.h
53809 --- linux-3.0.3/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
53810 +++ linux-3.0.3/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
53811 @@ -761,8 +761,8 @@ struct perf_event {
53812
53813 enum perf_event_active_state state;
53814 unsigned int attach_state;
53815 - local64_t count;
53816 - atomic64_t child_count;
53817 + local64_t count; /* PaX: fix it one day */
53818 + atomic64_unchecked_t child_count;
53819
53820 /*
53821 * These are the total time in nanoseconds that the event
53822 @@ -813,8 +813,8 @@ struct perf_event {
53823 * These accumulate total time (in nanoseconds) that children
53824 * events have been enabled and running, respectively.
53825 */
53826 - atomic64_t child_total_time_enabled;
53827 - atomic64_t child_total_time_running;
53828 + atomic64_unchecked_t child_total_time_enabled;
53829 + atomic64_unchecked_t child_total_time_running;
53830
53831 /*
53832 * Protect attach/detach and child_list:
53833 diff -urNp linux-3.0.3/include/linux/pipe_fs_i.h linux-3.0.3/include/linux/pipe_fs_i.h
53834 --- linux-3.0.3/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
53835 +++ linux-3.0.3/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
53836 @@ -46,9 +46,9 @@ struct pipe_buffer {
53837 struct pipe_inode_info {
53838 wait_queue_head_t wait;
53839 unsigned int nrbufs, curbuf, buffers;
53840 - unsigned int readers;
53841 - unsigned int writers;
53842 - unsigned int waiting_writers;
53843 + atomic_t readers;
53844 + atomic_t writers;
53845 + atomic_t waiting_writers;
53846 unsigned int r_counter;
53847 unsigned int w_counter;
53848 struct page *tmp_page;
53849 diff -urNp linux-3.0.3/include/linux/pm_runtime.h linux-3.0.3/include/linux/pm_runtime.h
53850 --- linux-3.0.3/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
53851 +++ linux-3.0.3/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
53852 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53853
53854 static inline void pm_runtime_mark_last_busy(struct device *dev)
53855 {
53856 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
53857 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53858 }
53859
53860 #else /* !CONFIG_PM_RUNTIME */
53861 diff -urNp linux-3.0.3/include/linux/poison.h linux-3.0.3/include/linux/poison.h
53862 --- linux-3.0.3/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
53863 +++ linux-3.0.3/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
53864 @@ -19,8 +19,8 @@
53865 * under normal circumstances, used to verify that nobody uses
53866 * non-initialized list entries.
53867 */
53868 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53869 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53870 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53871 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53872
53873 /********** include/linux/timer.h **********/
53874 /*
53875 diff -urNp linux-3.0.3/include/linux/preempt.h linux-3.0.3/include/linux/preempt.h
53876 --- linux-3.0.3/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
53877 +++ linux-3.0.3/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
53878 @@ -115,7 +115,7 @@ struct preempt_ops {
53879 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53880 void (*sched_out)(struct preempt_notifier *notifier,
53881 struct task_struct *next);
53882 -};
53883 +} __no_const;
53884
53885 /**
53886 * preempt_notifier - key for installing preemption notifiers
53887 diff -urNp linux-3.0.3/include/linux/proc_fs.h linux-3.0.3/include/linux/proc_fs.h
53888 --- linux-3.0.3/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
53889 +++ linux-3.0.3/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
53890 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53891 return proc_create_data(name, mode, parent, proc_fops, NULL);
53892 }
53893
53894 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53895 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53896 +{
53897 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53898 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53899 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53900 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53901 +#else
53902 + return proc_create_data(name, mode, parent, proc_fops, NULL);
53903 +#endif
53904 +}
53905 +
53906 +
53907 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
53908 mode_t mode, struct proc_dir_entry *base,
53909 read_proc_t *read_proc, void * data)
53910 @@ -258,7 +271,7 @@ union proc_op {
53911 int (*proc_show)(struct seq_file *m,
53912 struct pid_namespace *ns, struct pid *pid,
53913 struct task_struct *task);
53914 -};
53915 +} __no_const;
53916
53917 struct ctl_table_header;
53918 struct ctl_table;
53919 diff -urNp linux-3.0.3/include/linux/ptrace.h linux-3.0.3/include/linux/ptrace.h
53920 --- linux-3.0.3/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
53921 +++ linux-3.0.3/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
53922 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
53923 extern void exit_ptrace(struct task_struct *tracer);
53924 #define PTRACE_MODE_READ 1
53925 #define PTRACE_MODE_ATTACH 2
53926 -/* Returns 0 on success, -errno on denial. */
53927 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
53928 /* Returns true on success, false on denial. */
53929 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
53930 +/* Returns true on success, false on denial. */
53931 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
53932
53933 static inline int ptrace_reparented(struct task_struct *child)
53934 {
53935 diff -urNp linux-3.0.3/include/linux/random.h linux-3.0.3/include/linux/random.h
53936 --- linux-3.0.3/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
53937 +++ linux-3.0.3/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
53938 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
53939
53940 u32 prandom32(struct rnd_state *);
53941
53942 +static inline unsigned long pax_get_random_long(void)
53943 +{
53944 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
53945 +}
53946 +
53947 /*
53948 * Handle minimum values for seeds
53949 */
53950 static inline u32 __seed(u32 x, u32 m)
53951 {
53952 - return (x < m) ? x + m : x;
53953 + return (x <= m) ? x + m + 1 : x;
53954 }
53955
53956 /**
53957 diff -urNp linux-3.0.3/include/linux/reboot.h linux-3.0.3/include/linux/reboot.h
53958 --- linux-3.0.3/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
53959 +++ linux-3.0.3/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
53960 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
53961 * Architecture-specific implementations of sys_reboot commands.
53962 */
53963
53964 -extern void machine_restart(char *cmd);
53965 -extern void machine_halt(void);
53966 -extern void machine_power_off(void);
53967 +extern void machine_restart(char *cmd) __noreturn;
53968 +extern void machine_halt(void) __noreturn;
53969 +extern void machine_power_off(void) __noreturn;
53970
53971 extern void machine_shutdown(void);
53972 struct pt_regs;
53973 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
53974 */
53975
53976 extern void kernel_restart_prepare(char *cmd);
53977 -extern void kernel_restart(char *cmd);
53978 -extern void kernel_halt(void);
53979 -extern void kernel_power_off(void);
53980 +extern void kernel_restart(char *cmd) __noreturn;
53981 +extern void kernel_halt(void) __noreturn;
53982 +extern void kernel_power_off(void) __noreturn;
53983
53984 extern int C_A_D; /* for sysctl */
53985 void ctrl_alt_del(void);
53986 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
53987 * Emergency restart, callable from an interrupt handler.
53988 */
53989
53990 -extern void emergency_restart(void);
53991 +extern void emergency_restart(void) __noreturn;
53992 #include <asm/emergency-restart.h>
53993
53994 #endif
53995 diff -urNp linux-3.0.3/include/linux/reiserfs_fs.h linux-3.0.3/include/linux/reiserfs_fs.h
53996 --- linux-3.0.3/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
53997 +++ linux-3.0.3/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
53998 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
53999 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54000
54001 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54002 -#define get_generation(s) atomic_read (&fs_generation(s))
54003 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54004 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54005 #define __fs_changed(gen,s) (gen != get_generation (s))
54006 #define fs_changed(gen,s) \
54007 diff -urNp linux-3.0.3/include/linux/reiserfs_fs_sb.h linux-3.0.3/include/linux/reiserfs_fs_sb.h
54008 --- linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
54009 +++ linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
54010 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54011 /* Comment? -Hans */
54012 wait_queue_head_t s_wait;
54013 /* To be obsoleted soon by per buffer seals.. -Hans */
54014 - atomic_t s_generation_counter; // increased by one every time the
54015 + atomic_unchecked_t s_generation_counter; // increased by one every time the
54016 // tree gets re-balanced
54017 unsigned long s_properties; /* File system properties. Currently holds
54018 on-disk FS format */
54019 diff -urNp linux-3.0.3/include/linux/relay.h linux-3.0.3/include/linux/relay.h
54020 --- linux-3.0.3/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
54021 +++ linux-3.0.3/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
54022 @@ -159,7 +159,7 @@ struct rchan_callbacks
54023 * The callback should return 0 if successful, negative if not.
54024 */
54025 int (*remove_buf_file)(struct dentry *dentry);
54026 -};
54027 +} __no_const;
54028
54029 /*
54030 * CONFIG_RELAY kernel API, kernel/relay.c
54031 diff -urNp linux-3.0.3/include/linux/rfkill.h linux-3.0.3/include/linux/rfkill.h
54032 --- linux-3.0.3/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
54033 +++ linux-3.0.3/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
54034 @@ -147,6 +147,7 @@ struct rfkill_ops {
54035 void (*query)(struct rfkill *rfkill, void *data);
54036 int (*set_block)(void *data, bool blocked);
54037 };
54038 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54039
54040 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54041 /**
54042 diff -urNp linux-3.0.3/include/linux/rmap.h linux-3.0.3/include/linux/rmap.h
54043 --- linux-3.0.3/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
54044 +++ linux-3.0.3/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
54045 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54046 void anon_vma_init(void); /* create anon_vma_cachep */
54047 int anon_vma_prepare(struct vm_area_struct *);
54048 void unlink_anon_vmas(struct vm_area_struct *);
54049 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54050 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54051 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54052 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54053 void __anon_vma_link(struct vm_area_struct *);
54054
54055 static inline void anon_vma_merge(struct vm_area_struct *vma,
54056 diff -urNp linux-3.0.3/include/linux/sched.h linux-3.0.3/include/linux/sched.h
54057 --- linux-3.0.3/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
54058 +++ linux-3.0.3/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
54059 @@ -100,6 +100,7 @@ struct bio_list;
54060 struct fs_struct;
54061 struct perf_event_context;
54062 struct blk_plug;
54063 +struct linux_binprm;
54064
54065 /*
54066 * List of flags we want to share for kernel threads,
54067 @@ -380,10 +381,13 @@ struct user_namespace;
54068 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54069
54070 extern int sysctl_max_map_count;
54071 +extern unsigned long sysctl_heap_stack_gap;
54072
54073 #include <linux/aio.h>
54074
54075 #ifdef CONFIG_MMU
54076 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54077 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54078 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54079 extern unsigned long
54080 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54081 @@ -629,6 +633,17 @@ struct signal_struct {
54082 #ifdef CONFIG_TASKSTATS
54083 struct taskstats *stats;
54084 #endif
54085 +
54086 +#ifdef CONFIG_GRKERNSEC
54087 + u32 curr_ip;
54088 + u32 saved_ip;
54089 + u32 gr_saddr;
54090 + u32 gr_daddr;
54091 + u16 gr_sport;
54092 + u16 gr_dport;
54093 + u8 used_accept:1;
54094 +#endif
54095 +
54096 #ifdef CONFIG_AUDIT
54097 unsigned audit_tty;
54098 struct tty_audit_buf *tty_audit_buf;
54099 @@ -710,6 +725,11 @@ struct user_struct {
54100 struct key *session_keyring; /* UID's default session keyring */
54101 #endif
54102
54103 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54104 + unsigned int banned;
54105 + unsigned long ban_expires;
54106 +#endif
54107 +
54108 /* Hash table maintenance information */
54109 struct hlist_node uidhash_node;
54110 uid_t uid;
54111 @@ -1340,8 +1360,8 @@ struct task_struct {
54112 struct list_head thread_group;
54113
54114 struct completion *vfork_done; /* for vfork() */
54115 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54116 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54117 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54118 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54119
54120 cputime_t utime, stime, utimescaled, stimescaled;
54121 cputime_t gtime;
54122 @@ -1357,13 +1377,6 @@ struct task_struct {
54123 struct task_cputime cputime_expires;
54124 struct list_head cpu_timers[3];
54125
54126 -/* process credentials */
54127 - const struct cred __rcu *real_cred; /* objective and real subjective task
54128 - * credentials (COW) */
54129 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54130 - * credentials (COW) */
54131 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54132 -
54133 char comm[TASK_COMM_LEN]; /* executable name excluding path
54134 - access with [gs]et_task_comm (which lock
54135 it with task_lock())
54136 @@ -1380,8 +1393,16 @@ struct task_struct {
54137 #endif
54138 /* CPU-specific state of this task */
54139 struct thread_struct thread;
54140 +/* thread_info moved to task_struct */
54141 +#ifdef CONFIG_X86
54142 + struct thread_info tinfo;
54143 +#endif
54144 /* filesystem information */
54145 struct fs_struct *fs;
54146 +
54147 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54148 + * credentials (COW) */
54149 +
54150 /* open file information */
54151 struct files_struct *files;
54152 /* namespaces */
54153 @@ -1428,6 +1449,11 @@ struct task_struct {
54154 struct rt_mutex_waiter *pi_blocked_on;
54155 #endif
54156
54157 +/* process credentials */
54158 + const struct cred __rcu *real_cred; /* objective and real subjective task
54159 + * credentials (COW) */
54160 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54161 +
54162 #ifdef CONFIG_DEBUG_MUTEXES
54163 /* mutex deadlock detection */
54164 struct mutex_waiter *blocked_on;
54165 @@ -1538,6 +1564,21 @@ struct task_struct {
54166 unsigned long default_timer_slack_ns;
54167
54168 struct list_head *scm_work_list;
54169 +
54170 +#ifdef CONFIG_GRKERNSEC
54171 + /* grsecurity */
54172 + struct dentry *gr_chroot_dentry;
54173 + struct acl_subject_label *acl;
54174 + struct acl_role_label *role;
54175 + struct file *exec_file;
54176 + u16 acl_role_id;
54177 + /* is this the task that authenticated to the special role */
54178 + u8 acl_sp_role;
54179 + u8 is_writable;
54180 + u8 brute;
54181 + u8 gr_is_chrooted;
54182 +#endif
54183 +
54184 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54185 /* Index of current stored address in ret_stack */
54186 int curr_ret_stack;
54187 @@ -1572,6 +1613,57 @@ struct task_struct {
54188 #endif
54189 };
54190
54191 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54192 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54193 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54194 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54195 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54196 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54197 +
54198 +#ifdef CONFIG_PAX_SOFTMODE
54199 +extern int pax_softmode;
54200 +#endif
54201 +
54202 +extern int pax_check_flags(unsigned long *);
54203 +
54204 +/* if tsk != current then task_lock must be held on it */
54205 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54206 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54207 +{
54208 + if (likely(tsk->mm))
54209 + return tsk->mm->pax_flags;
54210 + else
54211 + return 0UL;
54212 +}
54213 +
54214 +/* if tsk != current then task_lock must be held on it */
54215 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54216 +{
54217 + if (likely(tsk->mm)) {
54218 + tsk->mm->pax_flags = flags;
54219 + return 0;
54220 + }
54221 + return -EINVAL;
54222 +}
54223 +#endif
54224 +
54225 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54226 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54227 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54228 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54229 +#endif
54230 +
54231 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54232 +extern void pax_report_insns(void *pc, void *sp);
54233 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54234 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54235 +
54236 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54237 +extern void pax_track_stack(void);
54238 +#else
54239 +static inline void pax_track_stack(void) {}
54240 +#endif
54241 +
54242 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54243 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54244
54245 @@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
54246 #define PF_DUMPCORE 0x00000200 /* dumped core */
54247 #define PF_SIGNALED 0x00000400 /* killed by a signal */
54248 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
54249 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
54250 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
54251 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
54252 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
54253 @@ -2056,7 +2149,9 @@ void yield(void);
54254 extern struct exec_domain default_exec_domain;
54255
54256 union thread_union {
54257 +#ifndef CONFIG_X86
54258 struct thread_info thread_info;
54259 +#endif
54260 unsigned long stack[THREAD_SIZE/sizeof(long)];
54261 };
54262
54263 @@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
54264 */
54265
54266 extern struct task_struct *find_task_by_vpid(pid_t nr);
54267 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54268 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54269 struct pid_namespace *ns);
54270
54271 @@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
54272 extern void exit_itimers(struct signal_struct *);
54273 extern void flush_itimer_signals(void);
54274
54275 -extern NORET_TYPE void do_group_exit(int);
54276 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54277
54278 extern void daemonize(const char *, ...);
54279 extern int allow_signal(int);
54280 @@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
54281
54282 #endif
54283
54284 -static inline int object_is_on_stack(void *obj)
54285 +static inline int object_starts_on_stack(void *obj)
54286 {
54287 - void *stack = task_stack_page(current);
54288 + const void *stack = task_stack_page(current);
54289
54290 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54291 }
54292
54293 +#ifdef CONFIG_PAX_USERCOPY
54294 +extern int object_is_on_stack(const void *obj, unsigned long len);
54295 +#endif
54296 +
54297 extern void thread_info_cache_init(void);
54298
54299 #ifdef CONFIG_DEBUG_STACK_USAGE
54300 diff -urNp linux-3.0.3/include/linux/screen_info.h linux-3.0.3/include/linux/screen_info.h
54301 --- linux-3.0.3/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
54302 +++ linux-3.0.3/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
54303 @@ -43,7 +43,8 @@ struct screen_info {
54304 __u16 pages; /* 0x32 */
54305 __u16 vesa_attributes; /* 0x34 */
54306 __u32 capabilities; /* 0x36 */
54307 - __u8 _reserved[6]; /* 0x3a */
54308 + __u16 vesapm_size; /* 0x3a */
54309 + __u8 _reserved[4]; /* 0x3c */
54310 } __attribute__((packed));
54311
54312 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54313 diff -urNp linux-3.0.3/include/linux/security.h linux-3.0.3/include/linux/security.h
54314 --- linux-3.0.3/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
54315 +++ linux-3.0.3/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
54316 @@ -36,6 +36,7 @@
54317 #include <linux/key.h>
54318 #include <linux/xfrm.h>
54319 #include <linux/slab.h>
54320 +#include <linux/grsecurity.h>
54321 #include <net/flow.h>
54322
54323 /* Maximum number of letters for an LSM name string */
54324 diff -urNp linux-3.0.3/include/linux/seq_file.h linux-3.0.3/include/linux/seq_file.h
54325 --- linux-3.0.3/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
54326 +++ linux-3.0.3/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
54327 @@ -32,6 +32,7 @@ struct seq_operations {
54328 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54329 int (*show) (struct seq_file *m, void *v);
54330 };
54331 +typedef struct seq_operations __no_const seq_operations_no_const;
54332
54333 #define SEQ_SKIP 1
54334
54335 diff -urNp linux-3.0.3/include/linux/shmem_fs.h linux-3.0.3/include/linux/shmem_fs.h
54336 --- linux-3.0.3/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
54337 +++ linux-3.0.3/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
54338 @@ -10,7 +10,7 @@
54339
54340 #define SHMEM_NR_DIRECT 16
54341
54342 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
54343 +#define SHMEM_SYMLINK_INLINE_LEN 64
54344
54345 struct shmem_inode_info {
54346 spinlock_t lock;
54347 diff -urNp linux-3.0.3/include/linux/shm.h linux-3.0.3/include/linux/shm.h
54348 --- linux-3.0.3/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
54349 +++ linux-3.0.3/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
54350 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54351 pid_t shm_cprid;
54352 pid_t shm_lprid;
54353 struct user_struct *mlock_user;
54354 +#ifdef CONFIG_GRKERNSEC
54355 + time_t shm_createtime;
54356 + pid_t shm_lapid;
54357 +#endif
54358 };
54359
54360 /* shm_mode upper byte flags */
54361 diff -urNp linux-3.0.3/include/linux/skbuff.h linux-3.0.3/include/linux/skbuff.h
54362 --- linux-3.0.3/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
54363 +++ linux-3.0.3/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
54364 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54365 */
54366 static inline int skb_queue_empty(const struct sk_buff_head *list)
54367 {
54368 - return list->next == (struct sk_buff *)list;
54369 + return list->next == (const struct sk_buff *)list;
54370 }
54371
54372 /**
54373 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54374 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54375 const struct sk_buff *skb)
54376 {
54377 - return skb->next == (struct sk_buff *)list;
54378 + return skb->next == (const struct sk_buff *)list;
54379 }
54380
54381 /**
54382 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54383 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54384 const struct sk_buff *skb)
54385 {
54386 - return skb->prev == (struct sk_buff *)list;
54387 + return skb->prev == (const struct sk_buff *)list;
54388 }
54389
54390 /**
54391 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
54392 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54393 */
54394 #ifndef NET_SKB_PAD
54395 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54396 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54397 #endif
54398
54399 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54400 diff -urNp linux-3.0.3/include/linux/slab_def.h linux-3.0.3/include/linux/slab_def.h
54401 --- linux-3.0.3/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
54402 +++ linux-3.0.3/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
54403 @@ -96,10 +96,10 @@ struct kmem_cache {
54404 unsigned long node_allocs;
54405 unsigned long node_frees;
54406 unsigned long node_overflow;
54407 - atomic_t allochit;
54408 - atomic_t allocmiss;
54409 - atomic_t freehit;
54410 - atomic_t freemiss;
54411 + atomic_unchecked_t allochit;
54412 + atomic_unchecked_t allocmiss;
54413 + atomic_unchecked_t freehit;
54414 + atomic_unchecked_t freemiss;
54415
54416 /*
54417 * If debugging is enabled, then the allocator can add additional
54418 diff -urNp linux-3.0.3/include/linux/slab.h linux-3.0.3/include/linux/slab.h
54419 --- linux-3.0.3/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
54420 +++ linux-3.0.3/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
54421 @@ -11,12 +11,20 @@
54422
54423 #include <linux/gfp.h>
54424 #include <linux/types.h>
54425 +#include <linux/err.h>
54426
54427 /*
54428 * Flags to pass to kmem_cache_create().
54429 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54430 */
54431 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54432 +
54433 +#ifdef CONFIG_PAX_USERCOPY
54434 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54435 +#else
54436 +#define SLAB_USERCOPY 0x00000000UL
54437 +#endif
54438 +
54439 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54440 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54441 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54442 @@ -87,10 +95,13 @@
54443 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54444 * Both make kfree a no-op.
54445 */
54446 -#define ZERO_SIZE_PTR ((void *)16)
54447 +#define ZERO_SIZE_PTR \
54448 +({ \
54449 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54450 + (void *)(-MAX_ERRNO-1L); \
54451 +})
54452
54453 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54454 - (unsigned long)ZERO_SIZE_PTR)
54455 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54456
54457 /*
54458 * struct kmem_cache related prototypes
54459 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54460 void kfree(const void *);
54461 void kzfree(const void *);
54462 size_t ksize(const void *);
54463 +void check_object_size(const void *ptr, unsigned long n, bool to);
54464
54465 /*
54466 * Allocator specific definitions. These are mainly used to establish optimized
54467 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54468
54469 void __init kmem_cache_init_late(void);
54470
54471 +#define kmalloc(x, y) \
54472 +({ \
54473 + void *___retval; \
54474 + intoverflow_t ___x = (intoverflow_t)x; \
54475 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54476 + ___retval = NULL; \
54477 + else \
54478 + ___retval = kmalloc((size_t)___x, (y)); \
54479 + ___retval; \
54480 +})
54481 +
54482 +#define kmalloc_node(x, y, z) \
54483 +({ \
54484 + void *___retval; \
54485 + intoverflow_t ___x = (intoverflow_t)x; \
54486 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54487 + ___retval = NULL; \
54488 + else \
54489 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54490 + ___retval; \
54491 +})
54492 +
54493 +#define kzalloc(x, y) \
54494 +({ \
54495 + void *___retval; \
54496 + intoverflow_t ___x = (intoverflow_t)x; \
54497 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54498 + ___retval = NULL; \
54499 + else \
54500 + ___retval = kzalloc((size_t)___x, (y)); \
54501 + ___retval; \
54502 +})
54503 +
54504 +#define __krealloc(x, y, z) \
54505 +({ \
54506 + void *___retval; \
54507 + intoverflow_t ___y = (intoverflow_t)y; \
54508 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54509 + ___retval = NULL; \
54510 + else \
54511 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54512 + ___retval; \
54513 +})
54514 +
54515 +#define krealloc(x, y, z) \
54516 +({ \
54517 + void *___retval; \
54518 + intoverflow_t ___y = (intoverflow_t)y; \
54519 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54520 + ___retval = NULL; \
54521 + else \
54522 + ___retval = krealloc((x), (size_t)___y, (z)); \
54523 + ___retval; \
54524 +})
54525 +
54526 #endif /* _LINUX_SLAB_H */
54527 diff -urNp linux-3.0.3/include/linux/slub_def.h linux-3.0.3/include/linux/slub_def.h
54528 --- linux-3.0.3/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
54529 +++ linux-3.0.3/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
54530 @@ -82,7 +82,7 @@ struct kmem_cache {
54531 struct kmem_cache_order_objects max;
54532 struct kmem_cache_order_objects min;
54533 gfp_t allocflags; /* gfp flags to use on each alloc */
54534 - int refcount; /* Refcount for slab cache destroy */
54535 + atomic_t refcount; /* Refcount for slab cache destroy */
54536 void (*ctor)(void *);
54537 int inuse; /* Offset to metadata */
54538 int align; /* Alignment */
54539 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54540 }
54541
54542 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54543 -void *__kmalloc(size_t size, gfp_t flags);
54544 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54545
54546 static __always_inline void *
54547 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54548 diff -urNp linux-3.0.3/include/linux/sonet.h linux-3.0.3/include/linux/sonet.h
54549 --- linux-3.0.3/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
54550 +++ linux-3.0.3/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
54551 @@ -61,7 +61,7 @@ struct sonet_stats {
54552 #include <asm/atomic.h>
54553
54554 struct k_sonet_stats {
54555 -#define __HANDLE_ITEM(i) atomic_t i
54556 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54557 __SONET_ITEMS
54558 #undef __HANDLE_ITEM
54559 };
54560 diff -urNp linux-3.0.3/include/linux/sunrpc/clnt.h linux-3.0.3/include/linux/sunrpc/clnt.h
54561 --- linux-3.0.3/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
54562 +++ linux-3.0.3/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
54563 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54564 {
54565 switch (sap->sa_family) {
54566 case AF_INET:
54567 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54568 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54569 case AF_INET6:
54570 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54571 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54572 }
54573 return 0;
54574 }
54575 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54576 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54577 const struct sockaddr *src)
54578 {
54579 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54580 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54581 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54582
54583 dsin->sin_family = ssin->sin_family;
54584 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54585 if (sa->sa_family != AF_INET6)
54586 return 0;
54587
54588 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54589 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54590 }
54591
54592 #endif /* __KERNEL__ */
54593 diff -urNp linux-3.0.3/include/linux/sunrpc/svc_rdma.h linux-3.0.3/include/linux/sunrpc/svc_rdma.h
54594 --- linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
54595 +++ linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
54596 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54597 extern unsigned int svcrdma_max_requests;
54598 extern unsigned int svcrdma_max_req_size;
54599
54600 -extern atomic_t rdma_stat_recv;
54601 -extern atomic_t rdma_stat_read;
54602 -extern atomic_t rdma_stat_write;
54603 -extern atomic_t rdma_stat_sq_starve;
54604 -extern atomic_t rdma_stat_rq_starve;
54605 -extern atomic_t rdma_stat_rq_poll;
54606 -extern atomic_t rdma_stat_rq_prod;
54607 -extern atomic_t rdma_stat_sq_poll;
54608 -extern atomic_t rdma_stat_sq_prod;
54609 +extern atomic_unchecked_t rdma_stat_recv;
54610 +extern atomic_unchecked_t rdma_stat_read;
54611 +extern atomic_unchecked_t rdma_stat_write;
54612 +extern atomic_unchecked_t rdma_stat_sq_starve;
54613 +extern atomic_unchecked_t rdma_stat_rq_starve;
54614 +extern atomic_unchecked_t rdma_stat_rq_poll;
54615 +extern atomic_unchecked_t rdma_stat_rq_prod;
54616 +extern atomic_unchecked_t rdma_stat_sq_poll;
54617 +extern atomic_unchecked_t rdma_stat_sq_prod;
54618
54619 #define RPCRDMA_VERSION 1
54620
54621 diff -urNp linux-3.0.3/include/linux/sysctl.h linux-3.0.3/include/linux/sysctl.h
54622 --- linux-3.0.3/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
54623 +++ linux-3.0.3/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
54624 @@ -155,7 +155,11 @@ enum
54625 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54626 };
54627
54628 -
54629 +#ifdef CONFIG_PAX_SOFTMODE
54630 +enum {
54631 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54632 +};
54633 +#endif
54634
54635 /* CTL_VM names: */
54636 enum
54637 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54638
54639 extern int proc_dostring(struct ctl_table *, int,
54640 void __user *, size_t *, loff_t *);
54641 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54642 + void __user *, size_t *, loff_t *);
54643 extern int proc_dointvec(struct ctl_table *, int,
54644 void __user *, size_t *, loff_t *);
54645 extern int proc_dointvec_minmax(struct ctl_table *, int,
54646 diff -urNp linux-3.0.3/include/linux/tty_ldisc.h linux-3.0.3/include/linux/tty_ldisc.h
54647 --- linux-3.0.3/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
54648 +++ linux-3.0.3/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
54649 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54650
54651 struct module *owner;
54652
54653 - int refcount;
54654 + atomic_t refcount;
54655 };
54656
54657 struct tty_ldisc {
54658 diff -urNp linux-3.0.3/include/linux/types.h linux-3.0.3/include/linux/types.h
54659 --- linux-3.0.3/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
54660 +++ linux-3.0.3/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
54661 @@ -213,10 +213,26 @@ typedef struct {
54662 int counter;
54663 } atomic_t;
54664
54665 +#ifdef CONFIG_PAX_REFCOUNT
54666 +typedef struct {
54667 + int counter;
54668 +} atomic_unchecked_t;
54669 +#else
54670 +typedef atomic_t atomic_unchecked_t;
54671 +#endif
54672 +
54673 #ifdef CONFIG_64BIT
54674 typedef struct {
54675 long counter;
54676 } atomic64_t;
54677 +
54678 +#ifdef CONFIG_PAX_REFCOUNT
54679 +typedef struct {
54680 + long counter;
54681 +} atomic64_unchecked_t;
54682 +#else
54683 +typedef atomic64_t atomic64_unchecked_t;
54684 +#endif
54685 #endif
54686
54687 struct list_head {
54688 diff -urNp linux-3.0.3/include/linux/uaccess.h linux-3.0.3/include/linux/uaccess.h
54689 --- linux-3.0.3/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
54690 +++ linux-3.0.3/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
54691 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54692 long ret; \
54693 mm_segment_t old_fs = get_fs(); \
54694 \
54695 - set_fs(KERNEL_DS); \
54696 pagefault_disable(); \
54697 + set_fs(KERNEL_DS); \
54698 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54699 - pagefault_enable(); \
54700 set_fs(old_fs); \
54701 + pagefault_enable(); \
54702 ret; \
54703 })
54704
54705 diff -urNp linux-3.0.3/include/linux/unaligned/access_ok.h linux-3.0.3/include/linux/unaligned/access_ok.h
54706 --- linux-3.0.3/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
54707 +++ linux-3.0.3/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
54708 @@ -6,32 +6,32 @@
54709
54710 static inline u16 get_unaligned_le16(const void *p)
54711 {
54712 - return le16_to_cpup((__le16 *)p);
54713 + return le16_to_cpup((const __le16 *)p);
54714 }
54715
54716 static inline u32 get_unaligned_le32(const void *p)
54717 {
54718 - return le32_to_cpup((__le32 *)p);
54719 + return le32_to_cpup((const __le32 *)p);
54720 }
54721
54722 static inline u64 get_unaligned_le64(const void *p)
54723 {
54724 - return le64_to_cpup((__le64 *)p);
54725 + return le64_to_cpup((const __le64 *)p);
54726 }
54727
54728 static inline u16 get_unaligned_be16(const void *p)
54729 {
54730 - return be16_to_cpup((__be16 *)p);
54731 + return be16_to_cpup((const __be16 *)p);
54732 }
54733
54734 static inline u32 get_unaligned_be32(const void *p)
54735 {
54736 - return be32_to_cpup((__be32 *)p);
54737 + return be32_to_cpup((const __be32 *)p);
54738 }
54739
54740 static inline u64 get_unaligned_be64(const void *p)
54741 {
54742 - return be64_to_cpup((__be64 *)p);
54743 + return be64_to_cpup((const __be64 *)p);
54744 }
54745
54746 static inline void put_unaligned_le16(u16 val, void *p)
54747 diff -urNp linux-3.0.3/include/linux/vmalloc.h linux-3.0.3/include/linux/vmalloc.h
54748 --- linux-3.0.3/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
54749 +++ linux-3.0.3/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
54750 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54751 #define VM_MAP 0x00000004 /* vmap()ed pages */
54752 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54753 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54754 +
54755 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54756 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54757 +#endif
54758 +
54759 /* bits [20..32] reserved for arch specific ioremap internals */
54760
54761 /*
54762 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54763 # endif
54764 #endif
54765
54766 +#define vmalloc(x) \
54767 +({ \
54768 + void *___retval; \
54769 + intoverflow_t ___x = (intoverflow_t)x; \
54770 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54771 + ___retval = NULL; \
54772 + else \
54773 + ___retval = vmalloc((unsigned long)___x); \
54774 + ___retval; \
54775 +})
54776 +
54777 +#define vzalloc(x) \
54778 +({ \
54779 + void *___retval; \
54780 + intoverflow_t ___x = (intoverflow_t)x; \
54781 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54782 + ___retval = NULL; \
54783 + else \
54784 + ___retval = vzalloc((unsigned long)___x); \
54785 + ___retval; \
54786 +})
54787 +
54788 +#define __vmalloc(x, y, z) \
54789 +({ \
54790 + void *___retval; \
54791 + intoverflow_t ___x = (intoverflow_t)x; \
54792 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54793 + ___retval = NULL; \
54794 + else \
54795 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54796 + ___retval; \
54797 +})
54798 +
54799 +#define vmalloc_user(x) \
54800 +({ \
54801 + void *___retval; \
54802 + intoverflow_t ___x = (intoverflow_t)x; \
54803 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54804 + ___retval = NULL; \
54805 + else \
54806 + ___retval = vmalloc_user((unsigned long)___x); \
54807 + ___retval; \
54808 +})
54809 +
54810 +#define vmalloc_exec(x) \
54811 +({ \
54812 + void *___retval; \
54813 + intoverflow_t ___x = (intoverflow_t)x; \
54814 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54815 + ___retval = NULL; \
54816 + else \
54817 + ___retval = vmalloc_exec((unsigned long)___x); \
54818 + ___retval; \
54819 +})
54820 +
54821 +#define vmalloc_node(x, y) \
54822 +({ \
54823 + void *___retval; \
54824 + intoverflow_t ___x = (intoverflow_t)x; \
54825 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54826 + ___retval = NULL; \
54827 + else \
54828 + ___retval = vmalloc_node((unsigned long)___x, (y));\
54829 + ___retval; \
54830 +})
54831 +
54832 +#define vzalloc_node(x, y) \
54833 +({ \
54834 + void *___retval; \
54835 + intoverflow_t ___x = (intoverflow_t)x; \
54836 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54837 + ___retval = NULL; \
54838 + else \
54839 + ___retval = vzalloc_node((unsigned long)___x, (y));\
54840 + ___retval; \
54841 +})
54842 +
54843 +#define vmalloc_32(x) \
54844 +({ \
54845 + void *___retval; \
54846 + intoverflow_t ___x = (intoverflow_t)x; \
54847 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54848 + ___retval = NULL; \
54849 + else \
54850 + ___retval = vmalloc_32((unsigned long)___x); \
54851 + ___retval; \
54852 +})
54853 +
54854 +#define vmalloc_32_user(x) \
54855 +({ \
54856 +void *___retval; \
54857 + intoverflow_t ___x = (intoverflow_t)x; \
54858 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54859 + ___retval = NULL; \
54860 + else \
54861 + ___retval = vmalloc_32_user((unsigned long)___x);\
54862 + ___retval; \
54863 +})
54864 +
54865 #endif /* _LINUX_VMALLOC_H */
54866 diff -urNp linux-3.0.3/include/linux/vmstat.h linux-3.0.3/include/linux/vmstat.h
54867 --- linux-3.0.3/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
54868 +++ linux-3.0.3/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
54869 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
54870 /*
54871 * Zone based page accounting with per cpu differentials.
54872 */
54873 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54874 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54875
54876 static inline void zone_page_state_add(long x, struct zone *zone,
54877 enum zone_stat_item item)
54878 {
54879 - atomic_long_add(x, &zone->vm_stat[item]);
54880 - atomic_long_add(x, &vm_stat[item]);
54881 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54882 + atomic_long_add_unchecked(x, &vm_stat[item]);
54883 }
54884
54885 static inline unsigned long global_page_state(enum zone_stat_item item)
54886 {
54887 - long x = atomic_long_read(&vm_stat[item]);
54888 + long x = atomic_long_read_unchecked(&vm_stat[item]);
54889 #ifdef CONFIG_SMP
54890 if (x < 0)
54891 x = 0;
54892 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
54893 static inline unsigned long zone_page_state(struct zone *zone,
54894 enum zone_stat_item item)
54895 {
54896 - long x = atomic_long_read(&zone->vm_stat[item]);
54897 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54898 #ifdef CONFIG_SMP
54899 if (x < 0)
54900 x = 0;
54901 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
54902 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
54903 enum zone_stat_item item)
54904 {
54905 - long x = atomic_long_read(&zone->vm_stat[item]);
54906 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54907
54908 #ifdef CONFIG_SMP
54909 int cpu;
54910 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
54911
54912 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
54913 {
54914 - atomic_long_inc(&zone->vm_stat[item]);
54915 - atomic_long_inc(&vm_stat[item]);
54916 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
54917 + atomic_long_inc_unchecked(&vm_stat[item]);
54918 }
54919
54920 static inline void __inc_zone_page_state(struct page *page,
54921 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
54922
54923 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
54924 {
54925 - atomic_long_dec(&zone->vm_stat[item]);
54926 - atomic_long_dec(&vm_stat[item]);
54927 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
54928 + atomic_long_dec_unchecked(&vm_stat[item]);
54929 }
54930
54931 static inline void __dec_zone_page_state(struct page *page,
54932 diff -urNp linux-3.0.3/include/media/saa7146_vv.h linux-3.0.3/include/media/saa7146_vv.h
54933 --- linux-3.0.3/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
54934 +++ linux-3.0.3/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
54935 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
54936 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
54937
54938 /* the extension can override this */
54939 - struct v4l2_ioctl_ops ops;
54940 + v4l2_ioctl_ops_no_const ops;
54941 /* pointer to the saa7146 core ops */
54942 const struct v4l2_ioctl_ops *core_ops;
54943
54944 diff -urNp linux-3.0.3/include/media/v4l2-ioctl.h linux-3.0.3/include/media/v4l2-ioctl.h
54945 --- linux-3.0.3/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
54946 +++ linux-3.0.3/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
54947 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
54948 long (*vidioc_default) (struct file *file, void *fh,
54949 bool valid_prio, int cmd, void *arg);
54950 };
54951 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
54952
54953
54954 /* v4l debugging and diagnostics */
54955 diff -urNp linux-3.0.3/include/net/caif/cfctrl.h linux-3.0.3/include/net/caif/cfctrl.h
54956 --- linux-3.0.3/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
54957 +++ linux-3.0.3/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
54958 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
54959 void (*radioset_rsp)(void);
54960 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54961 struct cflayer *client_layer);
54962 -};
54963 +} __no_const;
54964
54965 /* Link Setup Parameters for CAIF-Links. */
54966 struct cfctrl_link_param {
54967 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
54968 struct cfctrl {
54969 struct cfsrvl serv;
54970 struct cfctrl_rsp res;
54971 - atomic_t req_seq_no;
54972 - atomic_t rsp_seq_no;
54973 + atomic_unchecked_t req_seq_no;
54974 + atomic_unchecked_t rsp_seq_no;
54975 struct list_head list;
54976 /* Protects from simultaneous access to first_req list */
54977 spinlock_t info_list_lock;
54978 diff -urNp linux-3.0.3/include/net/flow.h linux-3.0.3/include/net/flow.h
54979 --- linux-3.0.3/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
54980 +++ linux-3.0.3/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
54981 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
54982 u8 dir, flow_resolve_t resolver, void *ctx);
54983
54984 extern void flow_cache_flush(void);
54985 -extern atomic_t flow_cache_genid;
54986 +extern atomic_unchecked_t flow_cache_genid;
54987
54988 #endif
54989 diff -urNp linux-3.0.3/include/net/inetpeer.h linux-3.0.3/include/net/inetpeer.h
54990 --- linux-3.0.3/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
54991 +++ linux-3.0.3/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
54992 @@ -43,8 +43,8 @@ struct inet_peer {
54993 */
54994 union {
54995 struct {
54996 - atomic_t rid; /* Frag reception counter */
54997 - atomic_t ip_id_count; /* IP ID for the next packet */
54998 + atomic_unchecked_t rid; /* Frag reception counter */
54999 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55000 __u32 tcp_ts;
55001 __u32 tcp_ts_stamp;
55002 u32 metrics[RTAX_MAX];
55003 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55004 {
55005 more++;
55006 inet_peer_refcheck(p);
55007 - return atomic_add_return(more, &p->ip_id_count) - more;
55008 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55009 }
55010
55011 #endif /* _NET_INETPEER_H */
55012 diff -urNp linux-3.0.3/include/net/ip_fib.h linux-3.0.3/include/net/ip_fib.h
55013 --- linux-3.0.3/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
55014 +++ linux-3.0.3/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
55015 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55016
55017 #define FIB_RES_SADDR(net, res) \
55018 ((FIB_RES_NH(res).nh_saddr_genid == \
55019 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55020 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55021 FIB_RES_NH(res).nh_saddr : \
55022 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55023 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55024 diff -urNp linux-3.0.3/include/net/ip_vs.h linux-3.0.3/include/net/ip_vs.h
55025 --- linux-3.0.3/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
55026 +++ linux-3.0.3/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
55027 @@ -509,7 +509,7 @@ struct ip_vs_conn {
55028 struct ip_vs_conn *control; /* Master control connection */
55029 atomic_t n_control; /* Number of controlled ones */
55030 struct ip_vs_dest *dest; /* real server */
55031 - atomic_t in_pkts; /* incoming packet counter */
55032 + atomic_unchecked_t in_pkts; /* incoming packet counter */
55033
55034 /* packet transmitter for different forwarding methods. If it
55035 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55036 @@ -647,7 +647,7 @@ struct ip_vs_dest {
55037 __be16 port; /* port number of the server */
55038 union nf_inet_addr addr; /* IP address of the server */
55039 volatile unsigned flags; /* dest status flags */
55040 - atomic_t conn_flags; /* flags to copy to conn */
55041 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
55042 atomic_t weight; /* server weight */
55043
55044 atomic_t refcnt; /* reference counter */
55045 diff -urNp linux-3.0.3/include/net/irda/ircomm_core.h linux-3.0.3/include/net/irda/ircomm_core.h
55046 --- linux-3.0.3/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
55047 +++ linux-3.0.3/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
55048 @@ -51,7 +51,7 @@ typedef struct {
55049 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55050 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55051 struct ircomm_info *);
55052 -} call_t;
55053 +} __no_const call_t;
55054
55055 struct ircomm_cb {
55056 irda_queue_t queue;
55057 diff -urNp linux-3.0.3/include/net/irda/ircomm_tty.h linux-3.0.3/include/net/irda/ircomm_tty.h
55058 --- linux-3.0.3/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
55059 +++ linux-3.0.3/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
55060 @@ -35,6 +35,7 @@
55061 #include <linux/termios.h>
55062 #include <linux/timer.h>
55063 #include <linux/tty.h> /* struct tty_struct */
55064 +#include <asm/local.h>
55065
55066 #include <net/irda/irias_object.h>
55067 #include <net/irda/ircomm_core.h>
55068 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55069 unsigned short close_delay;
55070 unsigned short closing_wait; /* time to wait before closing */
55071
55072 - int open_count;
55073 - int blocked_open; /* # of blocked opens */
55074 + local_t open_count;
55075 + local_t blocked_open; /* # of blocked opens */
55076
55077 /* Protect concurent access to :
55078 * o self->open_count
55079 diff -urNp linux-3.0.3/include/net/iucv/af_iucv.h linux-3.0.3/include/net/iucv/af_iucv.h
55080 --- linux-3.0.3/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
55081 +++ linux-3.0.3/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
55082 @@ -87,7 +87,7 @@ struct iucv_sock {
55083 struct iucv_sock_list {
55084 struct hlist_head head;
55085 rwlock_t lock;
55086 - atomic_t autobind_name;
55087 + atomic_unchecked_t autobind_name;
55088 };
55089
55090 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55091 diff -urNp linux-3.0.3/include/net/lapb.h linux-3.0.3/include/net/lapb.h
55092 --- linux-3.0.3/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
55093 +++ linux-3.0.3/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
55094 @@ -95,7 +95,7 @@ struct lapb_cb {
55095 struct sk_buff_head write_queue;
55096 struct sk_buff_head ack_queue;
55097 unsigned char window;
55098 - struct lapb_register_struct callbacks;
55099 + struct lapb_register_struct *callbacks;
55100
55101 /* FRMR control information */
55102 struct lapb_frame frmr_data;
55103 diff -urNp linux-3.0.3/include/net/neighbour.h linux-3.0.3/include/net/neighbour.h
55104 --- linux-3.0.3/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
55105 +++ linux-3.0.3/include/net/neighbour.h 2011-08-26 19:49:56.000000000 -0400
55106 @@ -117,14 +117,14 @@ struct neighbour {
55107 };
55108
55109 struct neigh_ops {
55110 - int family;
55111 + const int family;
55112 void (*solicit)(struct neighbour *, struct sk_buff*);
55113 void (*error_report)(struct neighbour *, struct sk_buff*);
55114 int (*output)(struct sk_buff*);
55115 int (*connected_output)(struct sk_buff*);
55116 int (*hh_output)(struct sk_buff*);
55117 int (*queue_xmit)(struct sk_buff*);
55118 -};
55119 +} __do_const;
55120
55121 struct pneigh_entry {
55122 struct pneigh_entry *next;
55123 diff -urNp linux-3.0.3/include/net/netlink.h linux-3.0.3/include/net/netlink.h
55124 --- linux-3.0.3/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
55125 +++ linux-3.0.3/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
55126 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55127 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55128 {
55129 if (mark)
55130 - skb_trim(skb, (unsigned char *) mark - skb->data);
55131 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55132 }
55133
55134 /**
55135 diff -urNp linux-3.0.3/include/net/netns/ipv4.h linux-3.0.3/include/net/netns/ipv4.h
55136 --- linux-3.0.3/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
55137 +++ linux-3.0.3/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
55138 @@ -56,8 +56,8 @@ struct netns_ipv4 {
55139
55140 unsigned int sysctl_ping_group_range[2];
55141
55142 - atomic_t rt_genid;
55143 - atomic_t dev_addr_genid;
55144 + atomic_unchecked_t rt_genid;
55145 + atomic_unchecked_t dev_addr_genid;
55146
55147 #ifdef CONFIG_IP_MROUTE
55148 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55149 diff -urNp linux-3.0.3/include/net/sctp/sctp.h linux-3.0.3/include/net/sctp/sctp.h
55150 --- linux-3.0.3/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
55151 +++ linux-3.0.3/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
55152 @@ -315,9 +315,9 @@ do { \
55153
55154 #else /* SCTP_DEBUG */
55155
55156 -#define SCTP_DEBUG_PRINTK(whatever...)
55157 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55158 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55159 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55160 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55161 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55162 #define SCTP_ENABLE_DEBUG
55163 #define SCTP_DISABLE_DEBUG
55164 #define SCTP_ASSERT(expr, str, func)
55165 diff -urNp linux-3.0.3/include/net/sock.h linux-3.0.3/include/net/sock.h
55166 --- linux-3.0.3/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
55167 +++ linux-3.0.3/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
55168 @@ -277,7 +277,7 @@ struct sock {
55169 #ifdef CONFIG_RPS
55170 __u32 sk_rxhash;
55171 #endif
55172 - atomic_t sk_drops;
55173 + atomic_unchecked_t sk_drops;
55174 int sk_rcvbuf;
55175
55176 struct sk_filter __rcu *sk_filter;
55177 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
55178 }
55179
55180 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
55181 - char __user *from, char *to,
55182 + char __user *from, unsigned char *to,
55183 int copy, int offset)
55184 {
55185 if (skb->ip_summed == CHECKSUM_NONE) {
55186 diff -urNp linux-3.0.3/include/net/tcp.h linux-3.0.3/include/net/tcp.h
55187 --- linux-3.0.3/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
55188 +++ linux-3.0.3/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
55189 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55190 struct tcp_seq_afinfo {
55191 char *name;
55192 sa_family_t family;
55193 - struct file_operations seq_fops;
55194 - struct seq_operations seq_ops;
55195 + file_operations_no_const seq_fops;
55196 + seq_operations_no_const seq_ops;
55197 };
55198
55199 struct tcp_iter_state {
55200 diff -urNp linux-3.0.3/include/net/udp.h linux-3.0.3/include/net/udp.h
55201 --- linux-3.0.3/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
55202 +++ linux-3.0.3/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
55203 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55204 char *name;
55205 sa_family_t family;
55206 struct udp_table *udp_table;
55207 - struct file_operations seq_fops;
55208 - struct seq_operations seq_ops;
55209 + file_operations_no_const seq_fops;
55210 + seq_operations_no_const seq_ops;
55211 };
55212
55213 struct udp_iter_state {
55214 diff -urNp linux-3.0.3/include/net/xfrm.h linux-3.0.3/include/net/xfrm.h
55215 --- linux-3.0.3/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
55216 +++ linux-3.0.3/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
55217 @@ -505,7 +505,7 @@ struct xfrm_policy {
55218 struct timer_list timer;
55219
55220 struct flow_cache_object flo;
55221 - atomic_t genid;
55222 + atomic_unchecked_t genid;
55223 u32 priority;
55224 u32 index;
55225 struct xfrm_mark mark;
55226 diff -urNp linux-3.0.3/include/rdma/iw_cm.h linux-3.0.3/include/rdma/iw_cm.h
55227 --- linux-3.0.3/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
55228 +++ linux-3.0.3/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
55229 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
55230 int backlog);
55231
55232 int (*destroy_listen)(struct iw_cm_id *cm_id);
55233 -};
55234 +} __no_const;
55235
55236 /**
55237 * iw_create_cm_id - Create an IW CM identifier.
55238 diff -urNp linux-3.0.3/include/scsi/libfc.h linux-3.0.3/include/scsi/libfc.h
55239 --- linux-3.0.3/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
55240 +++ linux-3.0.3/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
55241 @@ -750,6 +750,7 @@ struct libfc_function_template {
55242 */
55243 void (*disc_stop_final) (struct fc_lport *);
55244 };
55245 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55246
55247 /**
55248 * struct fc_disc - Discovery context
55249 @@ -853,7 +854,7 @@ struct fc_lport {
55250 struct fc_vport *vport;
55251
55252 /* Operational Information */
55253 - struct libfc_function_template tt;
55254 + libfc_function_template_no_const tt;
55255 u8 link_up;
55256 u8 qfull;
55257 enum fc_lport_state state;
55258 diff -urNp linux-3.0.3/include/scsi/scsi_device.h linux-3.0.3/include/scsi/scsi_device.h
55259 --- linux-3.0.3/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
55260 +++ linux-3.0.3/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
55261 @@ -161,9 +161,9 @@ struct scsi_device {
55262 unsigned int max_device_blocked; /* what device_blocked counts down from */
55263 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55264
55265 - atomic_t iorequest_cnt;
55266 - atomic_t iodone_cnt;
55267 - atomic_t ioerr_cnt;
55268 + atomic_unchecked_t iorequest_cnt;
55269 + atomic_unchecked_t iodone_cnt;
55270 + atomic_unchecked_t ioerr_cnt;
55271
55272 struct device sdev_gendev,
55273 sdev_dev;
55274 diff -urNp linux-3.0.3/include/scsi/scsi_transport_fc.h linux-3.0.3/include/scsi/scsi_transport_fc.h
55275 --- linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
55276 +++ linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
55277 @@ -711,7 +711,7 @@ struct fc_function_template {
55278 unsigned long show_host_system_hostname:1;
55279
55280 unsigned long disable_target_scan:1;
55281 -};
55282 +} __do_const;
55283
55284
55285 /**
55286 diff -urNp linux-3.0.3/include/sound/ak4xxx-adda.h linux-3.0.3/include/sound/ak4xxx-adda.h
55287 --- linux-3.0.3/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
55288 +++ linux-3.0.3/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
55289 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55290 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55291 unsigned char val);
55292 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55293 -};
55294 +} __no_const;
55295
55296 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55297
55298 diff -urNp linux-3.0.3/include/sound/hwdep.h linux-3.0.3/include/sound/hwdep.h
55299 --- linux-3.0.3/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
55300 +++ linux-3.0.3/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
55301 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55302 struct snd_hwdep_dsp_status *status);
55303 int (*dsp_load)(struct snd_hwdep *hw,
55304 struct snd_hwdep_dsp_image *image);
55305 -};
55306 +} __no_const;
55307
55308 struct snd_hwdep {
55309 struct snd_card *card;
55310 diff -urNp linux-3.0.3/include/sound/info.h linux-3.0.3/include/sound/info.h
55311 --- linux-3.0.3/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
55312 +++ linux-3.0.3/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
55313 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55314 struct snd_info_buffer *buffer);
55315 void (*write)(struct snd_info_entry *entry,
55316 struct snd_info_buffer *buffer);
55317 -};
55318 +} __no_const;
55319
55320 struct snd_info_entry_ops {
55321 int (*open)(struct snd_info_entry *entry,
55322 diff -urNp linux-3.0.3/include/sound/pcm.h linux-3.0.3/include/sound/pcm.h
55323 --- linux-3.0.3/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
55324 +++ linux-3.0.3/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
55325 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55326 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55327 int (*ack)(struct snd_pcm_substream *substream);
55328 };
55329 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55330
55331 /*
55332 *
55333 diff -urNp linux-3.0.3/include/sound/sb16_csp.h linux-3.0.3/include/sound/sb16_csp.h
55334 --- linux-3.0.3/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
55335 +++ linux-3.0.3/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
55336 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
55337 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55338 int (*csp_stop) (struct snd_sb_csp * p);
55339 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55340 -};
55341 +} __no_const;
55342
55343 /*
55344 * CSP private data
55345 diff -urNp linux-3.0.3/include/sound/soc.h linux-3.0.3/include/sound/soc.h
55346 --- linux-3.0.3/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
55347 +++ linux-3.0.3/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
55348 @@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
55349
55350 /* platform stream ops */
55351 struct snd_pcm_ops *ops;
55352 -};
55353 +} __do_const;
55354
55355 struct snd_soc_platform {
55356 const char *name;
55357 diff -urNp linux-3.0.3/include/sound/ymfpci.h linux-3.0.3/include/sound/ymfpci.h
55358 --- linux-3.0.3/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
55359 +++ linux-3.0.3/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
55360 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55361 spinlock_t reg_lock;
55362 spinlock_t voice_lock;
55363 wait_queue_head_t interrupt_sleep;
55364 - atomic_t interrupt_sleep_count;
55365 + atomic_unchecked_t interrupt_sleep_count;
55366 struct snd_info_entry *proc_entry;
55367 const struct firmware *dsp_microcode;
55368 const struct firmware *controller_microcode;
55369 diff -urNp linux-3.0.3/include/target/target_core_base.h linux-3.0.3/include/target/target_core_base.h
55370 --- linux-3.0.3/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
55371 +++ linux-3.0.3/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
55372 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55373 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55374 int (*t10_pr_register)(struct se_cmd *);
55375 int (*t10_pr_clear)(struct se_cmd *);
55376 -};
55377 +} __no_const;
55378
55379 struct t10_reservation_template {
55380 /* Reservation effects all target ports */
55381 @@ -432,8 +432,8 @@ struct se_transport_task {
55382 atomic_t t_task_cdbs_left;
55383 atomic_t t_task_cdbs_ex_left;
55384 atomic_t t_task_cdbs_timeout_left;
55385 - atomic_t t_task_cdbs_sent;
55386 - atomic_t t_transport_aborted;
55387 + atomic_unchecked_t t_task_cdbs_sent;
55388 + atomic_unchecked_t t_transport_aborted;
55389 atomic_t t_transport_active;
55390 atomic_t t_transport_complete;
55391 atomic_t t_transport_queue_active;
55392 @@ -774,7 +774,7 @@ struct se_device {
55393 atomic_t active_cmds;
55394 atomic_t simple_cmds;
55395 atomic_t depth_left;
55396 - atomic_t dev_ordered_id;
55397 + atomic_unchecked_t dev_ordered_id;
55398 atomic_t dev_tur_active;
55399 atomic_t execute_tasks;
55400 atomic_t dev_status_thr_count;
55401 diff -urNp linux-3.0.3/include/trace/events/irq.h linux-3.0.3/include/trace/events/irq.h
55402 --- linux-3.0.3/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
55403 +++ linux-3.0.3/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
55404 @@ -36,7 +36,7 @@ struct softirq_action;
55405 */
55406 TRACE_EVENT(irq_handler_entry,
55407
55408 - TP_PROTO(int irq, struct irqaction *action),
55409 + TP_PROTO(int irq, const struct irqaction *action),
55410
55411 TP_ARGS(irq, action),
55412
55413 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55414 */
55415 TRACE_EVENT(irq_handler_exit,
55416
55417 - TP_PROTO(int irq, struct irqaction *action, int ret),
55418 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55419
55420 TP_ARGS(irq, action, ret),
55421
55422 diff -urNp linux-3.0.3/include/video/udlfb.h linux-3.0.3/include/video/udlfb.h
55423 --- linux-3.0.3/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
55424 +++ linux-3.0.3/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
55425 @@ -51,10 +51,10 @@ struct dlfb_data {
55426 int base8;
55427 u32 pseudo_palette[256];
55428 /* blit-only rendering path metrics, exposed through sysfs */
55429 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55430 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55431 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55432 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55433 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55434 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55435 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55436 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55437 };
55438
55439 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55440 diff -urNp linux-3.0.3/include/video/uvesafb.h linux-3.0.3/include/video/uvesafb.h
55441 --- linux-3.0.3/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
55442 +++ linux-3.0.3/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
55443 @@ -177,6 +177,7 @@ struct uvesafb_par {
55444 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55445 u8 pmi_setpal; /* PMI for palette changes */
55446 u16 *pmi_base; /* protected mode interface location */
55447 + u8 *pmi_code; /* protected mode code location */
55448 void *pmi_start;
55449 void *pmi_pal;
55450 u8 *vbe_state_orig; /*
55451 diff -urNp linux-3.0.3/init/do_mounts.c linux-3.0.3/init/do_mounts.c
55452 --- linux-3.0.3/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
55453 +++ linux-3.0.3/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
55454 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55455
55456 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55457 {
55458 - int err = sys_mount(name, "/root", fs, flags, data);
55459 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55460 if (err)
55461 return err;
55462
55463 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55464 va_start(args, fmt);
55465 vsprintf(buf, fmt, args);
55466 va_end(args);
55467 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55468 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55469 if (fd >= 0) {
55470 sys_ioctl(fd, FDEJECT, 0);
55471 sys_close(fd);
55472 }
55473 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55474 - fd = sys_open("/dev/console", O_RDWR, 0);
55475 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55476 if (fd >= 0) {
55477 sys_ioctl(fd, TCGETS, (long)&termios);
55478 termios.c_lflag &= ~ICANON;
55479 sys_ioctl(fd, TCSETSF, (long)&termios);
55480 - sys_read(fd, &c, 1);
55481 + sys_read(fd, (char __user *)&c, 1);
55482 termios.c_lflag |= ICANON;
55483 sys_ioctl(fd, TCSETSF, (long)&termios);
55484 sys_close(fd);
55485 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55486 mount_root();
55487 out:
55488 devtmpfs_mount("dev");
55489 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55490 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55491 sys_chroot((const char __user __force *)".");
55492 }
55493 diff -urNp linux-3.0.3/init/do_mounts.h linux-3.0.3/init/do_mounts.h
55494 --- linux-3.0.3/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
55495 +++ linux-3.0.3/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
55496 @@ -15,15 +15,15 @@ extern int root_mountflags;
55497
55498 static inline int create_dev(char *name, dev_t dev)
55499 {
55500 - sys_unlink(name);
55501 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55502 + sys_unlink((__force char __user *)name);
55503 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55504 }
55505
55506 #if BITS_PER_LONG == 32
55507 static inline u32 bstat(char *name)
55508 {
55509 struct stat64 stat;
55510 - if (sys_stat64(name, &stat) != 0)
55511 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55512 return 0;
55513 if (!S_ISBLK(stat.st_mode))
55514 return 0;
55515 diff -urNp linux-3.0.3/init/do_mounts_initrd.c linux-3.0.3/init/do_mounts_initrd.c
55516 --- linux-3.0.3/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
55517 +++ linux-3.0.3/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
55518 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55519 create_dev("/dev/root.old", Root_RAM0);
55520 /* mount initrd on rootfs' /root */
55521 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55522 - sys_mkdir("/old", 0700);
55523 - root_fd = sys_open("/", 0, 0);
55524 - old_fd = sys_open("/old", 0, 0);
55525 + sys_mkdir((__force const char __user *)"/old", 0700);
55526 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55527 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55528 /* move initrd over / and chdir/chroot in initrd root */
55529 - sys_chdir("/root");
55530 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55531 - sys_chroot(".");
55532 + sys_chdir((__force const char __user *)"/root");
55533 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55534 + sys_chroot((__force const char __user *)".");
55535
55536 /*
55537 * In case that a resume from disk is carried out by linuxrc or one of
55538 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55539
55540 /* move initrd to rootfs' /old */
55541 sys_fchdir(old_fd);
55542 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55543 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55544 /* switch root and cwd back to / of rootfs */
55545 sys_fchdir(root_fd);
55546 - sys_chroot(".");
55547 + sys_chroot((__force const char __user *)".");
55548 sys_close(old_fd);
55549 sys_close(root_fd);
55550
55551 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55552 - sys_chdir("/old");
55553 + sys_chdir((__force const char __user *)"/old");
55554 return;
55555 }
55556
55557 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55558 mount_root();
55559
55560 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55561 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55562 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55563 if (!error)
55564 printk("okay\n");
55565 else {
55566 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55567 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55568 if (error == -ENOENT)
55569 printk("/initrd does not exist. Ignored.\n");
55570 else
55571 printk("failed\n");
55572 printk(KERN_NOTICE "Unmounting old root\n");
55573 - sys_umount("/old", MNT_DETACH);
55574 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55575 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55576 if (fd < 0) {
55577 error = fd;
55578 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55579 * mounted in the normal path.
55580 */
55581 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55582 - sys_unlink("/initrd.image");
55583 + sys_unlink((__force const char __user *)"/initrd.image");
55584 handle_initrd();
55585 return 1;
55586 }
55587 }
55588 - sys_unlink("/initrd.image");
55589 + sys_unlink((__force const char __user *)"/initrd.image");
55590 return 0;
55591 }
55592 diff -urNp linux-3.0.3/init/do_mounts_md.c linux-3.0.3/init/do_mounts_md.c
55593 --- linux-3.0.3/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
55594 +++ linux-3.0.3/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
55595 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55596 partitioned ? "_d" : "", minor,
55597 md_setup_args[ent].device_names);
55598
55599 - fd = sys_open(name, 0, 0);
55600 + fd = sys_open((__force char __user *)name, 0, 0);
55601 if (fd < 0) {
55602 printk(KERN_ERR "md: open failed - cannot start "
55603 "array %s\n", name);
55604 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55605 * array without it
55606 */
55607 sys_close(fd);
55608 - fd = sys_open(name, 0, 0);
55609 + fd = sys_open((__force char __user *)name, 0, 0);
55610 sys_ioctl(fd, BLKRRPART, 0);
55611 }
55612 sys_close(fd);
55613 diff -urNp linux-3.0.3/init/initramfs.c linux-3.0.3/init/initramfs.c
55614 --- linux-3.0.3/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
55615 +++ linux-3.0.3/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
55616 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55617 }
55618 }
55619
55620 -static long __init do_utime(char __user *filename, time_t mtime)
55621 +static long __init do_utime(__force char __user *filename, time_t mtime)
55622 {
55623 struct timespec t[2];
55624
55625 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55626 struct dir_entry *de, *tmp;
55627 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55628 list_del(&de->list);
55629 - do_utime(de->name, de->mtime);
55630 + do_utime((__force char __user *)de->name, de->mtime);
55631 kfree(de->name);
55632 kfree(de);
55633 }
55634 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55635 if (nlink >= 2) {
55636 char *old = find_link(major, minor, ino, mode, collected);
55637 if (old)
55638 - return (sys_link(old, collected) < 0) ? -1 : 1;
55639 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55640 }
55641 return 0;
55642 }
55643 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55644 {
55645 struct stat st;
55646
55647 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55648 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55649 if (S_ISDIR(st.st_mode))
55650 - sys_rmdir(path);
55651 + sys_rmdir((__force char __user *)path);
55652 else
55653 - sys_unlink(path);
55654 + sys_unlink((__force char __user *)path);
55655 }
55656 }
55657
55658 @@ -305,7 +305,7 @@ static int __init do_name(void)
55659 int openflags = O_WRONLY|O_CREAT;
55660 if (ml != 1)
55661 openflags |= O_TRUNC;
55662 - wfd = sys_open(collected, openflags, mode);
55663 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55664
55665 if (wfd >= 0) {
55666 sys_fchown(wfd, uid, gid);
55667 @@ -317,17 +317,17 @@ static int __init do_name(void)
55668 }
55669 }
55670 } else if (S_ISDIR(mode)) {
55671 - sys_mkdir(collected, mode);
55672 - sys_chown(collected, uid, gid);
55673 - sys_chmod(collected, mode);
55674 + sys_mkdir((__force char __user *)collected, mode);
55675 + sys_chown((__force char __user *)collected, uid, gid);
55676 + sys_chmod((__force char __user *)collected, mode);
55677 dir_add(collected, mtime);
55678 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55679 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55680 if (maybe_link() == 0) {
55681 - sys_mknod(collected, mode, rdev);
55682 - sys_chown(collected, uid, gid);
55683 - sys_chmod(collected, mode);
55684 - do_utime(collected, mtime);
55685 + sys_mknod((__force char __user *)collected, mode, rdev);
55686 + sys_chown((__force char __user *)collected, uid, gid);
55687 + sys_chmod((__force char __user *)collected, mode);
55688 + do_utime((__force char __user *)collected, mtime);
55689 }
55690 }
55691 return 0;
55692 @@ -336,15 +336,15 @@ static int __init do_name(void)
55693 static int __init do_copy(void)
55694 {
55695 if (count >= body_len) {
55696 - sys_write(wfd, victim, body_len);
55697 + sys_write(wfd, (__force char __user *)victim, body_len);
55698 sys_close(wfd);
55699 - do_utime(vcollected, mtime);
55700 + do_utime((__force char __user *)vcollected, mtime);
55701 kfree(vcollected);
55702 eat(body_len);
55703 state = SkipIt;
55704 return 0;
55705 } else {
55706 - sys_write(wfd, victim, count);
55707 + sys_write(wfd, (__force char __user *)victim, count);
55708 body_len -= count;
55709 eat(count);
55710 return 1;
55711 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55712 {
55713 collected[N_ALIGN(name_len) + body_len] = '\0';
55714 clean_path(collected, 0);
55715 - sys_symlink(collected + N_ALIGN(name_len), collected);
55716 - sys_lchown(collected, uid, gid);
55717 - do_utime(collected, mtime);
55718 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55719 + sys_lchown((__force char __user *)collected, uid, gid);
55720 + do_utime((__force char __user *)collected, mtime);
55721 state = SkipIt;
55722 next_state = Reset;
55723 return 0;
55724 diff -urNp linux-3.0.3/init/Kconfig linux-3.0.3/init/Kconfig
55725 --- linux-3.0.3/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
55726 +++ linux-3.0.3/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
55727 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
55728
55729 config COMPAT_BRK
55730 bool "Disable heap randomization"
55731 - default y
55732 + default n
55733 help
55734 Randomizing heap placement makes heap exploits harder, but it
55735 also breaks ancient binaries (including anything libc5 based).
55736 diff -urNp linux-3.0.3/init/main.c linux-3.0.3/init/main.c
55737 --- linux-3.0.3/init/main.c 2011-07-21 22:17:23.000000000 -0400
55738 +++ linux-3.0.3/init/main.c 2011-08-23 21:48:14.000000000 -0400
55739 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55740 extern void tc_init(void);
55741 #endif
55742
55743 +extern void grsecurity_init(void);
55744 +
55745 /*
55746 * Debug helper: via this flag we know that we are in 'early bootup code'
55747 * where only the boot processor is running with IRQ disabled. This means
55748 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55749
55750 __setup("reset_devices", set_reset_devices);
55751
55752 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55753 +extern char pax_enter_kernel_user[];
55754 +extern char pax_exit_kernel_user[];
55755 +extern pgdval_t clone_pgd_mask;
55756 +#endif
55757 +
55758 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55759 +static int __init setup_pax_nouderef(char *str)
55760 +{
55761 +#ifdef CONFIG_X86_32
55762 + unsigned int cpu;
55763 + struct desc_struct *gdt;
55764 +
55765 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
55766 + gdt = get_cpu_gdt_table(cpu);
55767 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55768 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55769 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55770 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55771 + }
55772 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55773 +#else
55774 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55775 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55776 + clone_pgd_mask = ~(pgdval_t)0UL;
55777 +#endif
55778 +
55779 + return 0;
55780 +}
55781 +early_param("pax_nouderef", setup_pax_nouderef);
55782 +#endif
55783 +
55784 +#ifdef CONFIG_PAX_SOFTMODE
55785 +int pax_softmode;
55786 +
55787 +static int __init setup_pax_softmode(char *str)
55788 +{
55789 + get_option(&str, &pax_softmode);
55790 + return 1;
55791 +}
55792 +__setup("pax_softmode=", setup_pax_softmode);
55793 +#endif
55794 +
55795 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55796 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55797 static const char *panic_later, *panic_param;
55798 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
55799 {
55800 int count = preempt_count();
55801 int ret;
55802 + const char *msg1 = "", *msg2 = "";
55803
55804 if (initcall_debug)
55805 ret = do_one_initcall_debug(fn);
55806 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
55807 sprintf(msgbuf, "error code %d ", ret);
55808
55809 if (preempt_count() != count) {
55810 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55811 + msg1 = " preemption imbalance";
55812 preempt_count() = count;
55813 }
55814 if (irqs_disabled()) {
55815 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55816 + msg2 = " disabled interrupts";
55817 local_irq_enable();
55818 }
55819 - if (msgbuf[0]) {
55820 - printk("initcall %pF returned with %s\n", fn, msgbuf);
55821 + if (msgbuf[0] || *msg1 || *msg2) {
55822 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
55823 }
55824
55825 return ret;
55826 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
55827 do_basic_setup();
55828
55829 /* Open the /dev/console on the rootfs, this should never fail */
55830 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
55831 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
55832 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
55833
55834 (void) sys_dup(0);
55835 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
55836 if (!ramdisk_execute_command)
55837 ramdisk_execute_command = "/init";
55838
55839 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
55840 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
55841 ramdisk_execute_command = NULL;
55842 prepare_namespace();
55843 }
55844
55845 + grsecurity_init();
55846 +
55847 /*
55848 * Ok, we have completed the initial bootup, and
55849 * we're essentially up and running. Get rid of the
55850 diff -urNp linux-3.0.3/ipc/mqueue.c linux-3.0.3/ipc/mqueue.c
55851 --- linux-3.0.3/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
55852 +++ linux-3.0.3/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
55853 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
55854 mq_bytes = (mq_msg_tblsz +
55855 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
55856
55857 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
55858 spin_lock(&mq_lock);
55859 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
55860 u->mq_bytes + mq_bytes >
55861 diff -urNp linux-3.0.3/ipc/msg.c linux-3.0.3/ipc/msg.c
55862 --- linux-3.0.3/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
55863 +++ linux-3.0.3/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
55864 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
55865 return security_msg_queue_associate(msq, msgflg);
55866 }
55867
55868 +static struct ipc_ops msg_ops = {
55869 + .getnew = newque,
55870 + .associate = msg_security,
55871 + .more_checks = NULL
55872 +};
55873 +
55874 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
55875 {
55876 struct ipc_namespace *ns;
55877 - struct ipc_ops msg_ops;
55878 struct ipc_params msg_params;
55879
55880 ns = current->nsproxy->ipc_ns;
55881
55882 - msg_ops.getnew = newque;
55883 - msg_ops.associate = msg_security;
55884 - msg_ops.more_checks = NULL;
55885 -
55886 msg_params.key = key;
55887 msg_params.flg = msgflg;
55888
55889 diff -urNp linux-3.0.3/ipc/sem.c linux-3.0.3/ipc/sem.c
55890 --- linux-3.0.3/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
55891 +++ linux-3.0.3/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
55892 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
55893 return 0;
55894 }
55895
55896 +static struct ipc_ops sem_ops = {
55897 + .getnew = newary,
55898 + .associate = sem_security,
55899 + .more_checks = sem_more_checks
55900 +};
55901 +
55902 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
55903 {
55904 struct ipc_namespace *ns;
55905 - struct ipc_ops sem_ops;
55906 struct ipc_params sem_params;
55907
55908 ns = current->nsproxy->ipc_ns;
55909 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
55910 if (nsems < 0 || nsems > ns->sc_semmsl)
55911 return -EINVAL;
55912
55913 - sem_ops.getnew = newary;
55914 - sem_ops.associate = sem_security;
55915 - sem_ops.more_checks = sem_more_checks;
55916 -
55917 sem_params.key = key;
55918 sem_params.flg = semflg;
55919 sem_params.u.nsems = nsems;
55920 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
55921 int nsems;
55922 struct list_head tasks;
55923
55924 + pax_track_stack();
55925 +
55926 sma = sem_lock_check(ns, semid);
55927 if (IS_ERR(sma))
55928 return PTR_ERR(sma);
55929 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
55930 struct ipc_namespace *ns;
55931 struct list_head tasks;
55932
55933 + pax_track_stack();
55934 +
55935 ns = current->nsproxy->ipc_ns;
55936
55937 if (nsops < 1 || semid < 0)
55938 diff -urNp linux-3.0.3/ipc/shm.c linux-3.0.3/ipc/shm.c
55939 --- linux-3.0.3/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
55940 +++ linux-3.0.3/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
55941 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
55942 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
55943 #endif
55944
55945 +#ifdef CONFIG_GRKERNSEC
55946 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55947 + const time_t shm_createtime, const uid_t cuid,
55948 + const int shmid);
55949 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55950 + const time_t shm_createtime);
55951 +#endif
55952 +
55953 void shm_init_ns(struct ipc_namespace *ns)
55954 {
55955 ns->shm_ctlmax = SHMMAX;
55956 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
55957 shp->shm_lprid = 0;
55958 shp->shm_atim = shp->shm_dtim = 0;
55959 shp->shm_ctim = get_seconds();
55960 +#ifdef CONFIG_GRKERNSEC
55961 + {
55962 + struct timespec timeval;
55963 + do_posix_clock_monotonic_gettime(&timeval);
55964 +
55965 + shp->shm_createtime = timeval.tv_sec;
55966 + }
55967 +#endif
55968 shp->shm_segsz = size;
55969 shp->shm_nattch = 0;
55970 shp->shm_file = file;
55971 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
55972 return 0;
55973 }
55974
55975 +static struct ipc_ops shm_ops = {
55976 + .getnew = newseg,
55977 + .associate = shm_security,
55978 + .more_checks = shm_more_checks
55979 +};
55980 +
55981 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
55982 {
55983 struct ipc_namespace *ns;
55984 - struct ipc_ops shm_ops;
55985 struct ipc_params shm_params;
55986
55987 ns = current->nsproxy->ipc_ns;
55988
55989 - shm_ops.getnew = newseg;
55990 - shm_ops.associate = shm_security;
55991 - shm_ops.more_checks = shm_more_checks;
55992 -
55993 shm_params.key = key;
55994 shm_params.flg = shmflg;
55995 shm_params.u.size = size;
55996 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
55997 case SHM_LOCK:
55998 case SHM_UNLOCK:
55999 {
56000 - struct file *uninitialized_var(shm_file);
56001 -
56002 lru_add_drain_all(); /* drain pagevecs to lru lists */
56003
56004 shp = shm_lock_check(ns, shmid);
56005 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56006 if (err)
56007 goto out_unlock;
56008
56009 +#ifdef CONFIG_GRKERNSEC
56010 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56011 + shp->shm_perm.cuid, shmid) ||
56012 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56013 + err = -EACCES;
56014 + goto out_unlock;
56015 + }
56016 +#endif
56017 +
56018 path = shp->shm_file->f_path;
56019 path_get(&path);
56020 shp->shm_nattch++;
56021 +#ifdef CONFIG_GRKERNSEC
56022 + shp->shm_lapid = current->pid;
56023 +#endif
56024 size = i_size_read(path.dentry->d_inode);
56025 shm_unlock(shp);
56026
56027 diff -urNp linux-3.0.3/kernel/acct.c linux-3.0.3/kernel/acct.c
56028 --- linux-3.0.3/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
56029 +++ linux-3.0.3/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
56030 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56031 */
56032 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56033 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56034 - file->f_op->write(file, (char *)&ac,
56035 + file->f_op->write(file, (__force char __user *)&ac,
56036 sizeof(acct_t), &file->f_pos);
56037 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56038 set_fs(fs);
56039 diff -urNp linux-3.0.3/kernel/audit.c linux-3.0.3/kernel/audit.c
56040 --- linux-3.0.3/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
56041 +++ linux-3.0.3/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
56042 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56043 3) suppressed due to audit_rate_limit
56044 4) suppressed due to audit_backlog_limit
56045 */
56046 -static atomic_t audit_lost = ATOMIC_INIT(0);
56047 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56048
56049 /* The netlink socket. */
56050 static struct sock *audit_sock;
56051 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56052 unsigned long now;
56053 int print;
56054
56055 - atomic_inc(&audit_lost);
56056 + atomic_inc_unchecked(&audit_lost);
56057
56058 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56059
56060 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56061 printk(KERN_WARNING
56062 "audit: audit_lost=%d audit_rate_limit=%d "
56063 "audit_backlog_limit=%d\n",
56064 - atomic_read(&audit_lost),
56065 + atomic_read_unchecked(&audit_lost),
56066 audit_rate_limit,
56067 audit_backlog_limit);
56068 audit_panic(message);
56069 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56070 status_set.pid = audit_pid;
56071 status_set.rate_limit = audit_rate_limit;
56072 status_set.backlog_limit = audit_backlog_limit;
56073 - status_set.lost = atomic_read(&audit_lost);
56074 + status_set.lost = atomic_read_unchecked(&audit_lost);
56075 status_set.backlog = skb_queue_len(&audit_skb_queue);
56076 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56077 &status_set, sizeof(status_set));
56078 diff -urNp linux-3.0.3/kernel/auditsc.c linux-3.0.3/kernel/auditsc.c
56079 --- linux-3.0.3/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
56080 +++ linux-3.0.3/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
56081 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
56082 }
56083
56084 /* global counter which is incremented every time something logs in */
56085 -static atomic_t session_id = ATOMIC_INIT(0);
56086 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56087
56088 /**
56089 * audit_set_loginuid - set a task's audit_context loginuid
56090 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
56091 */
56092 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56093 {
56094 - unsigned int sessionid = atomic_inc_return(&session_id);
56095 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56096 struct audit_context *context = task->audit_context;
56097
56098 if (context && context->in_syscall) {
56099 diff -urNp linux-3.0.3/kernel/capability.c linux-3.0.3/kernel/capability.c
56100 --- linux-3.0.3/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
56101 +++ linux-3.0.3/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
56102 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56103 * before modification is attempted and the application
56104 * fails.
56105 */
56106 + if (tocopy > ARRAY_SIZE(kdata))
56107 + return -EFAULT;
56108 +
56109 if (copy_to_user(dataptr, kdata, tocopy
56110 * sizeof(struct __user_cap_data_struct))) {
56111 return -EFAULT;
56112 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
56113 BUG();
56114 }
56115
56116 - if (security_capable(ns, current_cred(), cap) == 0) {
56117 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56118 current->flags |= PF_SUPERPRIV;
56119 return true;
56120 }
56121 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
56122 }
56123 EXPORT_SYMBOL(ns_capable);
56124
56125 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56126 +{
56127 + if (unlikely(!cap_valid(cap))) {
56128 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56129 + BUG();
56130 + }
56131 +
56132 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56133 + current->flags |= PF_SUPERPRIV;
56134 + return true;
56135 + }
56136 + return false;
56137 +}
56138 +EXPORT_SYMBOL(ns_capable_nolog);
56139 +
56140 +bool capable_nolog(int cap)
56141 +{
56142 + return ns_capable_nolog(&init_user_ns, cap);
56143 +}
56144 +EXPORT_SYMBOL(capable_nolog);
56145 +
56146 /**
56147 * task_ns_capable - Determine whether current task has a superior
56148 * capability targeted at a specific task's user namespace.
56149 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
56150 }
56151 EXPORT_SYMBOL(task_ns_capable);
56152
56153 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56154 +{
56155 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56156 +}
56157 +EXPORT_SYMBOL(task_ns_capable_nolog);
56158 +
56159 /**
56160 * nsown_capable - Check superior capability to one's own user_ns
56161 * @cap: The capability in question
56162 diff -urNp linux-3.0.3/kernel/cgroup.c linux-3.0.3/kernel/cgroup.c
56163 --- linux-3.0.3/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
56164 +++ linux-3.0.3/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
56165 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
56166 struct hlist_head *hhead;
56167 struct cg_cgroup_link *link;
56168
56169 + pax_track_stack();
56170 +
56171 /* First see if we already have a cgroup group that matches
56172 * the desired set */
56173 read_lock(&css_set_lock);
56174 diff -urNp linux-3.0.3/kernel/compat.c linux-3.0.3/kernel/compat.c
56175 --- linux-3.0.3/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
56176 +++ linux-3.0.3/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
56177 @@ -13,6 +13,7 @@
56178
56179 #include <linux/linkage.h>
56180 #include <linux/compat.h>
56181 +#include <linux/module.h>
56182 #include <linux/errno.h>
56183 #include <linux/time.h>
56184 #include <linux/signal.h>
56185 diff -urNp linux-3.0.3/kernel/configs.c linux-3.0.3/kernel/configs.c
56186 --- linux-3.0.3/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
56187 +++ linux-3.0.3/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
56188 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56189 struct proc_dir_entry *entry;
56190
56191 /* create the current config file */
56192 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56193 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56194 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56195 + &ikconfig_file_ops);
56196 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56197 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56198 + &ikconfig_file_ops);
56199 +#endif
56200 +#else
56201 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56202 &ikconfig_file_ops);
56203 +#endif
56204 +
56205 if (!entry)
56206 return -ENOMEM;
56207
56208 diff -urNp linux-3.0.3/kernel/cred.c linux-3.0.3/kernel/cred.c
56209 --- linux-3.0.3/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
56210 +++ linux-3.0.3/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
56211 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56212 */
56213 void __put_cred(struct cred *cred)
56214 {
56215 + pax_track_stack();
56216 +
56217 kdebug("__put_cred(%p{%d,%d})", cred,
56218 atomic_read(&cred->usage),
56219 read_cred_subscribers(cred));
56220 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56221 {
56222 struct cred *cred;
56223
56224 + pax_track_stack();
56225 +
56226 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56227 atomic_read(&tsk->cred->usage),
56228 read_cred_subscribers(tsk->cred));
56229 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56230 {
56231 const struct cred *cred;
56232
56233 + pax_track_stack();
56234 +
56235 rcu_read_lock();
56236
56237 do {
56238 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56239 {
56240 struct cred *new;
56241
56242 + pax_track_stack();
56243 +
56244 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56245 if (!new)
56246 return NULL;
56247 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56248 const struct cred *old;
56249 struct cred *new;
56250
56251 + pax_track_stack();
56252 +
56253 validate_process_creds();
56254
56255 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56256 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56257 struct thread_group_cred *tgcred = NULL;
56258 struct cred *new;
56259
56260 + pax_track_stack();
56261 +
56262 #ifdef CONFIG_KEYS
56263 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56264 if (!tgcred)
56265 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56266 struct cred *new;
56267 int ret;
56268
56269 + pax_track_stack();
56270 +
56271 if (
56272 #ifdef CONFIG_KEYS
56273 !p->cred->thread_keyring &&
56274 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56275 struct task_struct *task = current;
56276 const struct cred *old = task->real_cred;
56277
56278 + pax_track_stack();
56279 +
56280 kdebug("commit_creds(%p{%d,%d})", new,
56281 atomic_read(&new->usage),
56282 read_cred_subscribers(new));
56283 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56284
56285 get_cred(new); /* we will require a ref for the subj creds too */
56286
56287 + gr_set_role_label(task, new->uid, new->gid);
56288 +
56289 /* dumpability changes */
56290 if (old->euid != new->euid ||
56291 old->egid != new->egid ||
56292 @@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
56293 key_fsgid_changed(task);
56294
56295 /* do it
56296 - * - What if a process setreuid()'s and this brings the
56297 - * new uid over his NPROC rlimit? We can check this now
56298 - * cheaply with the new uid cache, so if it matters
56299 - * we should be checking for it. -DaveM
56300 + * RLIMIT_NPROC limits on user->processes have already been checked
56301 + * in set_user().
56302 */
56303 alter_cred_subscribers(new, 2);
56304 if (new->user != old->user)
56305 @@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
56306 */
56307 void abort_creds(struct cred *new)
56308 {
56309 + pax_track_stack();
56310 +
56311 kdebug("abort_creds(%p{%d,%d})", new,
56312 atomic_read(&new->usage),
56313 read_cred_subscribers(new));
56314 @@ -574,6 +592,8 @@ const struct cred *override_creds(const
56315 {
56316 const struct cred *old = current->cred;
56317
56318 + pax_track_stack();
56319 +
56320 kdebug("override_creds(%p{%d,%d})", new,
56321 atomic_read(&new->usage),
56322 read_cred_subscribers(new));
56323 @@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
56324 {
56325 const struct cred *override = current->cred;
56326
56327 + pax_track_stack();
56328 +
56329 kdebug("revert_creds(%p{%d,%d})", old,
56330 atomic_read(&old->usage),
56331 read_cred_subscribers(old));
56332 @@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
56333 const struct cred *old;
56334 struct cred *new;
56335
56336 + pax_track_stack();
56337 +
56338 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56339 if (!new)
56340 return NULL;
56341 @@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56342 */
56343 int set_security_override(struct cred *new, u32 secid)
56344 {
56345 + pax_track_stack();
56346 +
56347 return security_kernel_act_as(new, secid);
56348 }
56349 EXPORT_SYMBOL(set_security_override);
56350 @@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
56351 u32 secid;
56352 int ret;
56353
56354 + pax_track_stack();
56355 +
56356 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56357 if (ret < 0)
56358 return ret;
56359 diff -urNp linux-3.0.3/kernel/debug/debug_core.c linux-3.0.3/kernel/debug/debug_core.c
56360 --- linux-3.0.3/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
56361 +++ linux-3.0.3/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
56362 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56363 */
56364 static atomic_t masters_in_kgdb;
56365 static atomic_t slaves_in_kgdb;
56366 -static atomic_t kgdb_break_tasklet_var;
56367 +static atomic_unchecked_t kgdb_break_tasklet_var;
56368 atomic_t kgdb_setting_breakpoint;
56369
56370 struct task_struct *kgdb_usethread;
56371 @@ -129,7 +129,7 @@ int kgdb_single_step;
56372 static pid_t kgdb_sstep_pid;
56373
56374 /* to keep track of the CPU which is doing the single stepping*/
56375 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56376 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56377
56378 /*
56379 * If you are debugging a problem where roundup (the collection of
56380 @@ -542,7 +542,7 @@ return_normal:
56381 * kernel will only try for the value of sstep_tries before
56382 * giving up and continuing on.
56383 */
56384 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56385 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56386 (kgdb_info[cpu].task &&
56387 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56388 atomic_set(&kgdb_active, -1);
56389 @@ -636,8 +636,8 @@ cpu_master_loop:
56390 }
56391
56392 kgdb_restore:
56393 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56394 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56395 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56396 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56397 if (kgdb_info[sstep_cpu].task)
56398 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56399 else
56400 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56401 static void kgdb_tasklet_bpt(unsigned long ing)
56402 {
56403 kgdb_breakpoint();
56404 - atomic_set(&kgdb_break_tasklet_var, 0);
56405 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56406 }
56407
56408 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56409
56410 void kgdb_schedule_breakpoint(void)
56411 {
56412 - if (atomic_read(&kgdb_break_tasklet_var) ||
56413 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56414 atomic_read(&kgdb_active) != -1 ||
56415 atomic_read(&kgdb_setting_breakpoint))
56416 return;
56417 - atomic_inc(&kgdb_break_tasklet_var);
56418 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56419 tasklet_schedule(&kgdb_tasklet_breakpoint);
56420 }
56421 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56422 diff -urNp linux-3.0.3/kernel/debug/kdb/kdb_main.c linux-3.0.3/kernel/debug/kdb/kdb_main.c
56423 --- linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
56424 +++ linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
56425 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56426 list_for_each_entry(mod, kdb_modules, list) {
56427
56428 kdb_printf("%-20s%8u 0x%p ", mod->name,
56429 - mod->core_size, (void *)mod);
56430 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56431 #ifdef CONFIG_MODULE_UNLOAD
56432 kdb_printf("%4d ", module_refcount(mod));
56433 #endif
56434 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56435 kdb_printf(" (Loading)");
56436 else
56437 kdb_printf(" (Live)");
56438 - kdb_printf(" 0x%p", mod->module_core);
56439 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56440
56441 #ifdef CONFIG_MODULE_UNLOAD
56442 {
56443 diff -urNp linux-3.0.3/kernel/events/core.c linux-3.0.3/kernel/events/core.c
56444 --- linux-3.0.3/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
56445 +++ linux-3.0.3/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
56446 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
56447 return 0;
56448 }
56449
56450 -static atomic64_t perf_event_id;
56451 +static atomic64_unchecked_t perf_event_id;
56452
56453 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
56454 enum event_type_t event_type);
56455 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
56456
56457 static inline u64 perf_event_count(struct perf_event *event)
56458 {
56459 - return local64_read(&event->count) + atomic64_read(&event->child_count);
56460 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
56461 }
56462
56463 static u64 perf_event_read(struct perf_event *event)
56464 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
56465 mutex_lock(&event->child_mutex);
56466 total += perf_event_read(event);
56467 *enabled += event->total_time_enabled +
56468 - atomic64_read(&event->child_total_time_enabled);
56469 + atomic64_read_unchecked(&event->child_total_time_enabled);
56470 *running += event->total_time_running +
56471 - atomic64_read(&event->child_total_time_running);
56472 + atomic64_read_unchecked(&event->child_total_time_running);
56473
56474 list_for_each_entry(child, &event->child_list, child_list) {
56475 total += perf_event_read(child);
56476 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
56477 userpg->offset -= local64_read(&event->hw.prev_count);
56478
56479 userpg->time_enabled = event->total_time_enabled +
56480 - atomic64_read(&event->child_total_time_enabled);
56481 + atomic64_read_unchecked(&event->child_total_time_enabled);
56482
56483 userpg->time_running = event->total_time_running +
56484 - atomic64_read(&event->child_total_time_running);
56485 + atomic64_read_unchecked(&event->child_total_time_running);
56486
56487 barrier();
56488 ++userpg->lock;
56489 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
56490 values[n++] = perf_event_count(event);
56491 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
56492 values[n++] = enabled +
56493 - atomic64_read(&event->child_total_time_enabled);
56494 + atomic64_read_unchecked(&event->child_total_time_enabled);
56495 }
56496 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
56497 values[n++] = running +
56498 - atomic64_read(&event->child_total_time_running);
56499 + atomic64_read_unchecked(&event->child_total_time_running);
56500 }
56501 if (read_format & PERF_FORMAT_ID)
56502 values[n++] = primary_event_id(event);
56503 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
56504 event->parent = parent_event;
56505
56506 event->ns = get_pid_ns(current->nsproxy->pid_ns);
56507 - event->id = atomic64_inc_return(&perf_event_id);
56508 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
56509
56510 event->state = PERF_EVENT_STATE_INACTIVE;
56511
56512 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
56513 /*
56514 * Add back the child's count to the parent's count:
56515 */
56516 - atomic64_add(child_val, &parent_event->child_count);
56517 - atomic64_add(child_event->total_time_enabled,
56518 + atomic64_add_unchecked(child_val, &parent_event->child_count);
56519 + atomic64_add_unchecked(child_event->total_time_enabled,
56520 &parent_event->child_total_time_enabled);
56521 - atomic64_add(child_event->total_time_running,
56522 + atomic64_add_unchecked(child_event->total_time_running,
56523 &parent_event->child_total_time_running);
56524
56525 /*
56526 diff -urNp linux-3.0.3/kernel/exit.c linux-3.0.3/kernel/exit.c
56527 --- linux-3.0.3/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
56528 +++ linux-3.0.3/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
56529 @@ -57,6 +57,10 @@
56530 #include <asm/pgtable.h>
56531 #include <asm/mmu_context.h>
56532
56533 +#ifdef CONFIG_GRKERNSEC
56534 +extern rwlock_t grsec_exec_file_lock;
56535 +#endif
56536 +
56537 static void exit_mm(struct task_struct * tsk);
56538
56539 static void __unhash_process(struct task_struct *p, bool group_dead)
56540 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56541 struct task_struct *leader;
56542 int zap_leader;
56543 repeat:
56544 +#ifdef CONFIG_NET
56545 + gr_del_task_from_ip_table(p);
56546 +#endif
56547 +
56548 tracehook_prepare_release_task(p);
56549 /* don't need to get the RCU readlock here - the process is dead and
56550 * can't be modifying its own credentials. But shut RCU-lockdep up */
56551 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56552 {
56553 write_lock_irq(&tasklist_lock);
56554
56555 +#ifdef CONFIG_GRKERNSEC
56556 + write_lock(&grsec_exec_file_lock);
56557 + if (current->exec_file) {
56558 + fput(current->exec_file);
56559 + current->exec_file = NULL;
56560 + }
56561 + write_unlock(&grsec_exec_file_lock);
56562 +#endif
56563 +
56564 ptrace_unlink(current);
56565 /* Reparent to init */
56566 current->real_parent = current->parent = kthreadd_task;
56567 list_move_tail(&current->sibling, &current->real_parent->children);
56568
56569 + gr_set_kernel_label(current);
56570 +
56571 /* Set the exit signal to SIGCHLD so we signal init on exit */
56572 current->exit_signal = SIGCHLD;
56573
56574 @@ -394,7 +413,7 @@ int allow_signal(int sig)
56575 * know it'll be handled, so that they don't get converted to
56576 * SIGKILL or just silently dropped.
56577 */
56578 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56579 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56580 recalc_sigpending();
56581 spin_unlock_irq(&current->sighand->siglock);
56582 return 0;
56583 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56584 vsnprintf(current->comm, sizeof(current->comm), name, args);
56585 va_end(args);
56586
56587 +#ifdef CONFIG_GRKERNSEC
56588 + write_lock(&grsec_exec_file_lock);
56589 + if (current->exec_file) {
56590 + fput(current->exec_file);
56591 + current->exec_file = NULL;
56592 + }
56593 + write_unlock(&grsec_exec_file_lock);
56594 +#endif
56595 +
56596 + gr_set_kernel_label(current);
56597 +
56598 /*
56599 * If we were started as result of loading a module, close all of the
56600 * user space pages. We don't need them, and if we didn't close them
56601 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
56602 struct task_struct *tsk = current;
56603 int group_dead;
56604
56605 - profile_task_exit(tsk);
56606 -
56607 - WARN_ON(atomic_read(&tsk->fs_excl));
56608 - WARN_ON(blk_needs_flush_plug(tsk));
56609 -
56610 if (unlikely(in_interrupt()))
56611 panic("Aiee, killing interrupt handler!");
56612 - if (unlikely(!tsk->pid))
56613 - panic("Attempted to kill the idle task!");
56614
56615 /*
56616 * If do_exit is called because this processes oopsed, it's possible
56617 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
56618 */
56619 set_fs(USER_DS);
56620
56621 + profile_task_exit(tsk);
56622 +
56623 + WARN_ON(atomic_read(&tsk->fs_excl));
56624 + WARN_ON(blk_needs_flush_plug(tsk));
56625 +
56626 + if (unlikely(!tsk->pid))
56627 + panic("Attempted to kill the idle task!");
56628 +
56629 tracehook_report_exit(&code);
56630
56631 validate_creds_for_do_exit(tsk);
56632 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
56633 tsk->exit_code = code;
56634 taskstats_exit(tsk, group_dead);
56635
56636 + gr_acl_handle_psacct(tsk, code);
56637 + gr_acl_handle_exit();
56638 +
56639 exit_mm(tsk);
56640
56641 if (group_dead)
56642 diff -urNp linux-3.0.3/kernel/fork.c linux-3.0.3/kernel/fork.c
56643 --- linux-3.0.3/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
56644 +++ linux-3.0.3/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
56645 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
56646 *stackend = STACK_END_MAGIC; /* for overflow detection */
56647
56648 #ifdef CONFIG_CC_STACKPROTECTOR
56649 - tsk->stack_canary = get_random_int();
56650 + tsk->stack_canary = pax_get_random_long();
56651 #endif
56652
56653 /* One for us, one for whoever does the "release_task()" (usually parent) */
56654 @@ -308,13 +308,77 @@ out:
56655 }
56656
56657 #ifdef CONFIG_MMU
56658 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56659 +{
56660 + struct vm_area_struct *tmp;
56661 + unsigned long charge;
56662 + struct mempolicy *pol;
56663 + struct file *file;
56664 +
56665 + charge = 0;
56666 + if (mpnt->vm_flags & VM_ACCOUNT) {
56667 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56668 + if (security_vm_enough_memory(len))
56669 + goto fail_nomem;
56670 + charge = len;
56671 + }
56672 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56673 + if (!tmp)
56674 + goto fail_nomem;
56675 + *tmp = *mpnt;
56676 + tmp->vm_mm = mm;
56677 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56678 + pol = mpol_dup(vma_policy(mpnt));
56679 + if (IS_ERR(pol))
56680 + goto fail_nomem_policy;
56681 + vma_set_policy(tmp, pol);
56682 + if (anon_vma_fork(tmp, mpnt))
56683 + goto fail_nomem_anon_vma_fork;
56684 + tmp->vm_flags &= ~VM_LOCKED;
56685 + tmp->vm_next = tmp->vm_prev = NULL;
56686 + tmp->vm_mirror = NULL;
56687 + file = tmp->vm_file;
56688 + if (file) {
56689 + struct inode *inode = file->f_path.dentry->d_inode;
56690 + struct address_space *mapping = file->f_mapping;
56691 +
56692 + get_file(file);
56693 + if (tmp->vm_flags & VM_DENYWRITE)
56694 + atomic_dec(&inode->i_writecount);
56695 + mutex_lock(&mapping->i_mmap_mutex);
56696 + if (tmp->vm_flags & VM_SHARED)
56697 + mapping->i_mmap_writable++;
56698 + flush_dcache_mmap_lock(mapping);
56699 + /* insert tmp into the share list, just after mpnt */
56700 + vma_prio_tree_add(tmp, mpnt);
56701 + flush_dcache_mmap_unlock(mapping);
56702 + mutex_unlock(&mapping->i_mmap_mutex);
56703 + }
56704 +
56705 + /*
56706 + * Clear hugetlb-related page reserves for children. This only
56707 + * affects MAP_PRIVATE mappings. Faults generated by the child
56708 + * are not guaranteed to succeed, even if read-only
56709 + */
56710 + if (is_vm_hugetlb_page(tmp))
56711 + reset_vma_resv_huge_pages(tmp);
56712 +
56713 + return tmp;
56714 +
56715 +fail_nomem_anon_vma_fork:
56716 + mpol_put(pol);
56717 +fail_nomem_policy:
56718 + kmem_cache_free(vm_area_cachep, tmp);
56719 +fail_nomem:
56720 + vm_unacct_memory(charge);
56721 + return NULL;
56722 +}
56723 +
56724 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56725 {
56726 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56727 struct rb_node **rb_link, *rb_parent;
56728 int retval;
56729 - unsigned long charge;
56730 - struct mempolicy *pol;
56731
56732 down_write(&oldmm->mmap_sem);
56733 flush_cache_dup_mm(oldmm);
56734 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
56735 mm->locked_vm = 0;
56736 mm->mmap = NULL;
56737 mm->mmap_cache = NULL;
56738 - mm->free_area_cache = oldmm->mmap_base;
56739 - mm->cached_hole_size = ~0UL;
56740 + mm->free_area_cache = oldmm->free_area_cache;
56741 + mm->cached_hole_size = oldmm->cached_hole_size;
56742 mm->map_count = 0;
56743 cpumask_clear(mm_cpumask(mm));
56744 mm->mm_rb = RB_ROOT;
56745 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
56746
56747 prev = NULL;
56748 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56749 - struct file *file;
56750 -
56751 if (mpnt->vm_flags & VM_DONTCOPY) {
56752 long pages = vma_pages(mpnt);
56753 mm->total_vm -= pages;
56754 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
56755 -pages);
56756 continue;
56757 }
56758 - charge = 0;
56759 - if (mpnt->vm_flags & VM_ACCOUNT) {
56760 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56761 - if (security_vm_enough_memory(len))
56762 - goto fail_nomem;
56763 - charge = len;
56764 - }
56765 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56766 - if (!tmp)
56767 - goto fail_nomem;
56768 - *tmp = *mpnt;
56769 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56770 - pol = mpol_dup(vma_policy(mpnt));
56771 - retval = PTR_ERR(pol);
56772 - if (IS_ERR(pol))
56773 - goto fail_nomem_policy;
56774 - vma_set_policy(tmp, pol);
56775 - tmp->vm_mm = mm;
56776 - if (anon_vma_fork(tmp, mpnt))
56777 - goto fail_nomem_anon_vma_fork;
56778 - tmp->vm_flags &= ~VM_LOCKED;
56779 - tmp->vm_next = tmp->vm_prev = NULL;
56780 - file = tmp->vm_file;
56781 - if (file) {
56782 - struct inode *inode = file->f_path.dentry->d_inode;
56783 - struct address_space *mapping = file->f_mapping;
56784 -
56785 - get_file(file);
56786 - if (tmp->vm_flags & VM_DENYWRITE)
56787 - atomic_dec(&inode->i_writecount);
56788 - mutex_lock(&mapping->i_mmap_mutex);
56789 - if (tmp->vm_flags & VM_SHARED)
56790 - mapping->i_mmap_writable++;
56791 - flush_dcache_mmap_lock(mapping);
56792 - /* insert tmp into the share list, just after mpnt */
56793 - vma_prio_tree_add(tmp, mpnt);
56794 - flush_dcache_mmap_unlock(mapping);
56795 - mutex_unlock(&mapping->i_mmap_mutex);
56796 + tmp = dup_vma(mm, mpnt);
56797 + if (!tmp) {
56798 + retval = -ENOMEM;
56799 + goto out;
56800 }
56801
56802 /*
56803 - * Clear hugetlb-related page reserves for children. This only
56804 - * affects MAP_PRIVATE mappings. Faults generated by the child
56805 - * are not guaranteed to succeed, even if read-only
56806 - */
56807 - if (is_vm_hugetlb_page(tmp))
56808 - reset_vma_resv_huge_pages(tmp);
56809 -
56810 - /*
56811 * Link in the new vma and copy the page table entries.
56812 */
56813 *pprev = tmp;
56814 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
56815 if (retval)
56816 goto out;
56817 }
56818 +
56819 +#ifdef CONFIG_PAX_SEGMEXEC
56820 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56821 + struct vm_area_struct *mpnt_m;
56822 +
56823 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56824 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56825 +
56826 + if (!mpnt->vm_mirror)
56827 + continue;
56828 +
56829 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56830 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56831 + mpnt->vm_mirror = mpnt_m;
56832 + } else {
56833 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56834 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56835 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56836 + mpnt->vm_mirror->vm_mirror = mpnt;
56837 + }
56838 + }
56839 + BUG_ON(mpnt_m);
56840 + }
56841 +#endif
56842 +
56843 /* a new mm has just been created */
56844 arch_dup_mmap(oldmm, mm);
56845 retval = 0;
56846 @@ -429,14 +474,6 @@ out:
56847 flush_tlb_mm(oldmm);
56848 up_write(&oldmm->mmap_sem);
56849 return retval;
56850 -fail_nomem_anon_vma_fork:
56851 - mpol_put(pol);
56852 -fail_nomem_policy:
56853 - kmem_cache_free(vm_area_cachep, tmp);
56854 -fail_nomem:
56855 - retval = -ENOMEM;
56856 - vm_unacct_memory(charge);
56857 - goto out;
56858 }
56859
56860 static inline int mm_alloc_pgd(struct mm_struct * mm)
56861 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
56862 spin_unlock(&fs->lock);
56863 return -EAGAIN;
56864 }
56865 - fs->users++;
56866 + atomic_inc(&fs->users);
56867 spin_unlock(&fs->lock);
56868 return 0;
56869 }
56870 tsk->fs = copy_fs_struct(fs);
56871 if (!tsk->fs)
56872 return -ENOMEM;
56873 + gr_set_chroot_entries(tsk, &tsk->fs->root);
56874 return 0;
56875 }
56876
56877 @@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
56878 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56879 #endif
56880 retval = -EAGAIN;
56881 +
56882 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56883 +
56884 if (atomic_read(&p->real_cred->user->processes) >=
56885 task_rlimit(p, RLIMIT_NPROC)) {
56886 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56887 - p->real_cred->user != INIT_USER)
56888 + if (p->real_cred->user != INIT_USER &&
56889 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56890 goto bad_fork_free;
56891 }
56892 + current->flags &= ~PF_NPROC_EXCEEDED;
56893
56894 retval = copy_creds(p, clone_flags);
56895 if (retval < 0)
56896 @@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
56897 if (clone_flags & CLONE_THREAD)
56898 p->tgid = current->tgid;
56899
56900 + gr_copy_label(p);
56901 +
56902 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56903 /*
56904 * Clear TID on mm_release()?
56905 @@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
56906 bad_fork_free:
56907 free_task(p);
56908 fork_out:
56909 + gr_log_forkfail(retval);
56910 +
56911 return ERR_PTR(retval);
56912 }
56913
56914 @@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
56915 if (clone_flags & CLONE_PARENT_SETTID)
56916 put_user(nr, parent_tidptr);
56917
56918 + gr_handle_brute_check();
56919 +
56920 if (clone_flags & CLONE_VFORK) {
56921 p->vfork_done = &vfork;
56922 init_completion(&vfork);
56923 @@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
56924 return 0;
56925
56926 /* don't need lock here; in the worst case we'll do useless copy */
56927 - if (fs->users == 1)
56928 + if (atomic_read(&fs->users) == 1)
56929 return 0;
56930
56931 *new_fsp = copy_fs_struct(fs);
56932 @@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
56933 fs = current->fs;
56934 spin_lock(&fs->lock);
56935 current->fs = new_fs;
56936 - if (--fs->users)
56937 + gr_set_chroot_entries(current, &current->fs->root);
56938 + if (atomic_dec_return(&fs->users))
56939 new_fs = NULL;
56940 else
56941 new_fs = fs;
56942 diff -urNp linux-3.0.3/kernel/futex.c linux-3.0.3/kernel/futex.c
56943 --- linux-3.0.3/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
56944 +++ linux-3.0.3/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
56945 @@ -54,6 +54,7 @@
56946 #include <linux/mount.h>
56947 #include <linux/pagemap.h>
56948 #include <linux/syscalls.h>
56949 +#include <linux/ptrace.h>
56950 #include <linux/signal.h>
56951 #include <linux/module.h>
56952 #include <linux/magic.h>
56953 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
56954 struct page *page, *page_head;
56955 int err, ro = 0;
56956
56957 +#ifdef CONFIG_PAX_SEGMEXEC
56958 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
56959 + return -EFAULT;
56960 +#endif
56961 +
56962 /*
56963 * The futex address must be "naturally" aligned.
56964 */
56965 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
56966 struct futex_q q = futex_q_init;
56967 int ret;
56968
56969 + pax_track_stack();
56970 +
56971 if (!bitset)
56972 return -EINVAL;
56973 q.bitset = bitset;
56974 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
56975 struct futex_q q = futex_q_init;
56976 int res, ret;
56977
56978 + pax_track_stack();
56979 +
56980 if (!bitset)
56981 return -EINVAL;
56982
56983 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56984 {
56985 struct robust_list_head __user *head;
56986 unsigned long ret;
56987 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56988 const struct cred *cred = current_cred(), *pcred;
56989 +#endif
56990
56991 if (!futex_cmpxchg_enabled)
56992 return -ENOSYS;
56993 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56994 if (!p)
56995 goto err_unlock;
56996 ret = -EPERM;
56997 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56998 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
56999 + goto err_unlock;
57000 +#else
57001 pcred = __task_cred(p);
57002 /* If victim is in different user_ns, then uids are not
57003 comparable, so we must have CAP_SYS_PTRACE */
57004 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57005 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57006 goto err_unlock;
57007 ok:
57008 +#endif
57009 head = p->robust_list;
57010 rcu_read_unlock();
57011 }
57012 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
57013 {
57014 u32 curval;
57015 int i;
57016 + mm_segment_t oldfs;
57017
57018 /*
57019 * This will fail and we want it. Some arch implementations do
57020 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
57021 * implementation, the non-functional ones will return
57022 * -ENOSYS.
57023 */
57024 + oldfs = get_fs();
57025 + set_fs(USER_DS);
57026 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57027 futex_cmpxchg_enabled = 1;
57028 + set_fs(oldfs);
57029
57030 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57031 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57032 diff -urNp linux-3.0.3/kernel/futex_compat.c linux-3.0.3/kernel/futex_compat.c
57033 --- linux-3.0.3/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
57034 +++ linux-3.0.3/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
57035 @@ -10,6 +10,7 @@
57036 #include <linux/compat.h>
57037 #include <linux/nsproxy.h>
57038 #include <linux/futex.h>
57039 +#include <linux/ptrace.h>
57040
57041 #include <asm/uaccess.h>
57042
57043 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57044 {
57045 struct compat_robust_list_head __user *head;
57046 unsigned long ret;
57047 - const struct cred *cred = current_cred(), *pcred;
57048 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57049 + const struct cred *cred = current_cred();
57050 + const struct cred *pcred;
57051 +#endif
57052
57053 if (!futex_cmpxchg_enabled)
57054 return -ENOSYS;
57055 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57056 if (!p)
57057 goto err_unlock;
57058 ret = -EPERM;
57059 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57060 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57061 + goto err_unlock;
57062 +#else
57063 pcred = __task_cred(p);
57064 /* If victim is in different user_ns, then uids are not
57065 comparable, so we must have CAP_SYS_PTRACE */
57066 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57067 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57068 goto err_unlock;
57069 ok:
57070 +#endif
57071 head = p->compat_robust_list;
57072 rcu_read_unlock();
57073 }
57074 diff -urNp linux-3.0.3/kernel/gcov/base.c linux-3.0.3/kernel/gcov/base.c
57075 --- linux-3.0.3/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
57076 +++ linux-3.0.3/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
57077 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57078 }
57079
57080 #ifdef CONFIG_MODULES
57081 -static inline int within(void *addr, void *start, unsigned long size)
57082 -{
57083 - return ((addr >= start) && (addr < start + size));
57084 -}
57085 -
57086 /* Update list and generate events when modules are unloaded. */
57087 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57088 void *data)
57089 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57090 prev = NULL;
57091 /* Remove entries located in module from linked list. */
57092 for (info = gcov_info_head; info; info = info->next) {
57093 - if (within(info, mod->module_core, mod->core_size)) {
57094 + if (within_module_core_rw((unsigned long)info, mod)) {
57095 if (prev)
57096 prev->next = info->next;
57097 else
57098 diff -urNp linux-3.0.3/kernel/hrtimer.c linux-3.0.3/kernel/hrtimer.c
57099 --- linux-3.0.3/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
57100 +++ linux-3.0.3/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
57101 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
57102 local_irq_restore(flags);
57103 }
57104
57105 -static void run_hrtimer_softirq(struct softirq_action *h)
57106 +static void run_hrtimer_softirq(void)
57107 {
57108 hrtimer_peek_ahead_timers();
57109 }
57110 diff -urNp linux-3.0.3/kernel/jump_label.c linux-3.0.3/kernel/jump_label.c
57111 --- linux-3.0.3/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
57112 +++ linux-3.0.3/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
57113 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
57114
57115 size = (((unsigned long)stop - (unsigned long)start)
57116 / sizeof(struct jump_entry));
57117 + pax_open_kernel();
57118 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57119 + pax_close_kernel();
57120 }
57121
57122 static void jump_label_update(struct jump_label_key *key, int enable);
57123 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
57124 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
57125 struct jump_entry *iter;
57126
57127 + pax_open_kernel();
57128 for (iter = iter_start; iter < iter_stop; iter++) {
57129 if (within_module_init(iter->code, mod))
57130 iter->code = 0;
57131 }
57132 + pax_close_kernel();
57133 }
57134
57135 static int
57136 diff -urNp linux-3.0.3/kernel/kallsyms.c linux-3.0.3/kernel/kallsyms.c
57137 --- linux-3.0.3/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
57138 +++ linux-3.0.3/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
57139 @@ -11,6 +11,9 @@
57140 * Changed the compression method from stem compression to "table lookup"
57141 * compression (see scripts/kallsyms.c for a more complete description)
57142 */
57143 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57144 +#define __INCLUDED_BY_HIDESYM 1
57145 +#endif
57146 #include <linux/kallsyms.h>
57147 #include <linux/module.h>
57148 #include <linux/init.h>
57149 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57150
57151 static inline int is_kernel_inittext(unsigned long addr)
57152 {
57153 + if (system_state != SYSTEM_BOOTING)
57154 + return 0;
57155 +
57156 if (addr >= (unsigned long)_sinittext
57157 && addr <= (unsigned long)_einittext)
57158 return 1;
57159 return 0;
57160 }
57161
57162 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57163 +#ifdef CONFIG_MODULES
57164 +static inline int is_module_text(unsigned long addr)
57165 +{
57166 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57167 + return 1;
57168 +
57169 + addr = ktla_ktva(addr);
57170 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57171 +}
57172 +#else
57173 +static inline int is_module_text(unsigned long addr)
57174 +{
57175 + return 0;
57176 +}
57177 +#endif
57178 +#endif
57179 +
57180 static inline int is_kernel_text(unsigned long addr)
57181 {
57182 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57183 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57184
57185 static inline int is_kernel(unsigned long addr)
57186 {
57187 +
57188 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57189 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57190 + return 1;
57191 +
57192 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57193 +#else
57194 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57195 +#endif
57196 +
57197 return 1;
57198 return in_gate_area_no_mm(addr);
57199 }
57200
57201 static int is_ksym_addr(unsigned long addr)
57202 {
57203 +
57204 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57205 + if (is_module_text(addr))
57206 + return 0;
57207 +#endif
57208 +
57209 if (all_var)
57210 return is_kernel(addr);
57211
57212 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57213
57214 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57215 {
57216 - iter->name[0] = '\0';
57217 iter->nameoff = get_symbol_offset(new_pos);
57218 iter->pos = new_pos;
57219 }
57220 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57221 {
57222 struct kallsym_iter *iter = m->private;
57223
57224 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57225 + if (current_uid())
57226 + return 0;
57227 +#endif
57228 +
57229 /* Some debugging symbols have no name. Ignore them. */
57230 if (!iter->name[0])
57231 return 0;
57232 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57233 struct kallsym_iter *iter;
57234 int ret;
57235
57236 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57237 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57238 if (!iter)
57239 return -ENOMEM;
57240 reset_iter(iter, 0);
57241 diff -urNp linux-3.0.3/kernel/kmod.c linux-3.0.3/kernel/kmod.c
57242 --- linux-3.0.3/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
57243 +++ linux-3.0.3/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
57244 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57245 * If module auto-loading support is disabled then this function
57246 * becomes a no-operation.
57247 */
57248 -int __request_module(bool wait, const char *fmt, ...)
57249 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57250 {
57251 - va_list args;
57252 char module_name[MODULE_NAME_LEN];
57253 unsigned int max_modprobes;
57254 int ret;
57255 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57256 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57257 static char *envp[] = { "HOME=/",
57258 "TERM=linux",
57259 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57260 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
57261 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57262 static int kmod_loop_msg;
57263
57264 - va_start(args, fmt);
57265 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57266 - va_end(args);
57267 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57268 if (ret >= MODULE_NAME_LEN)
57269 return -ENAMETOOLONG;
57270
57271 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
57272 if (ret)
57273 return ret;
57274
57275 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57276 + if (!current_uid()) {
57277 + /* hack to workaround consolekit/udisks stupidity */
57278 + read_lock(&tasklist_lock);
57279 + if (!strcmp(current->comm, "mount") &&
57280 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57281 + read_unlock(&tasklist_lock);
57282 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57283 + return -EPERM;
57284 + }
57285 + read_unlock(&tasklist_lock);
57286 + }
57287 +#endif
57288 +
57289 /* If modprobe needs a service that is in a module, we get a recursive
57290 * loop. Limit the number of running kmod threads to max_threads/2 or
57291 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57292 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
57293 atomic_dec(&kmod_concurrent);
57294 return ret;
57295 }
57296 +
57297 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57298 +{
57299 + va_list args;
57300 + int ret;
57301 +
57302 + va_start(args, fmt);
57303 + ret = ____request_module(wait, module_param, fmt, args);
57304 + va_end(args);
57305 +
57306 + return ret;
57307 +}
57308 +
57309 +int __request_module(bool wait, const char *fmt, ...)
57310 +{
57311 + va_list args;
57312 + int ret;
57313 +
57314 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57315 + if (current_uid()) {
57316 + char module_param[MODULE_NAME_LEN];
57317 +
57318 + memset(module_param, 0, sizeof(module_param));
57319 +
57320 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57321 +
57322 + va_start(args, fmt);
57323 + ret = ____request_module(wait, module_param, fmt, args);
57324 + va_end(args);
57325 +
57326 + return ret;
57327 + }
57328 +#endif
57329 +
57330 + va_start(args, fmt);
57331 + ret = ____request_module(wait, NULL, fmt, args);
57332 + va_end(args);
57333 +
57334 + return ret;
57335 +}
57336 +
57337 EXPORT_SYMBOL(__request_module);
57338 #endif /* CONFIG_MODULES */
57339
57340 diff -urNp linux-3.0.3/kernel/kprobes.c linux-3.0.3/kernel/kprobes.c
57341 --- linux-3.0.3/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
57342 +++ linux-3.0.3/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
57343 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57344 * kernel image and loaded module images reside. This is required
57345 * so x86_64 can correctly handle the %rip-relative fixups.
57346 */
57347 - kip->insns = module_alloc(PAGE_SIZE);
57348 + kip->insns = module_alloc_exec(PAGE_SIZE);
57349 if (!kip->insns) {
57350 kfree(kip);
57351 return NULL;
57352 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57353 */
57354 if (!list_is_singular(&kip->list)) {
57355 list_del(&kip->list);
57356 - module_free(NULL, kip->insns);
57357 + module_free_exec(NULL, kip->insns);
57358 kfree(kip);
57359 }
57360 return 1;
57361 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57362 {
57363 int i, err = 0;
57364 unsigned long offset = 0, size = 0;
57365 - char *modname, namebuf[128];
57366 + char *modname, namebuf[KSYM_NAME_LEN];
57367 const char *symbol_name;
57368 void *addr;
57369 struct kprobe_blackpoint *kb;
57370 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57371 const char *sym = NULL;
57372 unsigned int i = *(loff_t *) v;
57373 unsigned long offset = 0;
57374 - char *modname, namebuf[128];
57375 + char *modname, namebuf[KSYM_NAME_LEN];
57376
57377 head = &kprobe_table[i];
57378 preempt_disable();
57379 diff -urNp linux-3.0.3/kernel/lockdep.c linux-3.0.3/kernel/lockdep.c
57380 --- linux-3.0.3/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
57381 +++ linux-3.0.3/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
57382 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
57383 end = (unsigned long) &_end,
57384 addr = (unsigned long) obj;
57385
57386 +#ifdef CONFIG_PAX_KERNEXEC
57387 + start = ktla_ktva(start);
57388 +#endif
57389 +
57390 /*
57391 * static variable?
57392 */
57393 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
57394 if (!static_obj(lock->key)) {
57395 debug_locks_off();
57396 printk("INFO: trying to register non-static key.\n");
57397 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57398 printk("the code is fine but needs lockdep annotation.\n");
57399 printk("turning off the locking correctness validator.\n");
57400 dump_stack();
57401 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
57402 if (!class)
57403 return 0;
57404 }
57405 - atomic_inc((atomic_t *)&class->ops);
57406 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57407 if (very_verbose(class)) {
57408 printk("\nacquire class [%p] %s", class->key, class->name);
57409 if (class->name_version > 1)
57410 diff -urNp linux-3.0.3/kernel/lockdep_proc.c linux-3.0.3/kernel/lockdep_proc.c
57411 --- linux-3.0.3/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
57412 +++ linux-3.0.3/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
57413 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57414
57415 static void print_name(struct seq_file *m, struct lock_class *class)
57416 {
57417 - char str[128];
57418 + char str[KSYM_NAME_LEN];
57419 const char *name = class->name;
57420
57421 if (!name) {
57422 diff -urNp linux-3.0.3/kernel/module.c linux-3.0.3/kernel/module.c
57423 --- linux-3.0.3/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
57424 +++ linux-3.0.3/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
57425 @@ -58,6 +58,7 @@
57426 #include <linux/jump_label.h>
57427 #include <linux/pfn.h>
57428 #include <linux/bsearch.h>
57429 +#include <linux/grsecurity.h>
57430
57431 #define CREATE_TRACE_POINTS
57432 #include <trace/events/module.h>
57433 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57434
57435 /* Bounds of module allocation, for speeding __module_address.
57436 * Protected by module_mutex. */
57437 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57438 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57439 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57440
57441 int register_module_notifier(struct notifier_block * nb)
57442 {
57443 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
57444 return true;
57445
57446 list_for_each_entry_rcu(mod, &modules, list) {
57447 - struct symsearch arr[] = {
57448 + struct symsearch modarr[] = {
57449 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57450 NOT_GPL_ONLY, false },
57451 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57452 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
57453 #endif
57454 };
57455
57456 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57457 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57458 return true;
57459 }
57460 return false;
57461 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
57462 static int percpu_modalloc(struct module *mod,
57463 unsigned long size, unsigned long align)
57464 {
57465 - if (align > PAGE_SIZE) {
57466 + if (align-1 >= PAGE_SIZE) {
57467 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57468 mod->name, align, PAGE_SIZE);
57469 align = PAGE_SIZE;
57470 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
57471 */
57472 #ifdef CONFIG_SYSFS
57473
57474 -#ifdef CONFIG_KALLSYMS
57475 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57476 static inline bool sect_empty(const Elf_Shdr *sect)
57477 {
57478 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57479 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
57480
57481 static void unset_module_core_ro_nx(struct module *mod)
57482 {
57483 - set_page_attributes(mod->module_core + mod->core_text_size,
57484 - mod->module_core + mod->core_size,
57485 + set_page_attributes(mod->module_core_rw,
57486 + mod->module_core_rw + mod->core_size_rw,
57487 set_memory_x);
57488 - set_page_attributes(mod->module_core,
57489 - mod->module_core + mod->core_ro_size,
57490 + set_page_attributes(mod->module_core_rx,
57491 + mod->module_core_rx + mod->core_size_rx,
57492 set_memory_rw);
57493 }
57494
57495 static void unset_module_init_ro_nx(struct module *mod)
57496 {
57497 - set_page_attributes(mod->module_init + mod->init_text_size,
57498 - mod->module_init + mod->init_size,
57499 + set_page_attributes(mod->module_init_rw,
57500 + mod->module_init_rw + mod->init_size_rw,
57501 set_memory_x);
57502 - set_page_attributes(mod->module_init,
57503 - mod->module_init + mod->init_ro_size,
57504 + set_page_attributes(mod->module_init_rx,
57505 + mod->module_init_rx + mod->init_size_rx,
57506 set_memory_rw);
57507 }
57508
57509 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
57510
57511 mutex_lock(&module_mutex);
57512 list_for_each_entry_rcu(mod, &modules, list) {
57513 - if ((mod->module_core) && (mod->core_text_size)) {
57514 - set_page_attributes(mod->module_core,
57515 - mod->module_core + mod->core_text_size,
57516 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57517 + set_page_attributes(mod->module_core_rx,
57518 + mod->module_core_rx + mod->core_size_rx,
57519 set_memory_rw);
57520 }
57521 - if ((mod->module_init) && (mod->init_text_size)) {
57522 - set_page_attributes(mod->module_init,
57523 - mod->module_init + mod->init_text_size,
57524 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57525 + set_page_attributes(mod->module_init_rx,
57526 + mod->module_init_rx + mod->init_size_rx,
57527 set_memory_rw);
57528 }
57529 }
57530 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
57531
57532 mutex_lock(&module_mutex);
57533 list_for_each_entry_rcu(mod, &modules, list) {
57534 - if ((mod->module_core) && (mod->core_text_size)) {
57535 - set_page_attributes(mod->module_core,
57536 - mod->module_core + mod->core_text_size,
57537 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57538 + set_page_attributes(mod->module_core_rx,
57539 + mod->module_core_rx + mod->core_size_rx,
57540 set_memory_ro);
57541 }
57542 - if ((mod->module_init) && (mod->init_text_size)) {
57543 - set_page_attributes(mod->module_init,
57544 - mod->module_init + mod->init_text_size,
57545 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57546 + set_page_attributes(mod->module_init_rx,
57547 + mod->module_init_rx + mod->init_size_rx,
57548 set_memory_ro);
57549 }
57550 }
57551 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
57552
57553 /* This may be NULL, but that's OK */
57554 unset_module_init_ro_nx(mod);
57555 - module_free(mod, mod->module_init);
57556 + module_free(mod, mod->module_init_rw);
57557 + module_free_exec(mod, mod->module_init_rx);
57558 kfree(mod->args);
57559 percpu_modfree(mod);
57560
57561 /* Free lock-classes: */
57562 - lockdep_free_key_range(mod->module_core, mod->core_size);
57563 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57564 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57565
57566 /* Finally, free the core (containing the module structure) */
57567 unset_module_core_ro_nx(mod);
57568 - module_free(mod, mod->module_core);
57569 + module_free_exec(mod, mod->module_core_rx);
57570 + module_free(mod, mod->module_core_rw);
57571
57572 #ifdef CONFIG_MPU
57573 update_protections(current->mm);
57574 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
57575 unsigned int i;
57576 int ret = 0;
57577 const struct kernel_symbol *ksym;
57578 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57579 + int is_fs_load = 0;
57580 + int register_filesystem_found = 0;
57581 + char *p;
57582 +
57583 + p = strstr(mod->args, "grsec_modharden_fs");
57584 + if (p) {
57585 + char *endptr = p + strlen("grsec_modharden_fs");
57586 + /* copy \0 as well */
57587 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57588 + is_fs_load = 1;
57589 + }
57590 +#endif
57591
57592 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57593 const char *name = info->strtab + sym[i].st_name;
57594
57595 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57596 + /* it's a real shame this will never get ripped and copied
57597 + upstream! ;(
57598 + */
57599 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57600 + register_filesystem_found = 1;
57601 +#endif
57602 +
57603 switch (sym[i].st_shndx) {
57604 case SHN_COMMON:
57605 /* We compiled with -fno-common. These are not
57606 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
57607 ksym = resolve_symbol_wait(mod, info, name);
57608 /* Ok if resolved. */
57609 if (ksym && !IS_ERR(ksym)) {
57610 + pax_open_kernel();
57611 sym[i].st_value = ksym->value;
57612 + pax_close_kernel();
57613 break;
57614 }
57615
57616 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
57617 secbase = (unsigned long)mod_percpu(mod);
57618 else
57619 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57620 + pax_open_kernel();
57621 sym[i].st_value += secbase;
57622 + pax_close_kernel();
57623 break;
57624 }
57625 }
57626
57627 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57628 + if (is_fs_load && !register_filesystem_found) {
57629 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57630 + ret = -EPERM;
57631 + }
57632 +#endif
57633 +
57634 return ret;
57635 }
57636
57637 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
57638 || s->sh_entsize != ~0UL
57639 || strstarts(sname, ".init"))
57640 continue;
57641 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57642 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57643 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57644 + else
57645 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57646 DEBUGP("\t%s\n", name);
57647 }
57648 - switch (m) {
57649 - case 0: /* executable */
57650 - mod->core_size = debug_align(mod->core_size);
57651 - mod->core_text_size = mod->core_size;
57652 - break;
57653 - case 1: /* RO: text and ro-data */
57654 - mod->core_size = debug_align(mod->core_size);
57655 - mod->core_ro_size = mod->core_size;
57656 - break;
57657 - case 3: /* whole core */
57658 - mod->core_size = debug_align(mod->core_size);
57659 - break;
57660 - }
57661 }
57662
57663 DEBUGP("Init section allocation order:\n");
57664 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
57665 || s->sh_entsize != ~0UL
57666 || !strstarts(sname, ".init"))
57667 continue;
57668 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57669 - | INIT_OFFSET_MASK);
57670 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57671 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57672 + else
57673 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57674 + s->sh_entsize |= INIT_OFFSET_MASK;
57675 DEBUGP("\t%s\n", sname);
57676 }
57677 - switch (m) {
57678 - case 0: /* executable */
57679 - mod->init_size = debug_align(mod->init_size);
57680 - mod->init_text_size = mod->init_size;
57681 - break;
57682 - case 1: /* RO: text and ro-data */
57683 - mod->init_size = debug_align(mod->init_size);
57684 - mod->init_ro_size = mod->init_size;
57685 - break;
57686 - case 3: /* whole init */
57687 - mod->init_size = debug_align(mod->init_size);
57688 - break;
57689 - }
57690 }
57691 }
57692
57693 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
57694
57695 /* Put symbol section at end of init part of module. */
57696 symsect->sh_flags |= SHF_ALLOC;
57697 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57698 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57699 info->index.sym) | INIT_OFFSET_MASK;
57700 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57701
57702 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
57703 }
57704
57705 /* Append room for core symbols at end of core part. */
57706 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57707 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57708 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57709 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57710
57711 /* Put string table section at end of init part of module. */
57712 strsect->sh_flags |= SHF_ALLOC;
57713 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57714 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57715 info->index.str) | INIT_OFFSET_MASK;
57716 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57717
57718 /* Append room for core symbols' strings at end of core part. */
57719 - info->stroffs = mod->core_size;
57720 + info->stroffs = mod->core_size_rx;
57721 __set_bit(0, info->strmap);
57722 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57723 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57724 }
57725
57726 static void add_kallsyms(struct module *mod, const struct load_info *info)
57727 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
57728 /* Make sure we get permanent strtab: don't use info->strtab. */
57729 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57730
57731 + pax_open_kernel();
57732 +
57733 /* Set types up while we still have access to sections. */
57734 for (i = 0; i < mod->num_symtab; i++)
57735 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57736
57737 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57738 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57739 src = mod->symtab;
57740 *dst = *src;
57741 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57742 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
57743 }
57744 mod->core_num_syms = ndst;
57745
57746 - mod->core_strtab = s = mod->module_core + info->stroffs;
57747 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57748 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57749 if (test_bit(i, info->strmap))
57750 *++s = mod->strtab[i];
57751 +
57752 + pax_close_kernel();
57753 }
57754 #else
57755 static inline void layout_symtab(struct module *mod, struct load_info *info)
57756 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
57757 ddebug_remove_module(debug->modname);
57758 }
57759
57760 -static void *module_alloc_update_bounds(unsigned long size)
57761 +static void *module_alloc_update_bounds_rw(unsigned long size)
57762 {
57763 void *ret = module_alloc(size);
57764
57765 if (ret) {
57766 mutex_lock(&module_mutex);
57767 /* Update module bounds. */
57768 - if ((unsigned long)ret < module_addr_min)
57769 - module_addr_min = (unsigned long)ret;
57770 - if ((unsigned long)ret + size > module_addr_max)
57771 - module_addr_max = (unsigned long)ret + size;
57772 + if ((unsigned long)ret < module_addr_min_rw)
57773 + module_addr_min_rw = (unsigned long)ret;
57774 + if ((unsigned long)ret + size > module_addr_max_rw)
57775 + module_addr_max_rw = (unsigned long)ret + size;
57776 + mutex_unlock(&module_mutex);
57777 + }
57778 + return ret;
57779 +}
57780 +
57781 +static void *module_alloc_update_bounds_rx(unsigned long size)
57782 +{
57783 + void *ret = module_alloc_exec(size);
57784 +
57785 + if (ret) {
57786 + mutex_lock(&module_mutex);
57787 + /* Update module bounds. */
57788 + if ((unsigned long)ret < module_addr_min_rx)
57789 + module_addr_min_rx = (unsigned long)ret;
57790 + if ((unsigned long)ret + size > module_addr_max_rx)
57791 + module_addr_max_rx = (unsigned long)ret + size;
57792 mutex_unlock(&module_mutex);
57793 }
57794 return ret;
57795 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
57796 void *ptr;
57797
57798 /* Do the allocs. */
57799 - ptr = module_alloc_update_bounds(mod->core_size);
57800 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57801 /*
57802 * The pointer to this block is stored in the module structure
57803 * which is inside the block. Just mark it as not being a
57804 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
57805 if (!ptr)
57806 return -ENOMEM;
57807
57808 - memset(ptr, 0, mod->core_size);
57809 - mod->module_core = ptr;
57810 + memset(ptr, 0, mod->core_size_rw);
57811 + mod->module_core_rw = ptr;
57812
57813 - ptr = module_alloc_update_bounds(mod->init_size);
57814 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57815 /*
57816 * The pointer to this block is stored in the module structure
57817 * which is inside the block. This block doesn't need to be
57818 * scanned as it contains data and code that will be freed
57819 * after the module is initialized.
57820 */
57821 - kmemleak_ignore(ptr);
57822 - if (!ptr && mod->init_size) {
57823 - module_free(mod, mod->module_core);
57824 + kmemleak_not_leak(ptr);
57825 + if (!ptr && mod->init_size_rw) {
57826 + module_free(mod, mod->module_core_rw);
57827 return -ENOMEM;
57828 }
57829 - memset(ptr, 0, mod->init_size);
57830 - mod->module_init = ptr;
57831 + memset(ptr, 0, mod->init_size_rw);
57832 + mod->module_init_rw = ptr;
57833 +
57834 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57835 + kmemleak_not_leak(ptr);
57836 + if (!ptr) {
57837 + module_free(mod, mod->module_init_rw);
57838 + module_free(mod, mod->module_core_rw);
57839 + return -ENOMEM;
57840 + }
57841 +
57842 + pax_open_kernel();
57843 + memset(ptr, 0, mod->core_size_rx);
57844 + pax_close_kernel();
57845 + mod->module_core_rx = ptr;
57846 +
57847 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57848 + kmemleak_not_leak(ptr);
57849 + if (!ptr && mod->init_size_rx) {
57850 + module_free_exec(mod, mod->module_core_rx);
57851 + module_free(mod, mod->module_init_rw);
57852 + module_free(mod, mod->module_core_rw);
57853 + return -ENOMEM;
57854 + }
57855 +
57856 + pax_open_kernel();
57857 + memset(ptr, 0, mod->init_size_rx);
57858 + pax_close_kernel();
57859 + mod->module_init_rx = ptr;
57860
57861 /* Transfer each section which specifies SHF_ALLOC */
57862 DEBUGP("final section addresses:\n");
57863 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
57864 if (!(shdr->sh_flags & SHF_ALLOC))
57865 continue;
57866
57867 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
57868 - dest = mod->module_init
57869 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57870 - else
57871 - dest = mod->module_core + shdr->sh_entsize;
57872 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57873 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57874 + dest = mod->module_init_rw
57875 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57876 + else
57877 + dest = mod->module_init_rx
57878 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57879 + } else {
57880 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57881 + dest = mod->module_core_rw + shdr->sh_entsize;
57882 + else
57883 + dest = mod->module_core_rx + shdr->sh_entsize;
57884 + }
57885 +
57886 + if (shdr->sh_type != SHT_NOBITS) {
57887 +
57888 +#ifdef CONFIG_PAX_KERNEXEC
57889 +#ifdef CONFIG_X86_64
57890 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57891 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57892 +#endif
57893 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
57894 + pax_open_kernel();
57895 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57896 + pax_close_kernel();
57897 + } else
57898 +#endif
57899
57900 - if (shdr->sh_type != SHT_NOBITS)
57901 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57902 + }
57903 /* Update sh_addr to point to copy in image. */
57904 - shdr->sh_addr = (unsigned long)dest;
57905 +
57906 +#ifdef CONFIG_PAX_KERNEXEC
57907 + if (shdr->sh_flags & SHF_EXECINSTR)
57908 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
57909 + else
57910 +#endif
57911 +
57912 + shdr->sh_addr = (unsigned long)dest;
57913 DEBUGP("\t0x%lx %s\n",
57914 shdr->sh_addr, info->secstrings + shdr->sh_name);
57915 }
57916 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
57917 * Do it before processing of module parameters, so the module
57918 * can provide parameter accessor functions of its own.
57919 */
57920 - if (mod->module_init)
57921 - flush_icache_range((unsigned long)mod->module_init,
57922 - (unsigned long)mod->module_init
57923 - + mod->init_size);
57924 - flush_icache_range((unsigned long)mod->module_core,
57925 - (unsigned long)mod->module_core + mod->core_size);
57926 + if (mod->module_init_rx)
57927 + flush_icache_range((unsigned long)mod->module_init_rx,
57928 + (unsigned long)mod->module_init_rx
57929 + + mod->init_size_rx);
57930 + flush_icache_range((unsigned long)mod->module_core_rx,
57931 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
57932
57933 set_fs(old_fs);
57934 }
57935 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
57936 {
57937 kfree(info->strmap);
57938 percpu_modfree(mod);
57939 - module_free(mod, mod->module_init);
57940 - module_free(mod, mod->module_core);
57941 + module_free_exec(mod, mod->module_init_rx);
57942 + module_free_exec(mod, mod->module_core_rx);
57943 + module_free(mod, mod->module_init_rw);
57944 + module_free(mod, mod->module_core_rw);
57945 }
57946
57947 static int post_relocation(struct module *mod, const struct load_info *info)
57948 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
57949 if (err)
57950 goto free_unload;
57951
57952 + /* Now copy in args */
57953 + mod->args = strndup_user(uargs, ~0UL >> 1);
57954 + if (IS_ERR(mod->args)) {
57955 + err = PTR_ERR(mod->args);
57956 + goto free_unload;
57957 + }
57958 +
57959 /* Set up MODINFO_ATTR fields */
57960 setup_modinfo(mod, &info);
57961
57962 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57963 + {
57964 + char *p, *p2;
57965 +
57966 + if (strstr(mod->args, "grsec_modharden_netdev")) {
57967 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
57968 + err = -EPERM;
57969 + goto free_modinfo;
57970 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
57971 + p += strlen("grsec_modharden_normal");
57972 + p2 = strstr(p, "_");
57973 + if (p2) {
57974 + *p2 = '\0';
57975 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
57976 + *p2 = '_';
57977 + }
57978 + err = -EPERM;
57979 + goto free_modinfo;
57980 + }
57981 + }
57982 +#endif
57983 +
57984 /* Fix up syms, so that st_value is a pointer to location. */
57985 err = simplify_symbols(mod, &info);
57986 if (err < 0)
57987 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
57988
57989 flush_module_icache(mod);
57990
57991 - /* Now copy in args */
57992 - mod->args = strndup_user(uargs, ~0UL >> 1);
57993 - if (IS_ERR(mod->args)) {
57994 - err = PTR_ERR(mod->args);
57995 - goto free_arch_cleanup;
57996 - }
57997 -
57998 /* Mark state as coming so strong_try_module_get() ignores us. */
57999 mod->state = MODULE_STATE_COMING;
58000
58001 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
58002 unlock:
58003 mutex_unlock(&module_mutex);
58004 synchronize_sched();
58005 - kfree(mod->args);
58006 - free_arch_cleanup:
58007 module_arch_cleanup(mod);
58008 free_modinfo:
58009 free_modinfo(mod);
58010 + kfree(mod->args);
58011 free_unload:
58012 module_unload_free(mod);
58013 free_module:
58014 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
58015 MODULE_STATE_COMING, mod);
58016
58017 /* Set RO and NX regions for core */
58018 - set_section_ro_nx(mod->module_core,
58019 - mod->core_text_size,
58020 - mod->core_ro_size,
58021 - mod->core_size);
58022 + set_section_ro_nx(mod->module_core_rx,
58023 + mod->core_size_rx,
58024 + mod->core_size_rx,
58025 + mod->core_size_rx);
58026
58027 /* Set RO and NX regions for init */
58028 - set_section_ro_nx(mod->module_init,
58029 - mod->init_text_size,
58030 - mod->init_ro_size,
58031 - mod->init_size);
58032 + set_section_ro_nx(mod->module_init_rx,
58033 + mod->init_size_rx,
58034 + mod->init_size_rx,
58035 + mod->init_size_rx);
58036
58037 do_mod_ctors(mod);
58038 /* Start the module */
58039 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
58040 mod->strtab = mod->core_strtab;
58041 #endif
58042 unset_module_init_ro_nx(mod);
58043 - module_free(mod, mod->module_init);
58044 - mod->module_init = NULL;
58045 - mod->init_size = 0;
58046 - mod->init_ro_size = 0;
58047 - mod->init_text_size = 0;
58048 + module_free(mod, mod->module_init_rw);
58049 + module_free_exec(mod, mod->module_init_rx);
58050 + mod->module_init_rw = NULL;
58051 + mod->module_init_rx = NULL;
58052 + mod->init_size_rw = 0;
58053 + mod->init_size_rx = 0;
58054 mutex_unlock(&module_mutex);
58055
58056 return 0;
58057 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
58058 unsigned long nextval;
58059
58060 /* At worse, next value is at end of module */
58061 - if (within_module_init(addr, mod))
58062 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58063 + if (within_module_init_rx(addr, mod))
58064 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58065 + else if (within_module_init_rw(addr, mod))
58066 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58067 + else if (within_module_core_rx(addr, mod))
58068 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58069 + else if (within_module_core_rw(addr, mod))
58070 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58071 else
58072 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58073 + return NULL;
58074
58075 /* Scan for closest preceding symbol, and next symbol. (ELF
58076 starts real symbols at 1). */
58077 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
58078 char buf[8];
58079
58080 seq_printf(m, "%s %u",
58081 - mod->name, mod->init_size + mod->core_size);
58082 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58083 print_unload_info(m, mod);
58084
58085 /* Informative for users. */
58086 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
58087 mod->state == MODULE_STATE_COMING ? "Loading":
58088 "Live");
58089 /* Used by oprofile and other similar tools. */
58090 - seq_printf(m, " 0x%pK", mod->module_core);
58091 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58092
58093 /* Taints info */
58094 if (mod->taints)
58095 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
58096
58097 static int __init proc_modules_init(void)
58098 {
58099 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58100 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58101 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58102 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58103 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58104 +#else
58105 proc_create("modules", 0, NULL, &proc_modules_operations);
58106 +#endif
58107 +#else
58108 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58109 +#endif
58110 return 0;
58111 }
58112 module_init(proc_modules_init);
58113 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
58114 {
58115 struct module *mod;
58116
58117 - if (addr < module_addr_min || addr > module_addr_max)
58118 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58119 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58120 return NULL;
58121
58122 list_for_each_entry_rcu(mod, &modules, list)
58123 - if (within_module_core(addr, mod)
58124 - || within_module_init(addr, mod))
58125 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58126 return mod;
58127 return NULL;
58128 }
58129 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
58130 */
58131 struct module *__module_text_address(unsigned long addr)
58132 {
58133 - struct module *mod = __module_address(addr);
58134 + struct module *mod;
58135 +
58136 +#ifdef CONFIG_X86_32
58137 + addr = ktla_ktva(addr);
58138 +#endif
58139 +
58140 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58141 + return NULL;
58142 +
58143 + mod = __module_address(addr);
58144 +
58145 if (mod) {
58146 /* Make sure it's within the text section. */
58147 - if (!within(addr, mod->module_init, mod->init_text_size)
58148 - && !within(addr, mod->module_core, mod->core_text_size))
58149 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58150 mod = NULL;
58151 }
58152 return mod;
58153 diff -urNp linux-3.0.3/kernel/mutex.c linux-3.0.3/kernel/mutex.c
58154 --- linux-3.0.3/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
58155 +++ linux-3.0.3/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
58156 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
58157 spin_lock_mutex(&lock->wait_lock, flags);
58158
58159 debug_mutex_lock_common(lock, &waiter);
58160 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58161 + debug_mutex_add_waiter(lock, &waiter, task);
58162
58163 /* add waiting tasks to the end of the waitqueue (FIFO): */
58164 list_add_tail(&waiter.list, &lock->wait_list);
58165 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
58166 * TASK_UNINTERRUPTIBLE case.)
58167 */
58168 if (unlikely(signal_pending_state(state, task))) {
58169 - mutex_remove_waiter(lock, &waiter,
58170 - task_thread_info(task));
58171 + mutex_remove_waiter(lock, &waiter, task);
58172 mutex_release(&lock->dep_map, 1, ip);
58173 spin_unlock_mutex(&lock->wait_lock, flags);
58174
58175 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
58176 done:
58177 lock_acquired(&lock->dep_map, ip);
58178 /* got the lock - rejoice! */
58179 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58180 + mutex_remove_waiter(lock, &waiter, task);
58181 mutex_set_owner(lock);
58182
58183 /* set it to 0 if there are no waiters left: */
58184 diff -urNp linux-3.0.3/kernel/mutex-debug.c linux-3.0.3/kernel/mutex-debug.c
58185 --- linux-3.0.3/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
58186 +++ linux-3.0.3/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
58187 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58188 }
58189
58190 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58191 - struct thread_info *ti)
58192 + struct task_struct *task)
58193 {
58194 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58195
58196 /* Mark the current thread as blocked on the lock: */
58197 - ti->task->blocked_on = waiter;
58198 + task->blocked_on = waiter;
58199 }
58200
58201 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58202 - struct thread_info *ti)
58203 + struct task_struct *task)
58204 {
58205 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58206 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58207 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58208 - ti->task->blocked_on = NULL;
58209 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58210 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58211 + task->blocked_on = NULL;
58212
58213 list_del_init(&waiter->list);
58214 waiter->task = NULL;
58215 diff -urNp linux-3.0.3/kernel/mutex-debug.h linux-3.0.3/kernel/mutex-debug.h
58216 --- linux-3.0.3/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
58217 +++ linux-3.0.3/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
58218 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
58219 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58220 extern void debug_mutex_add_waiter(struct mutex *lock,
58221 struct mutex_waiter *waiter,
58222 - struct thread_info *ti);
58223 + struct task_struct *task);
58224 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58225 - struct thread_info *ti);
58226 + struct task_struct *task);
58227 extern void debug_mutex_unlock(struct mutex *lock);
58228 extern void debug_mutex_init(struct mutex *lock, const char *name,
58229 struct lock_class_key *key);
58230 diff -urNp linux-3.0.3/kernel/padata.c linux-3.0.3/kernel/padata.c
58231 --- linux-3.0.3/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
58232 +++ linux-3.0.3/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
58233 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58234 padata->pd = pd;
58235 padata->cb_cpu = cb_cpu;
58236
58237 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58238 - atomic_set(&pd->seq_nr, -1);
58239 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58240 + atomic_set_unchecked(&pd->seq_nr, -1);
58241
58242 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58243 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58244
58245 target_cpu = padata_cpu_hash(padata);
58246 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58247 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58248 padata_init_pqueues(pd);
58249 padata_init_squeues(pd);
58250 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58251 - atomic_set(&pd->seq_nr, -1);
58252 + atomic_set_unchecked(&pd->seq_nr, -1);
58253 atomic_set(&pd->reorder_objects, 0);
58254 atomic_set(&pd->refcnt, 0);
58255 pd->pinst = pinst;
58256 diff -urNp linux-3.0.3/kernel/panic.c linux-3.0.3/kernel/panic.c
58257 --- linux-3.0.3/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
58258 +++ linux-3.0.3/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
58259 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58260 const char *board;
58261
58262 printk(KERN_WARNING "------------[ cut here ]------------\n");
58263 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58264 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58265 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58266 if (board)
58267 printk(KERN_WARNING "Hardware name: %s\n", board);
58268 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58269 */
58270 void __stack_chk_fail(void)
58271 {
58272 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58273 + dump_stack();
58274 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58275 __builtin_return_address(0));
58276 }
58277 EXPORT_SYMBOL(__stack_chk_fail);
58278 diff -urNp linux-3.0.3/kernel/pid.c linux-3.0.3/kernel/pid.c
58279 --- linux-3.0.3/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
58280 +++ linux-3.0.3/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
58281 @@ -33,6 +33,7 @@
58282 #include <linux/rculist.h>
58283 #include <linux/bootmem.h>
58284 #include <linux/hash.h>
58285 +#include <linux/security.h>
58286 #include <linux/pid_namespace.h>
58287 #include <linux/init_task.h>
58288 #include <linux/syscalls.h>
58289 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58290
58291 int pid_max = PID_MAX_DEFAULT;
58292
58293 -#define RESERVED_PIDS 300
58294 +#define RESERVED_PIDS 500
58295
58296 int pid_max_min = RESERVED_PIDS + 1;
58297 int pid_max_max = PID_MAX_LIMIT;
58298 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58299 */
58300 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58301 {
58302 + struct task_struct *task;
58303 +
58304 rcu_lockdep_assert(rcu_read_lock_held());
58305 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58306 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58307 +
58308 + if (gr_pid_is_chrooted(task))
58309 + return NULL;
58310 +
58311 + return task;
58312 }
58313
58314 struct task_struct *find_task_by_vpid(pid_t vnr)
58315 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58316 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58317 }
58318
58319 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58320 +{
58321 + rcu_lockdep_assert(rcu_read_lock_held());
58322 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58323 +}
58324 +
58325 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58326 {
58327 struct pid *pid;
58328 diff -urNp linux-3.0.3/kernel/posix-cpu-timers.c linux-3.0.3/kernel/posix-cpu-timers.c
58329 --- linux-3.0.3/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
58330 +++ linux-3.0.3/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
58331 @@ -6,6 +6,7 @@
58332 #include <linux/posix-timers.h>
58333 #include <linux/errno.h>
58334 #include <linux/math64.h>
58335 +#include <linux/security.h>
58336 #include <asm/uaccess.h>
58337 #include <linux/kernel_stat.h>
58338 #include <trace/events/timer.h>
58339 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58340
58341 static __init int init_posix_cpu_timers(void)
58342 {
58343 - struct k_clock process = {
58344 + static struct k_clock process = {
58345 .clock_getres = process_cpu_clock_getres,
58346 .clock_get = process_cpu_clock_get,
58347 .timer_create = process_cpu_timer_create,
58348 .nsleep = process_cpu_nsleep,
58349 .nsleep_restart = process_cpu_nsleep_restart,
58350 };
58351 - struct k_clock thread = {
58352 + static struct k_clock thread = {
58353 .clock_getres = thread_cpu_clock_getres,
58354 .clock_get = thread_cpu_clock_get,
58355 .timer_create = thread_cpu_timer_create,
58356 diff -urNp linux-3.0.3/kernel/posix-timers.c linux-3.0.3/kernel/posix-timers.c
58357 --- linux-3.0.3/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
58358 +++ linux-3.0.3/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
58359 @@ -43,6 +43,7 @@
58360 #include <linux/idr.h>
58361 #include <linux/posix-clock.h>
58362 #include <linux/posix-timers.h>
58363 +#include <linux/grsecurity.h>
58364 #include <linux/syscalls.h>
58365 #include <linux/wait.h>
58366 #include <linux/workqueue.h>
58367 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58368 * which we beg off on and pass to do_sys_settimeofday().
58369 */
58370
58371 -static struct k_clock posix_clocks[MAX_CLOCKS];
58372 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58373
58374 /*
58375 * These ones are defined below.
58376 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58377 */
58378 static __init int init_posix_timers(void)
58379 {
58380 - struct k_clock clock_realtime = {
58381 + static struct k_clock clock_realtime = {
58382 .clock_getres = hrtimer_get_res,
58383 .clock_get = posix_clock_realtime_get,
58384 .clock_set = posix_clock_realtime_set,
58385 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58386 .timer_get = common_timer_get,
58387 .timer_del = common_timer_del,
58388 };
58389 - struct k_clock clock_monotonic = {
58390 + static struct k_clock clock_monotonic = {
58391 .clock_getres = hrtimer_get_res,
58392 .clock_get = posix_ktime_get_ts,
58393 .nsleep = common_nsleep,
58394 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58395 .timer_get = common_timer_get,
58396 .timer_del = common_timer_del,
58397 };
58398 - struct k_clock clock_monotonic_raw = {
58399 + static struct k_clock clock_monotonic_raw = {
58400 .clock_getres = hrtimer_get_res,
58401 .clock_get = posix_get_monotonic_raw,
58402 };
58403 - struct k_clock clock_realtime_coarse = {
58404 + static struct k_clock clock_realtime_coarse = {
58405 .clock_getres = posix_get_coarse_res,
58406 .clock_get = posix_get_realtime_coarse,
58407 };
58408 - struct k_clock clock_monotonic_coarse = {
58409 + static struct k_clock clock_monotonic_coarse = {
58410 .clock_getres = posix_get_coarse_res,
58411 .clock_get = posix_get_monotonic_coarse,
58412 };
58413 - struct k_clock clock_boottime = {
58414 + static struct k_clock clock_boottime = {
58415 .clock_getres = hrtimer_get_res,
58416 .clock_get = posix_get_boottime,
58417 .nsleep = common_nsleep,
58418 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58419 .timer_del = common_timer_del,
58420 };
58421
58422 + pax_track_stack();
58423 +
58424 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58425 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58426 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58427 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58428 return;
58429 }
58430
58431 - posix_clocks[clock_id] = *new_clock;
58432 + posix_clocks[clock_id] = new_clock;
58433 }
58434 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58435
58436 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
58437 return (id & CLOCKFD_MASK) == CLOCKFD ?
58438 &clock_posix_dynamic : &clock_posix_cpu;
58439
58440 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58441 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58442 return NULL;
58443 - return &posix_clocks[id];
58444 + return posix_clocks[id];
58445 }
58446
58447 static int common_timer_create(struct k_itimer *new_timer)
58448 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58449 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58450 return -EFAULT;
58451
58452 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58453 + have their clock_set fptr set to a nosettime dummy function
58454 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58455 + call common_clock_set, which calls do_sys_settimeofday, which
58456 + we hook
58457 + */
58458 +
58459 return kc->clock_set(which_clock, &new_tp);
58460 }
58461
58462 diff -urNp linux-3.0.3/kernel/power/poweroff.c linux-3.0.3/kernel/power/poweroff.c
58463 --- linux-3.0.3/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
58464 +++ linux-3.0.3/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
58465 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58466 .enable_mask = SYSRQ_ENABLE_BOOT,
58467 };
58468
58469 -static int pm_sysrq_init(void)
58470 +static int __init pm_sysrq_init(void)
58471 {
58472 register_sysrq_key('o', &sysrq_poweroff_op);
58473 return 0;
58474 diff -urNp linux-3.0.3/kernel/power/process.c linux-3.0.3/kernel/power/process.c
58475 --- linux-3.0.3/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
58476 +++ linux-3.0.3/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
58477 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58478 u64 elapsed_csecs64;
58479 unsigned int elapsed_csecs;
58480 bool wakeup = false;
58481 + bool timedout = false;
58482
58483 do_gettimeofday(&start);
58484
58485 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58486
58487 while (true) {
58488 todo = 0;
58489 + if (time_after(jiffies, end_time))
58490 + timedout = true;
58491 read_lock(&tasklist_lock);
58492 do_each_thread(g, p) {
58493 if (frozen(p) || !freezable(p))
58494 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58495 * try_to_stop() after schedule() in ptrace/signal
58496 * stop sees TIF_FREEZE.
58497 */
58498 - if (!task_is_stopped_or_traced(p) &&
58499 - !freezer_should_skip(p))
58500 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58501 todo++;
58502 + if (timedout) {
58503 + printk(KERN_ERR "Task refusing to freeze:\n");
58504 + sched_show_task(p);
58505 + }
58506 + }
58507 } while_each_thread(g, p);
58508 read_unlock(&tasklist_lock);
58509
58510 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58511 todo += wq_busy;
58512 }
58513
58514 - if (!todo || time_after(jiffies, end_time))
58515 + if (!todo || timedout)
58516 break;
58517
58518 if (pm_wakeup_pending()) {
58519 diff -urNp linux-3.0.3/kernel/printk.c linux-3.0.3/kernel/printk.c
58520 --- linux-3.0.3/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
58521 +++ linux-3.0.3/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
58522 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
58523 if (from_file && type != SYSLOG_ACTION_OPEN)
58524 return 0;
58525
58526 +#ifdef CONFIG_GRKERNSEC_DMESG
58527 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58528 + return -EPERM;
58529 +#endif
58530 +
58531 if (syslog_action_restricted(type)) {
58532 if (capable(CAP_SYSLOG))
58533 return 0;
58534 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58535 if (capable(CAP_SYS_ADMIN)) {
58536 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58537 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58538 "but no CAP_SYSLOG (deprecated).\n");
58539 return 0;
58540 }
58541 diff -urNp linux-3.0.3/kernel/profile.c linux-3.0.3/kernel/profile.c
58542 --- linux-3.0.3/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
58543 +++ linux-3.0.3/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
58544 @@ -39,7 +39,7 @@ struct profile_hit {
58545 /* Oprofile timer tick hook */
58546 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58547
58548 -static atomic_t *prof_buffer;
58549 +static atomic_unchecked_t *prof_buffer;
58550 static unsigned long prof_len, prof_shift;
58551
58552 int prof_on __read_mostly;
58553 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
58554 hits[i].pc = 0;
58555 continue;
58556 }
58557 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58558 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58559 hits[i].hits = hits[i].pc = 0;
58560 }
58561 }
58562 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
58563 * Add the current hit(s) and flush the write-queue out
58564 * to the global buffer:
58565 */
58566 - atomic_add(nr_hits, &prof_buffer[pc]);
58567 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58568 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58569 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58570 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58571 hits[i].pc = hits[i].hits = 0;
58572 }
58573 out:
58574 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
58575 {
58576 unsigned long pc;
58577 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58578 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58579 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58580 }
58581 #endif /* !CONFIG_SMP */
58582
58583 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58584 return -EFAULT;
58585 buf++; p++; count--; read++;
58586 }
58587 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58588 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58589 if (copy_to_user(buf, (void *)pnt, count))
58590 return -EFAULT;
58591 read += count;
58592 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58593 }
58594 #endif
58595 profile_discard_flip_buffers();
58596 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58597 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58598 return count;
58599 }
58600
58601 diff -urNp linux-3.0.3/kernel/ptrace.c linux-3.0.3/kernel/ptrace.c
58602 --- linux-3.0.3/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
58603 +++ linux-3.0.3/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
58604 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
58605 return ret;
58606 }
58607
58608 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58609 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58610 + unsigned int log)
58611 {
58612 const struct cred *cred = current_cred(), *tcred;
58613
58614 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
58615 cred->gid == tcred->sgid &&
58616 cred->gid == tcred->gid))
58617 goto ok;
58618 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58619 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58620 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58621 goto ok;
58622 rcu_read_unlock();
58623 return -EPERM;
58624 @@ -167,7 +169,9 @@ ok:
58625 smp_rmb();
58626 if (task->mm)
58627 dumpable = get_dumpable(task->mm);
58628 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58629 + if (!dumpable &&
58630 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58631 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58632 return -EPERM;
58633
58634 return security_ptrace_access_check(task, mode);
58635 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
58636 {
58637 int err;
58638 task_lock(task);
58639 - err = __ptrace_may_access(task, mode);
58640 + err = __ptrace_may_access(task, mode, 0);
58641 + task_unlock(task);
58642 + return !err;
58643 +}
58644 +
58645 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58646 +{
58647 + int err;
58648 + task_lock(task);
58649 + err = __ptrace_may_access(task, mode, 1);
58650 task_unlock(task);
58651 return !err;
58652 }
58653 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
58654 goto out;
58655
58656 task_lock(task);
58657 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58658 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58659 task_unlock(task);
58660 if (retval)
58661 goto unlock_creds;
58662 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
58663 goto unlock_tasklist;
58664
58665 task->ptrace = PT_PTRACED;
58666 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58667 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58668 task->ptrace |= PT_PTRACE_CAP;
58669
58670 __ptrace_link(task, current);
58671 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
58672 {
58673 int copied = 0;
58674
58675 + pax_track_stack();
58676 +
58677 while (len > 0) {
58678 char buf[128];
58679 int this_len, retval;
58680 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
58681 break;
58682 return -EIO;
58683 }
58684 - if (copy_to_user(dst, buf, retval))
58685 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58686 return -EFAULT;
58687 copied += retval;
58688 src += retval;
58689 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
58690 {
58691 int copied = 0;
58692
58693 + pax_track_stack();
58694 +
58695 while (len > 0) {
58696 char buf[128];
58697 int this_len, retval;
58698 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
58699 {
58700 int ret = -EIO;
58701 siginfo_t siginfo;
58702 - void __user *datavp = (void __user *) data;
58703 + void __user *datavp = (__force void __user *) data;
58704 unsigned long __user *datalp = datavp;
58705
58706 + pax_track_stack();
58707 +
58708 switch (request) {
58709 case PTRACE_PEEKTEXT:
58710 case PTRACE_PEEKDATA:
58711 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58712 goto out;
58713 }
58714
58715 + if (gr_handle_ptrace(child, request)) {
58716 + ret = -EPERM;
58717 + goto out_put_task_struct;
58718 + }
58719 +
58720 if (request == PTRACE_ATTACH) {
58721 ret = ptrace_attach(child);
58722 /*
58723 * Some architectures need to do book-keeping after
58724 * a ptrace attach.
58725 */
58726 - if (!ret)
58727 + if (!ret) {
58728 arch_ptrace_attach(child);
58729 + gr_audit_ptrace(child);
58730 + }
58731 goto out_put_task_struct;
58732 }
58733
58734 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
58735 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58736 if (copied != sizeof(tmp))
58737 return -EIO;
58738 - return put_user(tmp, (unsigned long __user *)data);
58739 + return put_user(tmp, (__force unsigned long __user *)data);
58740 }
58741
58742 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58743 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
58744 siginfo_t siginfo;
58745 int ret;
58746
58747 + pax_track_stack();
58748 +
58749 switch (request) {
58750 case PTRACE_PEEKTEXT:
58751 case PTRACE_PEEKDATA:
58752 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
58753 goto out;
58754 }
58755
58756 + if (gr_handle_ptrace(child, request)) {
58757 + ret = -EPERM;
58758 + goto out_put_task_struct;
58759 + }
58760 +
58761 if (request == PTRACE_ATTACH) {
58762 ret = ptrace_attach(child);
58763 /*
58764 * Some architectures need to do book-keeping after
58765 * a ptrace attach.
58766 */
58767 - if (!ret)
58768 + if (!ret) {
58769 arch_ptrace_attach(child);
58770 + gr_audit_ptrace(child);
58771 + }
58772 goto out_put_task_struct;
58773 }
58774
58775 diff -urNp linux-3.0.3/kernel/rcutorture.c linux-3.0.3/kernel/rcutorture.c
58776 --- linux-3.0.3/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
58777 +++ linux-3.0.3/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
58778 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
58779 { 0 };
58780 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
58781 { 0 };
58782 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58783 -static atomic_t n_rcu_torture_alloc;
58784 -static atomic_t n_rcu_torture_alloc_fail;
58785 -static atomic_t n_rcu_torture_free;
58786 -static atomic_t n_rcu_torture_mberror;
58787 -static atomic_t n_rcu_torture_error;
58788 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58789 +static atomic_unchecked_t n_rcu_torture_alloc;
58790 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
58791 +static atomic_unchecked_t n_rcu_torture_free;
58792 +static atomic_unchecked_t n_rcu_torture_mberror;
58793 +static atomic_unchecked_t n_rcu_torture_error;
58794 static long n_rcu_torture_boost_ktrerror;
58795 static long n_rcu_torture_boost_rterror;
58796 static long n_rcu_torture_boost_failure;
58797 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
58798
58799 spin_lock_bh(&rcu_torture_lock);
58800 if (list_empty(&rcu_torture_freelist)) {
58801 - atomic_inc(&n_rcu_torture_alloc_fail);
58802 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
58803 spin_unlock_bh(&rcu_torture_lock);
58804 return NULL;
58805 }
58806 - atomic_inc(&n_rcu_torture_alloc);
58807 + atomic_inc_unchecked(&n_rcu_torture_alloc);
58808 p = rcu_torture_freelist.next;
58809 list_del_init(p);
58810 spin_unlock_bh(&rcu_torture_lock);
58811 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
58812 static void
58813 rcu_torture_free(struct rcu_torture *p)
58814 {
58815 - atomic_inc(&n_rcu_torture_free);
58816 + atomic_inc_unchecked(&n_rcu_torture_free);
58817 spin_lock_bh(&rcu_torture_lock);
58818 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
58819 spin_unlock_bh(&rcu_torture_lock);
58820 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
58821 i = rp->rtort_pipe_count;
58822 if (i > RCU_TORTURE_PIPE_LEN)
58823 i = RCU_TORTURE_PIPE_LEN;
58824 - atomic_inc(&rcu_torture_wcount[i]);
58825 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58826 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58827 rp->rtort_mbtest = 0;
58828 rcu_torture_free(rp);
58829 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
58830 i = rp->rtort_pipe_count;
58831 if (i > RCU_TORTURE_PIPE_LEN)
58832 i = RCU_TORTURE_PIPE_LEN;
58833 - atomic_inc(&rcu_torture_wcount[i]);
58834 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58835 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58836 rp->rtort_mbtest = 0;
58837 list_del(&rp->rtort_free);
58838 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
58839 i = old_rp->rtort_pipe_count;
58840 if (i > RCU_TORTURE_PIPE_LEN)
58841 i = RCU_TORTURE_PIPE_LEN;
58842 - atomic_inc(&rcu_torture_wcount[i]);
58843 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58844 old_rp->rtort_pipe_count++;
58845 cur_ops->deferred_free(old_rp);
58846 }
58847 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
58848 return;
58849 }
58850 if (p->rtort_mbtest == 0)
58851 - atomic_inc(&n_rcu_torture_mberror);
58852 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58853 spin_lock(&rand_lock);
58854 cur_ops->read_delay(&rand);
58855 n_rcu_torture_timers++;
58856 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
58857 continue;
58858 }
58859 if (p->rtort_mbtest == 0)
58860 - atomic_inc(&n_rcu_torture_mberror);
58861 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58862 cur_ops->read_delay(&rand);
58863 preempt_disable();
58864 pipe_count = p->rtort_pipe_count;
58865 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
58866 rcu_torture_current,
58867 rcu_torture_current_version,
58868 list_empty(&rcu_torture_freelist),
58869 - atomic_read(&n_rcu_torture_alloc),
58870 - atomic_read(&n_rcu_torture_alloc_fail),
58871 - atomic_read(&n_rcu_torture_free),
58872 - atomic_read(&n_rcu_torture_mberror),
58873 + atomic_read_unchecked(&n_rcu_torture_alloc),
58874 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
58875 + atomic_read_unchecked(&n_rcu_torture_free),
58876 + atomic_read_unchecked(&n_rcu_torture_mberror),
58877 n_rcu_torture_boost_ktrerror,
58878 n_rcu_torture_boost_rterror,
58879 n_rcu_torture_boost_failure,
58880 n_rcu_torture_boosts,
58881 n_rcu_torture_timers);
58882 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
58883 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
58884 n_rcu_torture_boost_ktrerror != 0 ||
58885 n_rcu_torture_boost_rterror != 0 ||
58886 n_rcu_torture_boost_failure != 0)
58887 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
58888 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
58889 if (i > 1) {
58890 cnt += sprintf(&page[cnt], "!!! ");
58891 - atomic_inc(&n_rcu_torture_error);
58892 + atomic_inc_unchecked(&n_rcu_torture_error);
58893 WARN_ON_ONCE(1);
58894 }
58895 cnt += sprintf(&page[cnt], "Reader Pipe: ");
58896 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
58897 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
58898 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58899 cnt += sprintf(&page[cnt], " %d",
58900 - atomic_read(&rcu_torture_wcount[i]));
58901 + atomic_read_unchecked(&rcu_torture_wcount[i]));
58902 }
58903 cnt += sprintf(&page[cnt], "\n");
58904 if (cur_ops->stats)
58905 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
58906
58907 if (cur_ops->cleanup)
58908 cur_ops->cleanup();
58909 - if (atomic_read(&n_rcu_torture_error))
58910 + if (atomic_read_unchecked(&n_rcu_torture_error))
58911 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
58912 else
58913 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
58914 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
58915
58916 rcu_torture_current = NULL;
58917 rcu_torture_current_version = 0;
58918 - atomic_set(&n_rcu_torture_alloc, 0);
58919 - atomic_set(&n_rcu_torture_alloc_fail, 0);
58920 - atomic_set(&n_rcu_torture_free, 0);
58921 - atomic_set(&n_rcu_torture_mberror, 0);
58922 - atomic_set(&n_rcu_torture_error, 0);
58923 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
58924 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
58925 + atomic_set_unchecked(&n_rcu_torture_free, 0);
58926 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
58927 + atomic_set_unchecked(&n_rcu_torture_error, 0);
58928 n_rcu_torture_boost_ktrerror = 0;
58929 n_rcu_torture_boost_rterror = 0;
58930 n_rcu_torture_boost_failure = 0;
58931 n_rcu_torture_boosts = 0;
58932 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
58933 - atomic_set(&rcu_torture_wcount[i], 0);
58934 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
58935 for_each_possible_cpu(cpu) {
58936 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58937 per_cpu(rcu_torture_count, cpu)[i] = 0;
58938 diff -urNp linux-3.0.3/kernel/rcutree.c linux-3.0.3/kernel/rcutree.c
58939 --- linux-3.0.3/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
58940 +++ linux-3.0.3/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
58941 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
58942 /*
58943 * Do softirq processing for the current CPU.
58944 */
58945 -static void rcu_process_callbacks(struct softirq_action *unused)
58946 +static void rcu_process_callbacks(void)
58947 {
58948 __rcu_process_callbacks(&rcu_sched_state,
58949 &__get_cpu_var(rcu_sched_data));
58950 diff -urNp linux-3.0.3/kernel/rcutree_plugin.h linux-3.0.3/kernel/rcutree_plugin.h
58951 --- linux-3.0.3/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
58952 +++ linux-3.0.3/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
58953 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
58954
58955 /* Clean up and exit. */
58956 smp_mb(); /* ensure expedited GP seen before counter increment. */
58957 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
58958 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
58959 unlock_mb_ret:
58960 mutex_unlock(&sync_rcu_preempt_exp_mutex);
58961 mb_ret:
58962 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
58963
58964 #else /* #ifndef CONFIG_SMP */
58965
58966 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
58967 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
58968 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
58969 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
58970
58971 static int synchronize_sched_expedited_cpu_stop(void *data)
58972 {
58973 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
58974 int firstsnap, s, snap, trycount = 0;
58975
58976 /* Note that atomic_inc_return() implies full memory barrier. */
58977 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
58978 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
58979 get_online_cpus();
58980
58981 /*
58982 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
58983 }
58984
58985 /* Check to see if someone else did our work for us. */
58986 - s = atomic_read(&sync_sched_expedited_done);
58987 + s = atomic_read_unchecked(&sync_sched_expedited_done);
58988 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
58989 smp_mb(); /* ensure test happens before caller kfree */
58990 return;
58991 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
58992 * grace period works for us.
58993 */
58994 get_online_cpus();
58995 - snap = atomic_read(&sync_sched_expedited_started) - 1;
58996 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
58997 smp_mb(); /* ensure read is before try_stop_cpus(). */
58998 }
58999
59000 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
59001 * than we did beat us to the punch.
59002 */
59003 do {
59004 - s = atomic_read(&sync_sched_expedited_done);
59005 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59006 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59007 smp_mb(); /* ensure test happens before caller kfree */
59008 break;
59009 }
59010 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59011 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59012
59013 put_online_cpus();
59014 }
59015 diff -urNp linux-3.0.3/kernel/relay.c linux-3.0.3/kernel/relay.c
59016 --- linux-3.0.3/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
59017 +++ linux-3.0.3/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
59018 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59019 };
59020 ssize_t ret;
59021
59022 + pax_track_stack();
59023 +
59024 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59025 return 0;
59026 if (splice_grow_spd(pipe, &spd))
59027 diff -urNp linux-3.0.3/kernel/resource.c linux-3.0.3/kernel/resource.c
59028 --- linux-3.0.3/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
59029 +++ linux-3.0.3/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
59030 @@ -141,8 +141,18 @@ static const struct file_operations proc
59031
59032 static int __init ioresources_init(void)
59033 {
59034 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59035 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59036 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59037 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59038 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59039 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59040 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59041 +#endif
59042 +#else
59043 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59044 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59045 +#endif
59046 return 0;
59047 }
59048 __initcall(ioresources_init);
59049 diff -urNp linux-3.0.3/kernel/rtmutex-tester.c linux-3.0.3/kernel/rtmutex-tester.c
59050 --- linux-3.0.3/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
59051 +++ linux-3.0.3/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
59052 @@ -20,7 +20,7 @@
59053 #define MAX_RT_TEST_MUTEXES 8
59054
59055 static spinlock_t rttest_lock;
59056 -static atomic_t rttest_event;
59057 +static atomic_unchecked_t rttest_event;
59058
59059 struct test_thread_data {
59060 int opcode;
59061 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59062
59063 case RTTEST_LOCKCONT:
59064 td->mutexes[td->opdata] = 1;
59065 - td->event = atomic_add_return(1, &rttest_event);
59066 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59067 return 0;
59068
59069 case RTTEST_RESET:
59070 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59071 return 0;
59072
59073 case RTTEST_RESETEVENT:
59074 - atomic_set(&rttest_event, 0);
59075 + atomic_set_unchecked(&rttest_event, 0);
59076 return 0;
59077
59078 default:
59079 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59080 return ret;
59081
59082 td->mutexes[id] = 1;
59083 - td->event = atomic_add_return(1, &rttest_event);
59084 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59085 rt_mutex_lock(&mutexes[id]);
59086 - td->event = atomic_add_return(1, &rttest_event);
59087 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59088 td->mutexes[id] = 4;
59089 return 0;
59090
59091 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59092 return ret;
59093
59094 td->mutexes[id] = 1;
59095 - td->event = atomic_add_return(1, &rttest_event);
59096 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59097 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59098 - td->event = atomic_add_return(1, &rttest_event);
59099 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59100 td->mutexes[id] = ret ? 0 : 4;
59101 return ret ? -EINTR : 0;
59102
59103 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59104 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59105 return ret;
59106
59107 - td->event = atomic_add_return(1, &rttest_event);
59108 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59109 rt_mutex_unlock(&mutexes[id]);
59110 - td->event = atomic_add_return(1, &rttest_event);
59111 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59112 td->mutexes[id] = 0;
59113 return 0;
59114
59115 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59116 break;
59117
59118 td->mutexes[dat] = 2;
59119 - td->event = atomic_add_return(1, &rttest_event);
59120 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59121 break;
59122
59123 default:
59124 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59125 return;
59126
59127 td->mutexes[dat] = 3;
59128 - td->event = atomic_add_return(1, &rttest_event);
59129 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59130 break;
59131
59132 case RTTEST_LOCKNOWAIT:
59133 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59134 return;
59135
59136 td->mutexes[dat] = 1;
59137 - td->event = atomic_add_return(1, &rttest_event);
59138 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59139 return;
59140
59141 default:
59142 diff -urNp linux-3.0.3/kernel/sched_autogroup.c linux-3.0.3/kernel/sched_autogroup.c
59143 --- linux-3.0.3/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
59144 +++ linux-3.0.3/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
59145 @@ -7,7 +7,7 @@
59146
59147 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59148 static struct autogroup autogroup_default;
59149 -static atomic_t autogroup_seq_nr;
59150 +static atomic_unchecked_t autogroup_seq_nr;
59151
59152 static void __init autogroup_init(struct task_struct *init_task)
59153 {
59154 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59155
59156 kref_init(&ag->kref);
59157 init_rwsem(&ag->lock);
59158 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59159 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59160 ag->tg = tg;
59161 #ifdef CONFIG_RT_GROUP_SCHED
59162 /*
59163 diff -urNp linux-3.0.3/kernel/sched.c linux-3.0.3/kernel/sched.c
59164 --- linux-3.0.3/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
59165 +++ linux-3.0.3/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
59166 @@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
59167 struct rq *rq;
59168 int cpu;
59169
59170 + pax_track_stack();
59171 +
59172 need_resched:
59173 preempt_disable();
59174 cpu = smp_processor_id();
59175 @@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
59176 /* convert nice value [19,-20] to rlimit style value [1,40] */
59177 int nice_rlim = 20 - nice;
59178
59179 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59180 +
59181 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59182 capable(CAP_SYS_NICE));
59183 }
59184 @@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59185 if (nice > 19)
59186 nice = 19;
59187
59188 - if (increment < 0 && !can_nice(current, nice))
59189 + if (increment < 0 && (!can_nice(current, nice) ||
59190 + gr_handle_chroot_nice()))
59191 return -EPERM;
59192
59193 retval = security_task_setnice(current, nice);
59194 @@ -5111,6 +5116,7 @@ recheck:
59195 unsigned long rlim_rtprio =
59196 task_rlimit(p, RLIMIT_RTPRIO);
59197
59198 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59199 /* can't set/change the rt policy */
59200 if (policy != p->policy && !rlim_rtprio)
59201 return -EPERM;
59202 diff -urNp linux-3.0.3/kernel/sched_fair.c linux-3.0.3/kernel/sched_fair.c
59203 --- linux-3.0.3/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
59204 +++ linux-3.0.3/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
59205 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
59206 * run_rebalance_domains is triggered when needed from the scheduler tick.
59207 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59208 */
59209 -static void run_rebalance_domains(struct softirq_action *h)
59210 +static void run_rebalance_domains(void)
59211 {
59212 int this_cpu = smp_processor_id();
59213 struct rq *this_rq = cpu_rq(this_cpu);
59214 diff -urNp linux-3.0.3/kernel/signal.c linux-3.0.3/kernel/signal.c
59215 --- linux-3.0.3/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
59216 +++ linux-3.0.3/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
59217 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59218
59219 int print_fatal_signals __read_mostly;
59220
59221 -static void __user *sig_handler(struct task_struct *t, int sig)
59222 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59223 {
59224 return t->sighand->action[sig - 1].sa.sa_handler;
59225 }
59226
59227 -static int sig_handler_ignored(void __user *handler, int sig)
59228 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59229 {
59230 /* Is it explicitly or implicitly ignored? */
59231 return handler == SIG_IGN ||
59232 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59233 static int sig_task_ignored(struct task_struct *t, int sig,
59234 int from_ancestor_ns)
59235 {
59236 - void __user *handler;
59237 + __sighandler_t handler;
59238
59239 handler = sig_handler(t, sig);
59240
59241 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
59242 atomic_inc(&user->sigpending);
59243 rcu_read_unlock();
59244
59245 + if (!override_rlimit)
59246 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59247 +
59248 if (override_rlimit ||
59249 atomic_read(&user->sigpending) <=
59250 task_rlimit(t, RLIMIT_SIGPENDING)) {
59251 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
59252
59253 int unhandled_signal(struct task_struct *tsk, int sig)
59254 {
59255 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59256 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59257 if (is_global_init(tsk))
59258 return 1;
59259 if (handler != SIG_IGN && handler != SIG_DFL)
59260 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
59261 }
59262 }
59263
59264 + /* allow glibc communication via tgkill to other threads in our
59265 + thread group */
59266 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59267 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59268 + && gr_handle_signal(t, sig))
59269 + return -EPERM;
59270 +
59271 return security_task_kill(t, info, sig, 0);
59272 }
59273
59274 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
59275 return send_signal(sig, info, p, 1);
59276 }
59277
59278 -static int
59279 +int
59280 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59281 {
59282 return send_signal(sig, info, t, 0);
59283 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
59284 unsigned long int flags;
59285 int ret, blocked, ignored;
59286 struct k_sigaction *action;
59287 + int is_unhandled = 0;
59288
59289 spin_lock_irqsave(&t->sighand->siglock, flags);
59290 action = &t->sighand->action[sig-1];
59291 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
59292 }
59293 if (action->sa.sa_handler == SIG_DFL)
59294 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59295 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59296 + is_unhandled = 1;
59297 ret = specific_send_sig_info(sig, info, t);
59298 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59299
59300 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59301 + normal operation */
59302 + if (is_unhandled) {
59303 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59304 + gr_handle_crash(t, sig);
59305 + }
59306 +
59307 return ret;
59308 }
59309
59310 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
59311 ret = check_kill_permission(sig, info, p);
59312 rcu_read_unlock();
59313
59314 - if (!ret && sig)
59315 + if (!ret && sig) {
59316 ret = do_send_sig_info(sig, info, p, true);
59317 + if (!ret)
59318 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59319 + }
59320
59321 return ret;
59322 }
59323 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
59324 {
59325 siginfo_t info;
59326
59327 + pax_track_stack();
59328 +
59329 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59330
59331 memset(&info, 0, sizeof info);
59332 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59333 int error = -ESRCH;
59334
59335 rcu_read_lock();
59336 - p = find_task_by_vpid(pid);
59337 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59338 + /* allow glibc communication via tgkill to other threads in our
59339 + thread group */
59340 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59341 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59342 + p = find_task_by_vpid_unrestricted(pid);
59343 + else
59344 +#endif
59345 + p = find_task_by_vpid(pid);
59346 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59347 error = check_kill_permission(sig, info, p);
59348 /*
59349 diff -urNp linux-3.0.3/kernel/smp.c linux-3.0.3/kernel/smp.c
59350 --- linux-3.0.3/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
59351 +++ linux-3.0.3/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
59352 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
59353 }
59354 EXPORT_SYMBOL(smp_call_function);
59355
59356 -void ipi_call_lock(void)
59357 +void ipi_call_lock(void) __acquires(call_function.lock)
59358 {
59359 raw_spin_lock(&call_function.lock);
59360 }
59361
59362 -void ipi_call_unlock(void)
59363 +void ipi_call_unlock(void) __releases(call_function.lock)
59364 {
59365 raw_spin_unlock(&call_function.lock);
59366 }
59367
59368 -void ipi_call_lock_irq(void)
59369 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59370 {
59371 raw_spin_lock_irq(&call_function.lock);
59372 }
59373
59374 -void ipi_call_unlock_irq(void)
59375 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59376 {
59377 raw_spin_unlock_irq(&call_function.lock);
59378 }
59379 diff -urNp linux-3.0.3/kernel/softirq.c linux-3.0.3/kernel/softirq.c
59380 --- linux-3.0.3/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
59381 +++ linux-3.0.3/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
59382 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59383
59384 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59385
59386 -char *softirq_to_name[NR_SOFTIRQS] = {
59387 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59388 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59389 "TASKLET", "SCHED", "HRTIMER", "RCU"
59390 };
59391 @@ -235,7 +235,7 @@ restart:
59392 kstat_incr_softirqs_this_cpu(vec_nr);
59393
59394 trace_softirq_entry(vec_nr);
59395 - h->action(h);
59396 + h->action();
59397 trace_softirq_exit(vec_nr);
59398 if (unlikely(prev_count != preempt_count())) {
59399 printk(KERN_ERR "huh, entered softirq %u %s %p"
59400 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
59401 local_irq_restore(flags);
59402 }
59403
59404 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59405 +void open_softirq(int nr, void (*action)(void))
59406 {
59407 - softirq_vec[nr].action = action;
59408 + pax_open_kernel();
59409 + *(void **)&softirq_vec[nr].action = action;
59410 + pax_close_kernel();
59411 }
59412
59413 /*
59414 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
59415
59416 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59417
59418 -static void tasklet_action(struct softirq_action *a)
59419 +static void tasklet_action(void)
59420 {
59421 struct tasklet_struct *list;
59422
59423 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
59424 }
59425 }
59426
59427 -static void tasklet_hi_action(struct softirq_action *a)
59428 +static void tasklet_hi_action(void)
59429 {
59430 struct tasklet_struct *list;
59431
59432 diff -urNp linux-3.0.3/kernel/sys.c linux-3.0.3/kernel/sys.c
59433 --- linux-3.0.3/kernel/sys.c 2011-07-21 22:17:23.000000000 -0400
59434 +++ linux-3.0.3/kernel/sys.c 2011-08-25 17:24:58.000000000 -0400
59435 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59436 error = -EACCES;
59437 goto out;
59438 }
59439 +
59440 + if (gr_handle_chroot_setpriority(p, niceval)) {
59441 + error = -EACCES;
59442 + goto out;
59443 + }
59444 +
59445 no_nice = security_task_setnice(p, niceval);
59446 if (no_nice) {
59447 error = no_nice;
59448 @@ -537,6 +543,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59449 goto error;
59450 }
59451
59452 + if (gr_check_group_change(new->gid, new->egid, -1))
59453 + goto error;
59454 +
59455 if (rgid != (gid_t) -1 ||
59456 (egid != (gid_t) -1 && egid != old->gid))
59457 new->sgid = new->egid;
59458 @@ -566,6 +575,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59459 old = current_cred();
59460
59461 retval = -EPERM;
59462 +
59463 + if (gr_check_group_change(gid, gid, gid))
59464 + goto error;
59465 +
59466 if (nsown_capable(CAP_SETGID))
59467 new->gid = new->egid = new->sgid = new->fsgid = gid;
59468 else if (gid == old->gid || gid == old->sgid)
59469 @@ -591,11 +604,18 @@ static int set_user(struct cred *new)
59470 if (!new_user)
59471 return -EAGAIN;
59472
59473 + /*
59474 + * We don't fail in case of NPROC limit excess here because too many
59475 + * poorly written programs don't check set*uid() return code, assuming
59476 + * it never fails if called by root. We may still enforce NPROC limit
59477 + * for programs doing set*uid()+execve() by harmlessly deferring the
59478 + * failure to the execve() stage.
59479 + */
59480 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
59481 - new_user != INIT_USER) {
59482 - free_uid(new_user);
59483 - return -EAGAIN;
59484 - }
59485 + new_user != INIT_USER)
59486 + current->flags |= PF_NPROC_EXCEEDED;
59487 + else
59488 + current->flags &= ~PF_NPROC_EXCEEDED;
59489
59490 free_uid(new->user);
59491 new->user = new_user;
59492 @@ -646,6 +666,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59493 goto error;
59494 }
59495
59496 + if (gr_check_user_change(new->uid, new->euid, -1))
59497 + goto error;
59498 +
59499 if (new->uid != old->uid) {
59500 retval = set_user(new);
59501 if (retval < 0)
59502 @@ -690,6 +713,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59503 old = current_cred();
59504
59505 retval = -EPERM;
59506 +
59507 + if (gr_check_crash_uid(uid))
59508 + goto error;
59509 + if (gr_check_user_change(uid, uid, uid))
59510 + goto error;
59511 +
59512 if (nsown_capable(CAP_SETUID)) {
59513 new->suid = new->uid = uid;
59514 if (uid != old->uid) {
59515 @@ -744,6 +773,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59516 goto error;
59517 }
59518
59519 + if (gr_check_user_change(ruid, euid, -1))
59520 + goto error;
59521 +
59522 if (ruid != (uid_t) -1) {
59523 new->uid = ruid;
59524 if (ruid != old->uid) {
59525 @@ -808,6 +840,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59526 goto error;
59527 }
59528
59529 + if (gr_check_group_change(rgid, egid, -1))
59530 + goto error;
59531 +
59532 if (rgid != (gid_t) -1)
59533 new->gid = rgid;
59534 if (egid != (gid_t) -1)
59535 @@ -854,6 +889,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59536 old = current_cred();
59537 old_fsuid = old->fsuid;
59538
59539 + if (gr_check_user_change(-1, -1, uid))
59540 + goto error;
59541 +
59542 if (uid == old->uid || uid == old->euid ||
59543 uid == old->suid || uid == old->fsuid ||
59544 nsown_capable(CAP_SETUID)) {
59545 @@ -864,6 +902,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59546 }
59547 }
59548
59549 +error:
59550 abort_creds(new);
59551 return old_fsuid;
59552
59553 @@ -890,12 +929,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59554 if (gid == old->gid || gid == old->egid ||
59555 gid == old->sgid || gid == old->fsgid ||
59556 nsown_capable(CAP_SETGID)) {
59557 + if (gr_check_group_change(-1, -1, gid))
59558 + goto error;
59559 +
59560 if (gid != old_fsgid) {
59561 new->fsgid = gid;
59562 goto change_okay;
59563 }
59564 }
59565
59566 +error:
59567 abort_creds(new);
59568 return old_fsgid;
59569
59570 @@ -1642,7 +1685,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59571 error = get_dumpable(me->mm);
59572 break;
59573 case PR_SET_DUMPABLE:
59574 - if (arg2 < 0 || arg2 > 1) {
59575 + if (arg2 > 1) {
59576 error = -EINVAL;
59577 break;
59578 }
59579 diff -urNp linux-3.0.3/kernel/sysctl.c linux-3.0.3/kernel/sysctl.c
59580 --- linux-3.0.3/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
59581 +++ linux-3.0.3/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
59582 @@ -85,6 +85,13 @@
59583
59584
59585 #if defined(CONFIG_SYSCTL)
59586 +#include <linux/grsecurity.h>
59587 +#include <linux/grinternal.h>
59588 +
59589 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59590 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59591 + const int op);
59592 +extern int gr_handle_chroot_sysctl(const int op);
59593
59594 /* External variables not in a header file. */
59595 extern int sysctl_overcommit_memory;
59596 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59597 }
59598
59599 #endif
59600 +extern struct ctl_table grsecurity_table[];
59601
59602 static struct ctl_table root_table[];
59603 static struct ctl_table_root sysctl_table_root;
59604 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
59605 int sysctl_legacy_va_layout;
59606 #endif
59607
59608 +#ifdef CONFIG_PAX_SOFTMODE
59609 +static ctl_table pax_table[] = {
59610 + {
59611 + .procname = "softmode",
59612 + .data = &pax_softmode,
59613 + .maxlen = sizeof(unsigned int),
59614 + .mode = 0600,
59615 + .proc_handler = &proc_dointvec,
59616 + },
59617 +
59618 + { }
59619 +};
59620 +#endif
59621 +
59622 /* The default sysctl tables: */
59623
59624 static struct ctl_table root_table[] = {
59625 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
59626 #endif
59627
59628 static struct ctl_table kern_table[] = {
59629 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59630 + {
59631 + .procname = "grsecurity",
59632 + .mode = 0500,
59633 + .child = grsecurity_table,
59634 + },
59635 +#endif
59636 +
59637 +#ifdef CONFIG_PAX_SOFTMODE
59638 + {
59639 + .procname = "pax",
59640 + .mode = 0500,
59641 + .child = pax_table,
59642 + },
59643 +#endif
59644 +
59645 {
59646 .procname = "sched_child_runs_first",
59647 .data = &sysctl_sched_child_runs_first,
59648 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
59649 .data = &modprobe_path,
59650 .maxlen = KMOD_PATH_LEN,
59651 .mode = 0644,
59652 - .proc_handler = proc_dostring,
59653 + .proc_handler = proc_dostring_modpriv,
59654 },
59655 {
59656 .procname = "modules_disabled",
59657 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
59658 .extra1 = &zero,
59659 .extra2 = &one,
59660 },
59661 +#endif
59662 {
59663 .procname = "kptr_restrict",
59664 .data = &kptr_restrict,
59665 .maxlen = sizeof(int),
59666 .mode = 0644,
59667 .proc_handler = proc_dmesg_restrict,
59668 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59669 + .extra1 = &two,
59670 +#else
59671 .extra1 = &zero,
59672 +#endif
59673 .extra2 = &two,
59674 },
59675 -#endif
59676 {
59677 .procname = "ngroups_max",
59678 .data = &ngroups_max,
59679 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
59680 .proc_handler = proc_dointvec_minmax,
59681 .extra1 = &zero,
59682 },
59683 + {
59684 + .procname = "heap_stack_gap",
59685 + .data = &sysctl_heap_stack_gap,
59686 + .maxlen = sizeof(sysctl_heap_stack_gap),
59687 + .mode = 0644,
59688 + .proc_handler = proc_doulongvec_minmax,
59689 + },
59690 #else
59691 {
59692 .procname = "nr_trim_pages",
59693 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
59694 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59695 {
59696 int mode;
59697 + int error;
59698 +
59699 + if (table->parent != NULL && table->parent->procname != NULL &&
59700 + table->procname != NULL &&
59701 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59702 + return -EACCES;
59703 + if (gr_handle_chroot_sysctl(op))
59704 + return -EACCES;
59705 + error = gr_handle_sysctl(table, op);
59706 + if (error)
59707 + return error;
59708
59709 if (root->permissions)
59710 mode = root->permissions(root, current->nsproxy, table);
59711 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
59712 buffer, lenp, ppos);
59713 }
59714
59715 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59716 + void __user *buffer, size_t *lenp, loff_t *ppos)
59717 +{
59718 + if (write && !capable(CAP_SYS_MODULE))
59719 + return -EPERM;
59720 +
59721 + return _proc_do_string(table->data, table->maxlen, write,
59722 + buffer, lenp, ppos);
59723 +}
59724 +
59725 static size_t proc_skip_spaces(char **buf)
59726 {
59727 size_t ret;
59728 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
59729 len = strlen(tmp);
59730 if (len > *size)
59731 len = *size;
59732 + if (len > sizeof(tmp))
59733 + len = sizeof(tmp);
59734 if (copy_to_user(*buf, tmp, len))
59735 return -EFAULT;
59736 *size -= len;
59737 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
59738 *i = val;
59739 } else {
59740 val = convdiv * (*i) / convmul;
59741 - if (!first)
59742 + if (!first) {
59743 err = proc_put_char(&buffer, &left, '\t');
59744 + if (err)
59745 + break;
59746 + }
59747 err = proc_put_long(&buffer, &left, val, false);
59748 if (err)
59749 break;
59750 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
59751 return -ENOSYS;
59752 }
59753
59754 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59755 + void __user *buffer, size_t *lenp, loff_t *ppos)
59756 +{
59757 + return -ENOSYS;
59758 +}
59759 +
59760 int proc_dointvec(struct ctl_table *table, int write,
59761 void __user *buffer, size_t *lenp, loff_t *ppos)
59762 {
59763 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
59764 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
59765 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
59766 EXPORT_SYMBOL(proc_dostring);
59767 +EXPORT_SYMBOL(proc_dostring_modpriv);
59768 EXPORT_SYMBOL(proc_doulongvec_minmax);
59769 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
59770 EXPORT_SYMBOL(register_sysctl_table);
59771 diff -urNp linux-3.0.3/kernel/sysctl_check.c linux-3.0.3/kernel/sysctl_check.c
59772 --- linux-3.0.3/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
59773 +++ linux-3.0.3/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
59774 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
59775 set_fail(&fail, table, "Directory with extra2");
59776 } else {
59777 if ((table->proc_handler == proc_dostring) ||
59778 + (table->proc_handler == proc_dostring_modpriv) ||
59779 (table->proc_handler == proc_dointvec) ||
59780 (table->proc_handler == proc_dointvec_minmax) ||
59781 (table->proc_handler == proc_dointvec_jiffies) ||
59782 diff -urNp linux-3.0.3/kernel/taskstats.c linux-3.0.3/kernel/taskstats.c
59783 --- linux-3.0.3/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
59784 +++ linux-3.0.3/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
59785 @@ -27,9 +27,12 @@
59786 #include <linux/cgroup.h>
59787 #include <linux/fs.h>
59788 #include <linux/file.h>
59789 +#include <linux/grsecurity.h>
59790 #include <net/genetlink.h>
59791 #include <asm/atomic.h>
59792
59793 +extern int gr_is_taskstats_denied(int pid);
59794 +
59795 /*
59796 * Maximum length of a cpumask that can be specified in
59797 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
59798 @@ -558,6 +561,9 @@ err:
59799
59800 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
59801 {
59802 + if (gr_is_taskstats_denied(current->pid))
59803 + return -EACCES;
59804 +
59805 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
59806 return cmd_attr_register_cpumask(info);
59807 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
59808 diff -urNp linux-3.0.3/kernel/time/alarmtimer.c linux-3.0.3/kernel/time/alarmtimer.c
59809 --- linux-3.0.3/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
59810 +++ linux-3.0.3/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
59811 @@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
59812 {
59813 int error = 0;
59814 int i;
59815 - struct k_clock alarm_clock = {
59816 + static struct k_clock alarm_clock = {
59817 .clock_getres = alarm_clock_getres,
59818 .clock_get = alarm_clock_get,
59819 .timer_create = alarm_timer_create,
59820 diff -urNp linux-3.0.3/kernel/time/tick-broadcast.c linux-3.0.3/kernel/time/tick-broadcast.c
59821 --- linux-3.0.3/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
59822 +++ linux-3.0.3/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
59823 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
59824 * then clear the broadcast bit.
59825 */
59826 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
59827 - int cpu = smp_processor_id();
59828 + cpu = smp_processor_id();
59829
59830 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
59831 tick_broadcast_clear_oneshot(cpu);
59832 diff -urNp linux-3.0.3/kernel/time/timekeeping.c linux-3.0.3/kernel/time/timekeeping.c
59833 --- linux-3.0.3/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
59834 +++ linux-3.0.3/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
59835 @@ -14,6 +14,7 @@
59836 #include <linux/init.h>
59837 #include <linux/mm.h>
59838 #include <linux/sched.h>
59839 +#include <linux/grsecurity.h>
59840 #include <linux/syscore_ops.h>
59841 #include <linux/clocksource.h>
59842 #include <linux/jiffies.h>
59843 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
59844 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
59845 return -EINVAL;
59846
59847 + gr_log_timechange();
59848 +
59849 write_seqlock_irqsave(&xtime_lock, flags);
59850
59851 timekeeping_forward_now();
59852 diff -urNp linux-3.0.3/kernel/time/timer_list.c linux-3.0.3/kernel/time/timer_list.c
59853 --- linux-3.0.3/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
59854 +++ linux-3.0.3/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
59855 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
59856
59857 static void print_name_offset(struct seq_file *m, void *sym)
59858 {
59859 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59860 + SEQ_printf(m, "<%p>", NULL);
59861 +#else
59862 char symname[KSYM_NAME_LEN];
59863
59864 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
59865 SEQ_printf(m, "<%pK>", sym);
59866 else
59867 SEQ_printf(m, "%s", symname);
59868 +#endif
59869 }
59870
59871 static void
59872 @@ -112,7 +116,11 @@ next_one:
59873 static void
59874 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
59875 {
59876 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59877 + SEQ_printf(m, " .base: %p\n", NULL);
59878 +#else
59879 SEQ_printf(m, " .base: %pK\n", base);
59880 +#endif
59881 SEQ_printf(m, " .index: %d\n",
59882 base->index);
59883 SEQ_printf(m, " .resolution: %Lu nsecs\n",
59884 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
59885 {
59886 struct proc_dir_entry *pe;
59887
59888 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59889 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
59890 +#else
59891 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
59892 +#endif
59893 if (!pe)
59894 return -ENOMEM;
59895 return 0;
59896 diff -urNp linux-3.0.3/kernel/time/timer_stats.c linux-3.0.3/kernel/time/timer_stats.c
59897 --- linux-3.0.3/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
59898 +++ linux-3.0.3/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
59899 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
59900 static unsigned long nr_entries;
59901 static struct entry entries[MAX_ENTRIES];
59902
59903 -static atomic_t overflow_count;
59904 +static atomic_unchecked_t overflow_count;
59905
59906 /*
59907 * The entries are in a hash-table, for fast lookup:
59908 @@ -140,7 +140,7 @@ static void reset_entries(void)
59909 nr_entries = 0;
59910 memset(entries, 0, sizeof(entries));
59911 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
59912 - atomic_set(&overflow_count, 0);
59913 + atomic_set_unchecked(&overflow_count, 0);
59914 }
59915
59916 static struct entry *alloc_entry(void)
59917 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
59918 if (likely(entry))
59919 entry->count++;
59920 else
59921 - atomic_inc(&overflow_count);
59922 + atomic_inc_unchecked(&overflow_count);
59923
59924 out_unlock:
59925 raw_spin_unlock_irqrestore(lock, flags);
59926 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
59927
59928 static void print_name_offset(struct seq_file *m, unsigned long addr)
59929 {
59930 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59931 + seq_printf(m, "<%p>", NULL);
59932 +#else
59933 char symname[KSYM_NAME_LEN];
59934
59935 if (lookup_symbol_name(addr, symname) < 0)
59936 seq_printf(m, "<%p>", (void *)addr);
59937 else
59938 seq_printf(m, "%s", symname);
59939 +#endif
59940 }
59941
59942 static int tstats_show(struct seq_file *m, void *v)
59943 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
59944
59945 seq_puts(m, "Timer Stats Version: v0.2\n");
59946 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
59947 - if (atomic_read(&overflow_count))
59948 + if (atomic_read_unchecked(&overflow_count))
59949 seq_printf(m, "Overflow: %d entries\n",
59950 - atomic_read(&overflow_count));
59951 + atomic_read_unchecked(&overflow_count));
59952
59953 for (i = 0; i < nr_entries; i++) {
59954 entry = entries + i;
59955 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
59956 {
59957 struct proc_dir_entry *pe;
59958
59959 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59960 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
59961 +#else
59962 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
59963 +#endif
59964 if (!pe)
59965 return -ENOMEM;
59966 return 0;
59967 diff -urNp linux-3.0.3/kernel/time.c linux-3.0.3/kernel/time.c
59968 --- linux-3.0.3/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
59969 +++ linux-3.0.3/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
59970 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
59971 return error;
59972
59973 if (tz) {
59974 + /* we log in do_settimeofday called below, so don't log twice
59975 + */
59976 + if (!tv)
59977 + gr_log_timechange();
59978 +
59979 /* SMP safe, global irq locking makes it work. */
59980 sys_tz = *tz;
59981 update_vsyscall_tz();
59982 diff -urNp linux-3.0.3/kernel/timer.c linux-3.0.3/kernel/timer.c
59983 --- linux-3.0.3/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
59984 +++ linux-3.0.3/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
59985 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
59986 /*
59987 * This function runs timers and the timer-tq in bottom half context.
59988 */
59989 -static void run_timer_softirq(struct softirq_action *h)
59990 +static void run_timer_softirq(void)
59991 {
59992 struct tvec_base *base = __this_cpu_read(tvec_bases);
59993
59994 diff -urNp linux-3.0.3/kernel/trace/blktrace.c linux-3.0.3/kernel/trace/blktrace.c
59995 --- linux-3.0.3/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
59996 +++ linux-3.0.3/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
59997 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
59998 struct blk_trace *bt = filp->private_data;
59999 char buf[16];
60000
60001 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60002 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60003
60004 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60005 }
60006 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60007 return 1;
60008
60009 bt = buf->chan->private_data;
60010 - atomic_inc(&bt->dropped);
60011 + atomic_inc_unchecked(&bt->dropped);
60012 return 0;
60013 }
60014
60015 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60016
60017 bt->dir = dir;
60018 bt->dev = dev;
60019 - atomic_set(&bt->dropped, 0);
60020 + atomic_set_unchecked(&bt->dropped, 0);
60021
60022 ret = -EIO;
60023 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60024 diff -urNp linux-3.0.3/kernel/trace/ftrace.c linux-3.0.3/kernel/trace/ftrace.c
60025 --- linux-3.0.3/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
60026 +++ linux-3.0.3/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
60027 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
60028 if (unlikely(ftrace_disabled))
60029 return 0;
60030
60031 + ret = ftrace_arch_code_modify_prepare();
60032 + FTRACE_WARN_ON(ret);
60033 + if (ret)
60034 + return 0;
60035 +
60036 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60037 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60038 if (ret) {
60039 ftrace_bug(ret, ip);
60040 - return 0;
60041 }
60042 - return 1;
60043 + return ret ? 0 : 1;
60044 }
60045
60046 /*
60047 @@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
60048
60049 int
60050 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60051 - void *data)
60052 + void *data)
60053 {
60054 struct ftrace_func_probe *entry;
60055 struct ftrace_page *pg;
60056 diff -urNp linux-3.0.3/kernel/trace/trace.c linux-3.0.3/kernel/trace/trace.c
60057 --- linux-3.0.3/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
60058 +++ linux-3.0.3/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
60059 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
60060 size_t rem;
60061 unsigned int i;
60062
60063 + pax_track_stack();
60064 +
60065 if (splice_grow_spd(pipe, &spd))
60066 return -ENOMEM;
60067
60068 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
60069 int entries, size, i;
60070 size_t ret;
60071
60072 + pax_track_stack();
60073 +
60074 if (splice_grow_spd(pipe, &spd))
60075 return -ENOMEM;
60076
60077 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
60078 };
60079 #endif
60080
60081 -static struct dentry *d_tracer;
60082 -
60083 struct dentry *tracing_init_dentry(void)
60084 {
60085 + static struct dentry *d_tracer;
60086 static int once;
60087
60088 if (d_tracer)
60089 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
60090 return d_tracer;
60091 }
60092
60093 -static struct dentry *d_percpu;
60094 -
60095 struct dentry *tracing_dentry_percpu(void)
60096 {
60097 + static struct dentry *d_percpu;
60098 static int once;
60099 struct dentry *d_tracer;
60100
60101 diff -urNp linux-3.0.3/kernel/trace/trace_events.c linux-3.0.3/kernel/trace/trace_events.c
60102 --- linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
60103 +++ linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
60104 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
60105 struct ftrace_module_file_ops {
60106 struct list_head list;
60107 struct module *mod;
60108 - struct file_operations id;
60109 - struct file_operations enable;
60110 - struct file_operations format;
60111 - struct file_operations filter;
60112 };
60113
60114 static struct ftrace_module_file_ops *
60115 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
60116
60117 file_ops->mod = mod;
60118
60119 - file_ops->id = ftrace_event_id_fops;
60120 - file_ops->id.owner = mod;
60121 -
60122 - file_ops->enable = ftrace_enable_fops;
60123 - file_ops->enable.owner = mod;
60124 -
60125 - file_ops->filter = ftrace_event_filter_fops;
60126 - file_ops->filter.owner = mod;
60127 -
60128 - file_ops->format = ftrace_event_format_fops;
60129 - file_ops->format.owner = mod;
60130 + pax_open_kernel();
60131 + *(void **)&mod->trace_id.owner = mod;
60132 + *(void **)&mod->trace_enable.owner = mod;
60133 + *(void **)&mod->trace_filter.owner = mod;
60134 + *(void **)&mod->trace_format.owner = mod;
60135 + pax_close_kernel();
60136
60137 list_add(&file_ops->list, &ftrace_module_file_list);
60138
60139 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
60140
60141 for_each_event(call, start, end) {
60142 __trace_add_event_call(*call, mod,
60143 - &file_ops->id, &file_ops->enable,
60144 - &file_ops->filter, &file_ops->format);
60145 + &mod->trace_id, &mod->trace_enable,
60146 + &mod->trace_filter, &mod->trace_format);
60147 }
60148 }
60149
60150 diff -urNp linux-3.0.3/kernel/trace/trace_mmiotrace.c linux-3.0.3/kernel/trace/trace_mmiotrace.c
60151 --- linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
60152 +++ linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
60153 @@ -24,7 +24,7 @@ struct header_iter {
60154 static struct trace_array *mmio_trace_array;
60155 static bool overrun_detected;
60156 static unsigned long prev_overruns;
60157 -static atomic_t dropped_count;
60158 +static atomic_unchecked_t dropped_count;
60159
60160 static void mmio_reset_data(struct trace_array *tr)
60161 {
60162 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60163
60164 static unsigned long count_overruns(struct trace_iterator *iter)
60165 {
60166 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60167 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60168 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60169
60170 if (over > prev_overruns)
60171 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60172 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60173 sizeof(*entry), 0, pc);
60174 if (!event) {
60175 - atomic_inc(&dropped_count);
60176 + atomic_inc_unchecked(&dropped_count);
60177 return;
60178 }
60179 entry = ring_buffer_event_data(event);
60180 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60181 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60182 sizeof(*entry), 0, pc);
60183 if (!event) {
60184 - atomic_inc(&dropped_count);
60185 + atomic_inc_unchecked(&dropped_count);
60186 return;
60187 }
60188 entry = ring_buffer_event_data(event);
60189 diff -urNp linux-3.0.3/kernel/trace/trace_output.c linux-3.0.3/kernel/trace/trace_output.c
60190 --- linux-3.0.3/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
60191 +++ linux-3.0.3/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
60192 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60193
60194 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60195 if (!IS_ERR(p)) {
60196 - p = mangle_path(s->buffer + s->len, p, "\n");
60197 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60198 if (p) {
60199 s->len = p - s->buffer;
60200 return 1;
60201 diff -urNp linux-3.0.3/kernel/trace/trace_stack.c linux-3.0.3/kernel/trace/trace_stack.c
60202 --- linux-3.0.3/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
60203 +++ linux-3.0.3/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
60204 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60205 return;
60206
60207 /* we do not handle interrupt stacks yet */
60208 - if (!object_is_on_stack(&this_size))
60209 + if (!object_starts_on_stack(&this_size))
60210 return;
60211
60212 local_irq_save(flags);
60213 diff -urNp linux-3.0.3/kernel/trace/trace_workqueue.c linux-3.0.3/kernel/trace/trace_workqueue.c
60214 --- linux-3.0.3/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
60215 +++ linux-3.0.3/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
60216 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60217 int cpu;
60218 pid_t pid;
60219 /* Can be inserted from interrupt or user context, need to be atomic */
60220 - atomic_t inserted;
60221 + atomic_unchecked_t inserted;
60222 /*
60223 * Don't need to be atomic, works are serialized in a single workqueue thread
60224 * on a single CPU.
60225 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60226 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60227 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60228 if (node->pid == wq_thread->pid) {
60229 - atomic_inc(&node->inserted);
60230 + atomic_inc_unchecked(&node->inserted);
60231 goto found;
60232 }
60233 }
60234 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60235 tsk = get_pid_task(pid, PIDTYPE_PID);
60236 if (tsk) {
60237 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60238 - atomic_read(&cws->inserted), cws->executed,
60239 + atomic_read_unchecked(&cws->inserted), cws->executed,
60240 tsk->comm);
60241 put_task_struct(tsk);
60242 }
60243 diff -urNp linux-3.0.3/lib/bug.c linux-3.0.3/lib/bug.c
60244 --- linux-3.0.3/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
60245 +++ linux-3.0.3/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
60246 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60247 return BUG_TRAP_TYPE_NONE;
60248
60249 bug = find_bug(bugaddr);
60250 + if (!bug)
60251 + return BUG_TRAP_TYPE_NONE;
60252
60253 file = NULL;
60254 line = 0;
60255 diff -urNp linux-3.0.3/lib/debugobjects.c linux-3.0.3/lib/debugobjects.c
60256 --- linux-3.0.3/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
60257 +++ linux-3.0.3/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
60258 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60259 if (limit > 4)
60260 return;
60261
60262 - is_on_stack = object_is_on_stack(addr);
60263 + is_on_stack = object_starts_on_stack(addr);
60264 if (is_on_stack == onstack)
60265 return;
60266
60267 diff -urNp linux-3.0.3/lib/dma-debug.c linux-3.0.3/lib/dma-debug.c
60268 --- linux-3.0.3/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
60269 +++ linux-3.0.3/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
60270 @@ -870,7 +870,7 @@ out:
60271
60272 static void check_for_stack(struct device *dev, void *addr)
60273 {
60274 - if (object_is_on_stack(addr))
60275 + if (object_starts_on_stack(addr))
60276 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60277 "stack [addr=%p]\n", addr);
60278 }
60279 diff -urNp linux-3.0.3/lib/extable.c linux-3.0.3/lib/extable.c
60280 --- linux-3.0.3/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
60281 +++ linux-3.0.3/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
60282 @@ -13,6 +13,7 @@
60283 #include <linux/init.h>
60284 #include <linux/sort.h>
60285 #include <asm/uaccess.h>
60286 +#include <asm/pgtable.h>
60287
60288 #ifndef ARCH_HAS_SORT_EXTABLE
60289 /*
60290 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
60291 void sort_extable(struct exception_table_entry *start,
60292 struct exception_table_entry *finish)
60293 {
60294 + pax_open_kernel();
60295 sort(start, finish - start, sizeof(struct exception_table_entry),
60296 cmp_ex, NULL);
60297 + pax_close_kernel();
60298 }
60299
60300 #ifdef CONFIG_MODULES
60301 diff -urNp linux-3.0.3/lib/inflate.c linux-3.0.3/lib/inflate.c
60302 --- linux-3.0.3/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
60303 +++ linux-3.0.3/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
60304 @@ -269,7 +269,7 @@ static void free(void *where)
60305 malloc_ptr = free_mem_ptr;
60306 }
60307 #else
60308 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60309 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60310 #define free(a) kfree(a)
60311 #endif
60312
60313 diff -urNp linux-3.0.3/lib/Kconfig.debug linux-3.0.3/lib/Kconfig.debug
60314 --- linux-3.0.3/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
60315 +++ linux-3.0.3/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
60316 @@ -1088,6 +1088,7 @@ config LATENCYTOP
60317 depends on DEBUG_KERNEL
60318 depends on STACKTRACE_SUPPORT
60319 depends on PROC_FS
60320 + depends on !GRKERNSEC_HIDESYM
60321 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60322 select KALLSYMS
60323 select KALLSYMS_ALL
60324 diff -urNp linux-3.0.3/lib/kref.c linux-3.0.3/lib/kref.c
60325 --- linux-3.0.3/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
60326 +++ linux-3.0.3/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
60327 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60328 */
60329 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60330 {
60331 - WARN_ON(release == NULL);
60332 + BUG_ON(release == NULL);
60333 WARN_ON(release == (void (*)(struct kref *))kfree);
60334
60335 if (atomic_dec_and_test(&kref->refcount)) {
60336 diff -urNp linux-3.0.3/lib/radix-tree.c linux-3.0.3/lib/radix-tree.c
60337 --- linux-3.0.3/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
60338 +++ linux-3.0.3/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
60339 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60340 int nr;
60341 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60342 };
60343 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60344 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60345
60346 static inline void *ptr_to_indirect(void *ptr)
60347 {
60348 diff -urNp linux-3.0.3/lib/vsprintf.c linux-3.0.3/lib/vsprintf.c
60349 --- linux-3.0.3/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
60350 +++ linux-3.0.3/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
60351 @@ -16,6 +16,9 @@
60352 * - scnprintf and vscnprintf
60353 */
60354
60355 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60356 +#define __INCLUDED_BY_HIDESYM 1
60357 +#endif
60358 #include <stdarg.h>
60359 #include <linux/module.h>
60360 #include <linux/types.h>
60361 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60362 char sym[KSYM_SYMBOL_LEN];
60363 if (ext == 'B')
60364 sprint_backtrace(sym, value);
60365 - else if (ext != 'f' && ext != 's')
60366 + else if (ext != 'f' && ext != 's' && ext != 'a')
60367 sprint_symbol(sym, value);
60368 else
60369 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60370 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
60371 return string(buf, end, uuid, spec);
60372 }
60373
60374 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60375 +int kptr_restrict __read_mostly = 2;
60376 +#else
60377 int kptr_restrict __read_mostly;
60378 +#endif
60379
60380 /*
60381 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60382 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
60383 * - 'S' For symbolic direct pointers with offset
60384 * - 's' For symbolic direct pointers without offset
60385 * - 'B' For backtraced symbolic direct pointers with offset
60386 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60387 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60388 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60389 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60390 * - 'M' For a 6-byte MAC address, it prints the address in the
60391 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
60392 {
60393 if (!ptr && *fmt != 'K') {
60394 /*
60395 - * Print (null) with the same width as a pointer so it makes
60396 + * Print (nil) with the same width as a pointer so it makes
60397 * tabular output look nice.
60398 */
60399 if (spec.field_width == -1)
60400 spec.field_width = 2 * sizeof(void *);
60401 - return string(buf, end, "(null)", spec);
60402 + return string(buf, end, "(nil)", spec);
60403 }
60404
60405 switch (*fmt) {
60406 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
60407 /* Fallthrough */
60408 case 'S':
60409 case 's':
60410 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60411 + break;
60412 +#else
60413 + return symbol_string(buf, end, ptr, spec, *fmt);
60414 +#endif
60415 + case 'A':
60416 + case 'a':
60417 case 'B':
60418 return symbol_string(buf, end, ptr, spec, *fmt);
60419 case 'R':
60420 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
60421 typeof(type) value; \
60422 if (sizeof(type) == 8) { \
60423 args = PTR_ALIGN(args, sizeof(u32)); \
60424 - *(u32 *)&value = *(u32 *)args; \
60425 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60426 + *(u32 *)&value = *(const u32 *)args; \
60427 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60428 } else { \
60429 args = PTR_ALIGN(args, sizeof(type)); \
60430 - value = *(typeof(type) *)args; \
60431 + value = *(const typeof(type) *)args; \
60432 } \
60433 args += sizeof(type); \
60434 value; \
60435 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
60436 case FORMAT_TYPE_STR: {
60437 const char *str_arg = args;
60438 args += strlen(str_arg) + 1;
60439 - str = string(str, end, (char *)str_arg, spec);
60440 + str = string(str, end, str_arg, spec);
60441 break;
60442 }
60443
60444 diff -urNp linux-3.0.3/localversion-grsec linux-3.0.3/localversion-grsec
60445 --- linux-3.0.3/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60446 +++ linux-3.0.3/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
60447 @@ -0,0 +1 @@
60448 +-grsec
60449 diff -urNp linux-3.0.3/Makefile linux-3.0.3/Makefile
60450 --- linux-3.0.3/Makefile 2011-08-23 21:44:40.000000000 -0400
60451 +++ linux-3.0.3/Makefile 2011-08-27 21:15:31.000000000 -0400
60452 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60453
60454 HOSTCC = gcc
60455 HOSTCXX = g++
60456 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60457 -HOSTCXXFLAGS = -O2
60458 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60459 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60460 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60461
60462 # Decide whether to build built-in, modular, or both.
60463 # Normally, just do built-in.
60464 @@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60465 KBUILD_CPPFLAGS := -D__KERNEL__
60466
60467 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60468 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60469 -fno-strict-aliasing -fno-common \
60470 -Werror-implicit-function-declaration \
60471 -Wno-format-security \
60472 -fno-delete-null-pointer-checks
60473 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60474 KBUILD_AFLAGS_KERNEL :=
60475 KBUILD_CFLAGS_KERNEL :=
60476 KBUILD_AFLAGS := -D__ASSEMBLY__
60477 @@ -564,6 +567,25 @@ else
60478 KBUILD_CFLAGS += -O2
60479 endif
60480
60481 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60482 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
60483 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60484 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60485 +endif
60486 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60487 +gcc-plugins0:
60488 + $(Q)$(MAKE) $(build)=tools/gcc
60489 +gcc-plugins: scripts_basic gcc-plugins0
60490 +else
60491 +gcc-plugins:
60492 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60493 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60494 +else
60495 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60496 +endif
60497 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60498 +endif
60499 +
60500 include $(srctree)/arch/$(SRCARCH)/Makefile
60501
60502 ifneq ($(CONFIG_FRAME_WARN),0)
60503 @@ -708,7 +730,7 @@ export mod_strip_cmd
60504
60505
60506 ifeq ($(KBUILD_EXTMOD),)
60507 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60508 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60509
60510 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60511 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60512 @@ -907,6 +929,7 @@ define rule_vmlinux-modpost
60513 endef
60514
60515 # vmlinux image - including updated kernel symbols
60516 +vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60517 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
60518 ifdef CONFIG_HEADERS_CHECK
60519 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
60520 @@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
60521 endif
60522
60523 # prepare2 creates a makefile if using a separate output directory
60524 -prepare2: prepare3 outputmakefile asm-generic
60525 +prepare2: prepare3 outputmakefile asm-generic gcc-plugins
60526
60527 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60528 include/config/auto.conf
60529 @@ -1087,6 +1110,7 @@ all: modules
60530 # using awk while concatenating to the final file.
60531
60532 PHONY += modules
60533 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60534 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
60535 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
60536 @$(kecho) ' Building modules, stage 2.';
60537 @@ -1359,6 +1383,7 @@ PHONY += $(module-dirs) modules
60538 $(module-dirs): crmodverdir $(objtree)/Module.symvers
60539 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
60540
60541 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60542 modules: $(module-dirs)
60543 @$(kecho) ' Building modules, stage 2.';
60544 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
60545 @@ -1404,7 +1429,7 @@ clean: $(clean-dirs)
60546 $(call cmd,rmdirs)
60547 $(call cmd,rmfiles)
60548 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60549 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60550 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60551 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60552 -o -name '*.symtypes' -o -name 'modules.order' \
60553 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60554 diff -urNp linux-3.0.3/mm/filemap.c linux-3.0.3/mm/filemap.c
60555 --- linux-3.0.3/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
60556 +++ linux-3.0.3/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
60557 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
60558 struct address_space *mapping = file->f_mapping;
60559
60560 if (!mapping->a_ops->readpage)
60561 - return -ENOEXEC;
60562 + return -ENODEV;
60563 file_accessed(file);
60564 vma->vm_ops = &generic_file_vm_ops;
60565 vma->vm_flags |= VM_CAN_NONLINEAR;
60566 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
60567 *pos = i_size_read(inode);
60568
60569 if (limit != RLIM_INFINITY) {
60570 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60571 if (*pos >= limit) {
60572 send_sig(SIGXFSZ, current, 0);
60573 return -EFBIG;
60574 diff -urNp linux-3.0.3/mm/fremap.c linux-3.0.3/mm/fremap.c
60575 --- linux-3.0.3/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
60576 +++ linux-3.0.3/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
60577 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60578 retry:
60579 vma = find_vma(mm, start);
60580
60581 +#ifdef CONFIG_PAX_SEGMEXEC
60582 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60583 + goto out;
60584 +#endif
60585 +
60586 /*
60587 * Make sure the vma is shared, that it supports prefaulting,
60588 * and that the remapped range is valid and fully within
60589 diff -urNp linux-3.0.3/mm/highmem.c linux-3.0.3/mm/highmem.c
60590 --- linux-3.0.3/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
60591 +++ linux-3.0.3/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
60592 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60593 * So no dangers, even with speculative execution.
60594 */
60595 page = pte_page(pkmap_page_table[i]);
60596 + pax_open_kernel();
60597 pte_clear(&init_mm, (unsigned long)page_address(page),
60598 &pkmap_page_table[i]);
60599 -
60600 + pax_close_kernel();
60601 set_page_address(page, NULL);
60602 need_flush = 1;
60603 }
60604 @@ -186,9 +187,11 @@ start:
60605 }
60606 }
60607 vaddr = PKMAP_ADDR(last_pkmap_nr);
60608 +
60609 + pax_open_kernel();
60610 set_pte_at(&init_mm, vaddr,
60611 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60612 -
60613 + pax_close_kernel();
60614 pkmap_count[last_pkmap_nr] = 1;
60615 set_page_address(page, (void *)vaddr);
60616
60617 diff -urNp linux-3.0.3/mm/huge_memory.c linux-3.0.3/mm/huge_memory.c
60618 --- linux-3.0.3/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
60619 +++ linux-3.0.3/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
60620 @@ -702,7 +702,7 @@ out:
60621 * run pte_offset_map on the pmd, if an huge pmd could
60622 * materialize from under us from a different thread.
60623 */
60624 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60625 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60626 return VM_FAULT_OOM;
60627 /* if an huge pmd materialized from under us just retry later */
60628 if (unlikely(pmd_trans_huge(*pmd)))
60629 diff -urNp linux-3.0.3/mm/hugetlb.c linux-3.0.3/mm/hugetlb.c
60630 --- linux-3.0.3/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
60631 +++ linux-3.0.3/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
60632 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60633 return 1;
60634 }
60635
60636 +#ifdef CONFIG_PAX_SEGMEXEC
60637 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60638 +{
60639 + struct mm_struct *mm = vma->vm_mm;
60640 + struct vm_area_struct *vma_m;
60641 + unsigned long address_m;
60642 + pte_t *ptep_m;
60643 +
60644 + vma_m = pax_find_mirror_vma(vma);
60645 + if (!vma_m)
60646 + return;
60647 +
60648 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60649 + address_m = address + SEGMEXEC_TASK_SIZE;
60650 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60651 + get_page(page_m);
60652 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60653 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60654 +}
60655 +#endif
60656 +
60657 /*
60658 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60659 */
60660 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60661 make_huge_pte(vma, new_page, 1));
60662 page_remove_rmap(old_page);
60663 hugepage_add_new_anon_rmap(new_page, vma, address);
60664 +
60665 +#ifdef CONFIG_PAX_SEGMEXEC
60666 + pax_mirror_huge_pte(vma, address, new_page);
60667 +#endif
60668 +
60669 /* Make the old page be freed below */
60670 new_page = old_page;
60671 mmu_notifier_invalidate_range_end(mm,
60672 @@ -2591,6 +2617,10 @@ retry:
60673 && (vma->vm_flags & VM_SHARED)));
60674 set_huge_pte_at(mm, address, ptep, new_pte);
60675
60676 +#ifdef CONFIG_PAX_SEGMEXEC
60677 + pax_mirror_huge_pte(vma, address, page);
60678 +#endif
60679 +
60680 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60681 /* Optimization, do the COW without a second fault */
60682 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60683 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60684 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60685 struct hstate *h = hstate_vma(vma);
60686
60687 +#ifdef CONFIG_PAX_SEGMEXEC
60688 + struct vm_area_struct *vma_m;
60689 +#endif
60690 +
60691 ptep = huge_pte_offset(mm, address);
60692 if (ptep) {
60693 entry = huge_ptep_get(ptep);
60694 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60695 VM_FAULT_SET_HINDEX(h - hstates);
60696 }
60697
60698 +#ifdef CONFIG_PAX_SEGMEXEC
60699 + vma_m = pax_find_mirror_vma(vma);
60700 + if (vma_m) {
60701 + unsigned long address_m;
60702 +
60703 + if (vma->vm_start > vma_m->vm_start) {
60704 + address_m = address;
60705 + address -= SEGMEXEC_TASK_SIZE;
60706 + vma = vma_m;
60707 + h = hstate_vma(vma);
60708 + } else
60709 + address_m = address + SEGMEXEC_TASK_SIZE;
60710 +
60711 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60712 + return VM_FAULT_OOM;
60713 + address_m &= HPAGE_MASK;
60714 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60715 + }
60716 +#endif
60717 +
60718 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60719 if (!ptep)
60720 return VM_FAULT_OOM;
60721 diff -urNp linux-3.0.3/mm/internal.h linux-3.0.3/mm/internal.h
60722 --- linux-3.0.3/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
60723 +++ linux-3.0.3/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
60724 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60725 * in mm/page_alloc.c
60726 */
60727 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60728 +extern void free_compound_page(struct page *page);
60729 extern void prep_compound_page(struct page *page, unsigned long order);
60730 #ifdef CONFIG_MEMORY_FAILURE
60731 extern bool is_free_buddy_page(struct page *page);
60732 diff -urNp linux-3.0.3/mm/Kconfig linux-3.0.3/mm/Kconfig
60733 --- linux-3.0.3/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
60734 +++ linux-3.0.3/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
60735 @@ -240,7 +240,7 @@ config KSM
60736 config DEFAULT_MMAP_MIN_ADDR
60737 int "Low address space to protect from user allocation"
60738 depends on MMU
60739 - default 4096
60740 + default 65536
60741 help
60742 This is the portion of low virtual memory which should be protected
60743 from userspace allocation. Keeping a user from writing to low pages
60744 diff -urNp linux-3.0.3/mm/kmemleak.c linux-3.0.3/mm/kmemleak.c
60745 --- linux-3.0.3/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
60746 +++ linux-3.0.3/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
60747 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60748
60749 for (i = 0; i < object->trace_len; i++) {
60750 void *ptr = (void *)object->trace[i];
60751 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60752 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60753 }
60754 }
60755
60756 diff -urNp linux-3.0.3/mm/madvise.c linux-3.0.3/mm/madvise.c
60757 --- linux-3.0.3/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
60758 +++ linux-3.0.3/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
60759 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
60760 pgoff_t pgoff;
60761 unsigned long new_flags = vma->vm_flags;
60762
60763 +#ifdef CONFIG_PAX_SEGMEXEC
60764 + struct vm_area_struct *vma_m;
60765 +#endif
60766 +
60767 switch (behavior) {
60768 case MADV_NORMAL:
60769 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
60770 @@ -110,6 +114,13 @@ success:
60771 /*
60772 * vm_flags is protected by the mmap_sem held in write mode.
60773 */
60774 +
60775 +#ifdef CONFIG_PAX_SEGMEXEC
60776 + vma_m = pax_find_mirror_vma(vma);
60777 + if (vma_m)
60778 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
60779 +#endif
60780 +
60781 vma->vm_flags = new_flags;
60782
60783 out:
60784 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
60785 struct vm_area_struct ** prev,
60786 unsigned long start, unsigned long end)
60787 {
60788 +
60789 +#ifdef CONFIG_PAX_SEGMEXEC
60790 + struct vm_area_struct *vma_m;
60791 +#endif
60792 +
60793 *prev = vma;
60794 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
60795 return -EINVAL;
60796 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
60797 zap_page_range(vma, start, end - start, &details);
60798 } else
60799 zap_page_range(vma, start, end - start, NULL);
60800 +
60801 +#ifdef CONFIG_PAX_SEGMEXEC
60802 + vma_m = pax_find_mirror_vma(vma);
60803 + if (vma_m) {
60804 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
60805 + struct zap_details details = {
60806 + .nonlinear_vma = vma_m,
60807 + .last_index = ULONG_MAX,
60808 + };
60809 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
60810 + } else
60811 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
60812 + }
60813 +#endif
60814 +
60815 return 0;
60816 }
60817
60818 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
60819 if (end < start)
60820 goto out;
60821
60822 +#ifdef CONFIG_PAX_SEGMEXEC
60823 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60824 + if (end > SEGMEXEC_TASK_SIZE)
60825 + goto out;
60826 + } else
60827 +#endif
60828 +
60829 + if (end > TASK_SIZE)
60830 + goto out;
60831 +
60832 error = 0;
60833 if (end == start)
60834 goto out;
60835 diff -urNp linux-3.0.3/mm/memory.c linux-3.0.3/mm/memory.c
60836 --- linux-3.0.3/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
60837 +++ linux-3.0.3/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
60838 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
60839 return;
60840
60841 pmd = pmd_offset(pud, start);
60842 +
60843 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
60844 pud_clear(pud);
60845 pmd_free_tlb(tlb, pmd, start);
60846 +#endif
60847 +
60848 }
60849
60850 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
60851 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
60852 if (end - 1 > ceiling - 1)
60853 return;
60854
60855 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
60856 pud = pud_offset(pgd, start);
60857 pgd_clear(pgd);
60858 pud_free_tlb(tlb, pud, start);
60859 +#endif
60860 +
60861 }
60862
60863 /*
60864 @@ -1577,12 +1584,6 @@ no_page_table:
60865 return page;
60866 }
60867
60868 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
60869 -{
60870 - return stack_guard_page_start(vma, addr) ||
60871 - stack_guard_page_end(vma, addr+PAGE_SIZE);
60872 -}
60873 -
60874 /**
60875 * __get_user_pages() - pin user pages in memory
60876 * @tsk: task_struct of target task
60877 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
60878 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
60879 i = 0;
60880
60881 - do {
60882 + while (nr_pages) {
60883 struct vm_area_struct *vma;
60884
60885 - vma = find_extend_vma(mm, start);
60886 + vma = find_vma(mm, start);
60887 if (!vma && in_gate_area(mm, start)) {
60888 unsigned long pg = start & PAGE_MASK;
60889 pgd_t *pgd;
60890 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
60891 goto next_page;
60892 }
60893
60894 - if (!vma ||
60895 + if (!vma || start < vma->vm_start ||
60896 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
60897 !(vm_flags & vma->vm_flags))
60898 return i ? : -EFAULT;
60899 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
60900 int ret;
60901 unsigned int fault_flags = 0;
60902
60903 - /* For mlock, just skip the stack guard page. */
60904 - if (foll_flags & FOLL_MLOCK) {
60905 - if (stack_guard_page(vma, start))
60906 - goto next_page;
60907 - }
60908 if (foll_flags & FOLL_WRITE)
60909 fault_flags |= FAULT_FLAG_WRITE;
60910 if (nonblocking)
60911 @@ -1811,7 +1807,7 @@ next_page:
60912 start += PAGE_SIZE;
60913 nr_pages--;
60914 } while (nr_pages && start < vma->vm_end);
60915 - } while (nr_pages);
60916 + }
60917 return i;
60918 }
60919 EXPORT_SYMBOL(__get_user_pages);
60920 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
60921 page_add_file_rmap(page);
60922 set_pte_at(mm, addr, pte, mk_pte(page, prot));
60923
60924 +#ifdef CONFIG_PAX_SEGMEXEC
60925 + pax_mirror_file_pte(vma, addr, page, ptl);
60926 +#endif
60927 +
60928 retval = 0;
60929 pte_unmap_unlock(pte, ptl);
60930 return retval;
60931 @@ -2052,10 +2052,22 @@ out:
60932 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
60933 struct page *page)
60934 {
60935 +
60936 +#ifdef CONFIG_PAX_SEGMEXEC
60937 + struct vm_area_struct *vma_m;
60938 +#endif
60939 +
60940 if (addr < vma->vm_start || addr >= vma->vm_end)
60941 return -EFAULT;
60942 if (!page_count(page))
60943 return -EINVAL;
60944 +
60945 +#ifdef CONFIG_PAX_SEGMEXEC
60946 + vma_m = pax_find_mirror_vma(vma);
60947 + if (vma_m)
60948 + vma_m->vm_flags |= VM_INSERTPAGE;
60949 +#endif
60950 +
60951 vma->vm_flags |= VM_INSERTPAGE;
60952 return insert_page(vma, addr, page, vma->vm_page_prot);
60953 }
60954 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
60955 unsigned long pfn)
60956 {
60957 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
60958 + BUG_ON(vma->vm_mirror);
60959
60960 if (addr < vma->vm_start || addr >= vma->vm_end)
60961 return -EFAULT;
60962 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
60963 copy_user_highpage(dst, src, va, vma);
60964 }
60965
60966 +#ifdef CONFIG_PAX_SEGMEXEC
60967 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
60968 +{
60969 + struct mm_struct *mm = vma->vm_mm;
60970 + spinlock_t *ptl;
60971 + pte_t *pte, entry;
60972 +
60973 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
60974 + entry = *pte;
60975 + if (!pte_present(entry)) {
60976 + if (!pte_none(entry)) {
60977 + BUG_ON(pte_file(entry));
60978 + free_swap_and_cache(pte_to_swp_entry(entry));
60979 + pte_clear_not_present_full(mm, address, pte, 0);
60980 + }
60981 + } else {
60982 + struct page *page;
60983 +
60984 + flush_cache_page(vma, address, pte_pfn(entry));
60985 + entry = ptep_clear_flush(vma, address, pte);
60986 + BUG_ON(pte_dirty(entry));
60987 + page = vm_normal_page(vma, address, entry);
60988 + if (page) {
60989 + update_hiwater_rss(mm);
60990 + if (PageAnon(page))
60991 + dec_mm_counter_fast(mm, MM_ANONPAGES);
60992 + else
60993 + dec_mm_counter_fast(mm, MM_FILEPAGES);
60994 + page_remove_rmap(page);
60995 + page_cache_release(page);
60996 + }
60997 + }
60998 + pte_unmap_unlock(pte, ptl);
60999 +}
61000 +
61001 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
61002 + *
61003 + * the ptl of the lower mapped page is held on entry and is not released on exit
61004 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61005 + */
61006 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61007 +{
61008 + struct mm_struct *mm = vma->vm_mm;
61009 + unsigned long address_m;
61010 + spinlock_t *ptl_m;
61011 + struct vm_area_struct *vma_m;
61012 + pmd_t *pmd_m;
61013 + pte_t *pte_m, entry_m;
61014 +
61015 + BUG_ON(!page_m || !PageAnon(page_m));
61016 +
61017 + vma_m = pax_find_mirror_vma(vma);
61018 + if (!vma_m)
61019 + return;
61020 +
61021 + BUG_ON(!PageLocked(page_m));
61022 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61023 + address_m = address + SEGMEXEC_TASK_SIZE;
61024 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61025 + pte_m = pte_offset_map(pmd_m, address_m);
61026 + ptl_m = pte_lockptr(mm, pmd_m);
61027 + if (ptl != ptl_m) {
61028 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61029 + if (!pte_none(*pte_m))
61030 + goto out;
61031 + }
61032 +
61033 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61034 + page_cache_get(page_m);
61035 + page_add_anon_rmap(page_m, vma_m, address_m);
61036 + inc_mm_counter_fast(mm, MM_ANONPAGES);
61037 + set_pte_at(mm, address_m, pte_m, entry_m);
61038 + update_mmu_cache(vma_m, address_m, entry_m);
61039 +out:
61040 + if (ptl != ptl_m)
61041 + spin_unlock(ptl_m);
61042 + pte_unmap(pte_m);
61043 + unlock_page(page_m);
61044 +}
61045 +
61046 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61047 +{
61048 + struct mm_struct *mm = vma->vm_mm;
61049 + unsigned long address_m;
61050 + spinlock_t *ptl_m;
61051 + struct vm_area_struct *vma_m;
61052 + pmd_t *pmd_m;
61053 + pte_t *pte_m, entry_m;
61054 +
61055 + BUG_ON(!page_m || PageAnon(page_m));
61056 +
61057 + vma_m = pax_find_mirror_vma(vma);
61058 + if (!vma_m)
61059 + return;
61060 +
61061 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61062 + address_m = address + SEGMEXEC_TASK_SIZE;
61063 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61064 + pte_m = pte_offset_map(pmd_m, address_m);
61065 + ptl_m = pte_lockptr(mm, pmd_m);
61066 + if (ptl != ptl_m) {
61067 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61068 + if (!pte_none(*pte_m))
61069 + goto out;
61070 + }
61071 +
61072 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61073 + page_cache_get(page_m);
61074 + page_add_file_rmap(page_m);
61075 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61076 + set_pte_at(mm, address_m, pte_m, entry_m);
61077 + update_mmu_cache(vma_m, address_m, entry_m);
61078 +out:
61079 + if (ptl != ptl_m)
61080 + spin_unlock(ptl_m);
61081 + pte_unmap(pte_m);
61082 +}
61083 +
61084 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61085 +{
61086 + struct mm_struct *mm = vma->vm_mm;
61087 + unsigned long address_m;
61088 + spinlock_t *ptl_m;
61089 + struct vm_area_struct *vma_m;
61090 + pmd_t *pmd_m;
61091 + pte_t *pte_m, entry_m;
61092 +
61093 + vma_m = pax_find_mirror_vma(vma);
61094 + if (!vma_m)
61095 + return;
61096 +
61097 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61098 + address_m = address + SEGMEXEC_TASK_SIZE;
61099 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61100 + pte_m = pte_offset_map(pmd_m, address_m);
61101 + ptl_m = pte_lockptr(mm, pmd_m);
61102 + if (ptl != ptl_m) {
61103 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61104 + if (!pte_none(*pte_m))
61105 + goto out;
61106 + }
61107 +
61108 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61109 + set_pte_at(mm, address_m, pte_m, entry_m);
61110 +out:
61111 + if (ptl != ptl_m)
61112 + spin_unlock(ptl_m);
61113 + pte_unmap(pte_m);
61114 +}
61115 +
61116 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61117 +{
61118 + struct page *page_m;
61119 + pte_t entry;
61120 +
61121 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61122 + goto out;
61123 +
61124 + entry = *pte;
61125 + page_m = vm_normal_page(vma, address, entry);
61126 + if (!page_m)
61127 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61128 + else if (PageAnon(page_m)) {
61129 + if (pax_find_mirror_vma(vma)) {
61130 + pte_unmap_unlock(pte, ptl);
61131 + lock_page(page_m);
61132 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61133 + if (pte_same(entry, *pte))
61134 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61135 + else
61136 + unlock_page(page_m);
61137 + }
61138 + } else
61139 + pax_mirror_file_pte(vma, address, page_m, ptl);
61140 +
61141 +out:
61142 + pte_unmap_unlock(pte, ptl);
61143 +}
61144 +#endif
61145 +
61146 /*
61147 * This routine handles present pages, when users try to write
61148 * to a shared page. It is done by copying the page to a new address
61149 @@ -2667,6 +2860,12 @@ gotten:
61150 */
61151 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61152 if (likely(pte_same(*page_table, orig_pte))) {
61153 +
61154 +#ifdef CONFIG_PAX_SEGMEXEC
61155 + if (pax_find_mirror_vma(vma))
61156 + BUG_ON(!trylock_page(new_page));
61157 +#endif
61158 +
61159 if (old_page) {
61160 if (!PageAnon(old_page)) {
61161 dec_mm_counter_fast(mm, MM_FILEPAGES);
61162 @@ -2718,6 +2917,10 @@ gotten:
61163 page_remove_rmap(old_page);
61164 }
61165
61166 +#ifdef CONFIG_PAX_SEGMEXEC
61167 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61168 +#endif
61169 +
61170 /* Free the old page.. */
61171 new_page = old_page;
61172 ret |= VM_FAULT_WRITE;
61173 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
61174 swap_free(entry);
61175 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61176 try_to_free_swap(page);
61177 +
61178 +#ifdef CONFIG_PAX_SEGMEXEC
61179 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61180 +#endif
61181 +
61182 unlock_page(page);
61183 if (swapcache) {
61184 /*
61185 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
61186
61187 /* No need to invalidate - it was non-present before */
61188 update_mmu_cache(vma, address, page_table);
61189 +
61190 +#ifdef CONFIG_PAX_SEGMEXEC
61191 + pax_mirror_anon_pte(vma, address, page, ptl);
61192 +#endif
61193 +
61194 unlock:
61195 pte_unmap_unlock(page_table, ptl);
61196 out:
61197 @@ -3039,40 +3252,6 @@ out_release:
61198 }
61199
61200 /*
61201 - * This is like a special single-page "expand_{down|up}wards()",
61202 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61203 - * doesn't hit another vma.
61204 - */
61205 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61206 -{
61207 - address &= PAGE_MASK;
61208 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61209 - struct vm_area_struct *prev = vma->vm_prev;
61210 -
61211 - /*
61212 - * Is there a mapping abutting this one below?
61213 - *
61214 - * That's only ok if it's the same stack mapping
61215 - * that has gotten split..
61216 - */
61217 - if (prev && prev->vm_end == address)
61218 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61219 -
61220 - expand_downwards(vma, address - PAGE_SIZE);
61221 - }
61222 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61223 - struct vm_area_struct *next = vma->vm_next;
61224 -
61225 - /* As VM_GROWSDOWN but s/below/above/ */
61226 - if (next && next->vm_start == address + PAGE_SIZE)
61227 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61228 -
61229 - expand_upwards(vma, address + PAGE_SIZE);
61230 - }
61231 - return 0;
61232 -}
61233 -
61234 -/*
61235 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61236 * but allow concurrent faults), and pte mapped but not yet locked.
61237 * We return with mmap_sem still held, but pte unmapped and unlocked.
61238 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
61239 unsigned long address, pte_t *page_table, pmd_t *pmd,
61240 unsigned int flags)
61241 {
61242 - struct page *page;
61243 + struct page *page = NULL;
61244 spinlock_t *ptl;
61245 pte_t entry;
61246
61247 - pte_unmap(page_table);
61248 -
61249 - /* Check if we need to add a guard page to the stack */
61250 - if (check_stack_guard_page(vma, address) < 0)
61251 - return VM_FAULT_SIGBUS;
61252 -
61253 - /* Use the zero-page for reads */
61254 if (!(flags & FAULT_FLAG_WRITE)) {
61255 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61256 vma->vm_page_prot));
61257 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61258 + ptl = pte_lockptr(mm, pmd);
61259 + spin_lock(ptl);
61260 if (!pte_none(*page_table))
61261 goto unlock;
61262 goto setpte;
61263 }
61264
61265 /* Allocate our own private page. */
61266 + pte_unmap(page_table);
61267 +
61268 if (unlikely(anon_vma_prepare(vma)))
61269 goto oom;
61270 page = alloc_zeroed_user_highpage_movable(vma, address);
61271 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
61272 if (!pte_none(*page_table))
61273 goto release;
61274
61275 +#ifdef CONFIG_PAX_SEGMEXEC
61276 + if (pax_find_mirror_vma(vma))
61277 + BUG_ON(!trylock_page(page));
61278 +#endif
61279 +
61280 inc_mm_counter_fast(mm, MM_ANONPAGES);
61281 page_add_new_anon_rmap(page, vma, address);
61282 setpte:
61283 @@ -3127,6 +3307,12 @@ setpte:
61284
61285 /* No need to invalidate - it was non-present before */
61286 update_mmu_cache(vma, address, page_table);
61287 +
61288 +#ifdef CONFIG_PAX_SEGMEXEC
61289 + if (page)
61290 + pax_mirror_anon_pte(vma, address, page, ptl);
61291 +#endif
61292 +
61293 unlock:
61294 pte_unmap_unlock(page_table, ptl);
61295 return 0;
61296 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
61297 */
61298 /* Only go through if we didn't race with anybody else... */
61299 if (likely(pte_same(*page_table, orig_pte))) {
61300 +
61301 +#ifdef CONFIG_PAX_SEGMEXEC
61302 + if (anon && pax_find_mirror_vma(vma))
61303 + BUG_ON(!trylock_page(page));
61304 +#endif
61305 +
61306 flush_icache_page(vma, page);
61307 entry = mk_pte(page, vma->vm_page_prot);
61308 if (flags & FAULT_FLAG_WRITE)
61309 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
61310
61311 /* no need to invalidate: a not-present page won't be cached */
61312 update_mmu_cache(vma, address, page_table);
61313 +
61314 +#ifdef CONFIG_PAX_SEGMEXEC
61315 + if (anon)
61316 + pax_mirror_anon_pte(vma, address, page, ptl);
61317 + else
61318 + pax_mirror_file_pte(vma, address, page, ptl);
61319 +#endif
61320 +
61321 } else {
61322 if (charged)
61323 mem_cgroup_uncharge_page(page);
61324 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
61325 if (flags & FAULT_FLAG_WRITE)
61326 flush_tlb_fix_spurious_fault(vma, address);
61327 }
61328 +
61329 +#ifdef CONFIG_PAX_SEGMEXEC
61330 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61331 + return 0;
61332 +#endif
61333 +
61334 unlock:
61335 pte_unmap_unlock(pte, ptl);
61336 return 0;
61337 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
61338 pmd_t *pmd;
61339 pte_t *pte;
61340
61341 +#ifdef CONFIG_PAX_SEGMEXEC
61342 + struct vm_area_struct *vma_m;
61343 +#endif
61344 +
61345 __set_current_state(TASK_RUNNING);
61346
61347 count_vm_event(PGFAULT);
61348 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
61349 if (unlikely(is_vm_hugetlb_page(vma)))
61350 return hugetlb_fault(mm, vma, address, flags);
61351
61352 +#ifdef CONFIG_PAX_SEGMEXEC
61353 + vma_m = pax_find_mirror_vma(vma);
61354 + if (vma_m) {
61355 + unsigned long address_m;
61356 + pgd_t *pgd_m;
61357 + pud_t *pud_m;
61358 + pmd_t *pmd_m;
61359 +
61360 + if (vma->vm_start > vma_m->vm_start) {
61361 + address_m = address;
61362 + address -= SEGMEXEC_TASK_SIZE;
61363 + vma = vma_m;
61364 + } else
61365 + address_m = address + SEGMEXEC_TASK_SIZE;
61366 +
61367 + pgd_m = pgd_offset(mm, address_m);
61368 + pud_m = pud_alloc(mm, pgd_m, address_m);
61369 + if (!pud_m)
61370 + return VM_FAULT_OOM;
61371 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61372 + if (!pmd_m)
61373 + return VM_FAULT_OOM;
61374 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61375 + return VM_FAULT_OOM;
61376 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61377 + }
61378 +#endif
61379 +
61380 pgd = pgd_offset(mm, address);
61381 pud = pud_alloc(mm, pgd, address);
61382 if (!pud)
61383 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
61384 * run pte_offset_map on the pmd, if an huge pmd could
61385 * materialize from under us from a different thread.
61386 */
61387 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61388 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61389 return VM_FAULT_OOM;
61390 /* if an huge pmd materialized from under us just retry later */
61391 if (unlikely(pmd_trans_huge(*pmd)))
61392 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
61393 gate_vma.vm_start = FIXADDR_USER_START;
61394 gate_vma.vm_end = FIXADDR_USER_END;
61395 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61396 - gate_vma.vm_page_prot = __P101;
61397 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61398 /*
61399 * Make sure the vDSO gets into every core dump.
61400 * Dumping its contents makes post-mortem fully interpretable later
61401 diff -urNp linux-3.0.3/mm/memory-failure.c linux-3.0.3/mm/memory-failure.c
61402 --- linux-3.0.3/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
61403 +++ linux-3.0.3/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
61404 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61405
61406 int sysctl_memory_failure_recovery __read_mostly = 1;
61407
61408 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61409 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61410
61411 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61412
61413 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
61414 }
61415
61416 nr_pages = 1 << compound_trans_order(hpage);
61417 - atomic_long_add(nr_pages, &mce_bad_pages);
61418 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61419
61420 /*
61421 * We need/can do nothing about count=0 pages.
61422 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
61423 if (!PageHWPoison(hpage)
61424 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61425 || (p != hpage && TestSetPageHWPoison(hpage))) {
61426 - atomic_long_sub(nr_pages, &mce_bad_pages);
61427 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61428 return 0;
61429 }
61430 set_page_hwpoison_huge_page(hpage);
61431 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
61432 }
61433 if (hwpoison_filter(p)) {
61434 if (TestClearPageHWPoison(p))
61435 - atomic_long_sub(nr_pages, &mce_bad_pages);
61436 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61437 unlock_page(hpage);
61438 put_page(hpage);
61439 return 0;
61440 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
61441 return 0;
61442 }
61443 if (TestClearPageHWPoison(p))
61444 - atomic_long_sub(nr_pages, &mce_bad_pages);
61445 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61446 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61447 return 0;
61448 }
61449 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
61450 */
61451 if (TestClearPageHWPoison(page)) {
61452 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61453 - atomic_long_sub(nr_pages, &mce_bad_pages);
61454 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61455 freeit = 1;
61456 if (PageHuge(page))
61457 clear_page_hwpoison_huge_page(page);
61458 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
61459 }
61460 done:
61461 if (!PageHWPoison(hpage))
61462 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61463 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61464 set_page_hwpoison_huge_page(hpage);
61465 dequeue_hwpoisoned_huge_page(hpage);
61466 /* keep elevated page count for bad page */
61467 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
61468 return ret;
61469
61470 done:
61471 - atomic_long_add(1, &mce_bad_pages);
61472 + atomic_long_add_unchecked(1, &mce_bad_pages);
61473 SetPageHWPoison(page);
61474 /* keep elevated page count for bad page */
61475 return ret;
61476 diff -urNp linux-3.0.3/mm/mempolicy.c linux-3.0.3/mm/mempolicy.c
61477 --- linux-3.0.3/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
61478 +++ linux-3.0.3/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
61479 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
61480 unsigned long vmstart;
61481 unsigned long vmend;
61482
61483 +#ifdef CONFIG_PAX_SEGMEXEC
61484 + struct vm_area_struct *vma_m;
61485 +#endif
61486 +
61487 vma = find_vma_prev(mm, start, &prev);
61488 if (!vma || vma->vm_start > start)
61489 return -EFAULT;
61490 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
61491 err = policy_vma(vma, new_pol);
61492 if (err)
61493 goto out;
61494 +
61495 +#ifdef CONFIG_PAX_SEGMEXEC
61496 + vma_m = pax_find_mirror_vma(vma);
61497 + if (vma_m) {
61498 + err = policy_vma(vma_m, new_pol);
61499 + if (err)
61500 + goto out;
61501 + }
61502 +#endif
61503 +
61504 }
61505
61506 out:
61507 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
61508
61509 if (end < start)
61510 return -EINVAL;
61511 +
61512 +#ifdef CONFIG_PAX_SEGMEXEC
61513 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61514 + if (end > SEGMEXEC_TASK_SIZE)
61515 + return -EINVAL;
61516 + } else
61517 +#endif
61518 +
61519 + if (end > TASK_SIZE)
61520 + return -EINVAL;
61521 +
61522 if (end == start)
61523 return 0;
61524
61525 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61526 if (!mm)
61527 goto out;
61528
61529 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61530 + if (mm != current->mm &&
61531 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61532 + err = -EPERM;
61533 + goto out;
61534 + }
61535 +#endif
61536 +
61537 /*
61538 * Check if this process has the right to modify the specified
61539 * process. The right exists if the process has administrative
61540 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61541 rcu_read_lock();
61542 tcred = __task_cred(task);
61543 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61544 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61545 - !capable(CAP_SYS_NICE)) {
61546 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61547 rcu_read_unlock();
61548 err = -EPERM;
61549 goto out;
61550 diff -urNp linux-3.0.3/mm/migrate.c linux-3.0.3/mm/migrate.c
61551 --- linux-3.0.3/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
61552 +++ linux-3.0.3/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
61553 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
61554 unsigned long chunk_start;
61555 int err;
61556
61557 + pax_track_stack();
61558 +
61559 task_nodes = cpuset_mems_allowed(task);
61560
61561 err = -ENOMEM;
61562 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61563 if (!mm)
61564 return -EINVAL;
61565
61566 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61567 + if (mm != current->mm &&
61568 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61569 + err = -EPERM;
61570 + goto out;
61571 + }
61572 +#endif
61573 +
61574 /*
61575 * Check if this process has the right to modify the specified
61576 * process. The right exists if the process has administrative
61577 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61578 rcu_read_lock();
61579 tcred = __task_cred(task);
61580 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61581 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61582 - !capable(CAP_SYS_NICE)) {
61583 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61584 rcu_read_unlock();
61585 err = -EPERM;
61586 goto out;
61587 diff -urNp linux-3.0.3/mm/mlock.c linux-3.0.3/mm/mlock.c
61588 --- linux-3.0.3/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
61589 +++ linux-3.0.3/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
61590 @@ -13,6 +13,7 @@
61591 #include <linux/pagemap.h>
61592 #include <linux/mempolicy.h>
61593 #include <linux/syscalls.h>
61594 +#include <linux/security.h>
61595 #include <linux/sched.h>
61596 #include <linux/module.h>
61597 #include <linux/rmap.h>
61598 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61599 return -EINVAL;
61600 if (end == start)
61601 return 0;
61602 + if (end > TASK_SIZE)
61603 + return -EINVAL;
61604 +
61605 vma = find_vma_prev(current->mm, start, &prev);
61606 if (!vma || vma->vm_start > start)
61607 return -ENOMEM;
61608 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61609 for (nstart = start ; ; ) {
61610 vm_flags_t newflags;
61611
61612 +#ifdef CONFIG_PAX_SEGMEXEC
61613 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61614 + break;
61615 +#endif
61616 +
61617 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61618
61619 newflags = vma->vm_flags | VM_LOCKED;
61620 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61621 lock_limit >>= PAGE_SHIFT;
61622
61623 /* check against resource limits */
61624 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61625 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61626 error = do_mlock(start, len, 1);
61627 up_write(&current->mm->mmap_sem);
61628 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61629 static int do_mlockall(int flags)
61630 {
61631 struct vm_area_struct * vma, * prev = NULL;
61632 - unsigned int def_flags = 0;
61633
61634 if (flags & MCL_FUTURE)
61635 - def_flags = VM_LOCKED;
61636 - current->mm->def_flags = def_flags;
61637 + current->mm->def_flags |= VM_LOCKED;
61638 + else
61639 + current->mm->def_flags &= ~VM_LOCKED;
61640 if (flags == MCL_FUTURE)
61641 goto out;
61642
61643 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61644 vm_flags_t newflags;
61645
61646 +#ifdef CONFIG_PAX_SEGMEXEC
61647 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61648 + break;
61649 +#endif
61650 +
61651 + BUG_ON(vma->vm_end > TASK_SIZE);
61652 newflags = vma->vm_flags | VM_LOCKED;
61653 if (!(flags & MCL_CURRENT))
61654 newflags &= ~VM_LOCKED;
61655 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61656 lock_limit >>= PAGE_SHIFT;
61657
61658 ret = -ENOMEM;
61659 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61660 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61661 capable(CAP_IPC_LOCK))
61662 ret = do_mlockall(flags);
61663 diff -urNp linux-3.0.3/mm/mmap.c linux-3.0.3/mm/mmap.c
61664 --- linux-3.0.3/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
61665 +++ linux-3.0.3/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
61666 @@ -46,6 +46,16 @@
61667 #define arch_rebalance_pgtables(addr, len) (addr)
61668 #endif
61669
61670 +static inline void verify_mm_writelocked(struct mm_struct *mm)
61671 +{
61672 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61673 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61674 + up_read(&mm->mmap_sem);
61675 + BUG();
61676 + }
61677 +#endif
61678 +}
61679 +
61680 static void unmap_region(struct mm_struct *mm,
61681 struct vm_area_struct *vma, struct vm_area_struct *prev,
61682 unsigned long start, unsigned long end);
61683 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61684 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61685 *
61686 */
61687 -pgprot_t protection_map[16] = {
61688 +pgprot_t protection_map[16] __read_only = {
61689 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61690 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61691 };
61692
61693 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
61694 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61695 {
61696 - return __pgprot(pgprot_val(protection_map[vm_flags &
61697 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61698 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61699 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61700 +
61701 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61702 + if (!(__supported_pte_mask & _PAGE_NX) &&
61703 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61704 + (vm_flags & (VM_READ | VM_WRITE)))
61705 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61706 +#endif
61707 +
61708 + return prot;
61709 }
61710 EXPORT_SYMBOL(vm_get_page_prot);
61711
61712 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
61713 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
61714 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61715 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61716 /*
61717 * Make sure vm_committed_as in one cacheline and not cacheline shared with
61718 * other variables. It can be updated by several CPUs frequently.
61719 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
61720 struct vm_area_struct *next = vma->vm_next;
61721
61722 might_sleep();
61723 + BUG_ON(vma->vm_mirror);
61724 if (vma->vm_ops && vma->vm_ops->close)
61725 vma->vm_ops->close(vma);
61726 if (vma->vm_file) {
61727 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61728 * not page aligned -Ram Gupta
61729 */
61730 rlim = rlimit(RLIMIT_DATA);
61731 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61732 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61733 (mm->end_data - mm->start_data) > rlim)
61734 goto out;
61735 @@ -697,6 +719,12 @@ static int
61736 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61737 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61738 {
61739 +
61740 +#ifdef CONFIG_PAX_SEGMEXEC
61741 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61742 + return 0;
61743 +#endif
61744 +
61745 if (is_mergeable_vma(vma, file, vm_flags) &&
61746 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61747 if (vma->vm_pgoff == vm_pgoff)
61748 @@ -716,6 +744,12 @@ static int
61749 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
61750 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61751 {
61752 +
61753 +#ifdef CONFIG_PAX_SEGMEXEC
61754 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
61755 + return 0;
61756 +#endif
61757 +
61758 if (is_mergeable_vma(vma, file, vm_flags) &&
61759 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61760 pgoff_t vm_pglen;
61761 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
61762 struct vm_area_struct *vma_merge(struct mm_struct *mm,
61763 struct vm_area_struct *prev, unsigned long addr,
61764 unsigned long end, unsigned long vm_flags,
61765 - struct anon_vma *anon_vma, struct file *file,
61766 + struct anon_vma *anon_vma, struct file *file,
61767 pgoff_t pgoff, struct mempolicy *policy)
61768 {
61769 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
61770 struct vm_area_struct *area, *next;
61771 int err;
61772
61773 +#ifdef CONFIG_PAX_SEGMEXEC
61774 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
61775 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
61776 +
61777 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
61778 +#endif
61779 +
61780 /*
61781 * We later require that vma->vm_flags == vm_flags,
61782 * so this tests vma->vm_flags & VM_SPECIAL, too.
61783 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
61784 if (next && next->vm_end == end) /* cases 6, 7, 8 */
61785 next = next->vm_next;
61786
61787 +#ifdef CONFIG_PAX_SEGMEXEC
61788 + if (prev)
61789 + prev_m = pax_find_mirror_vma(prev);
61790 + if (area)
61791 + area_m = pax_find_mirror_vma(area);
61792 + if (next)
61793 + next_m = pax_find_mirror_vma(next);
61794 +#endif
61795 +
61796 /*
61797 * Can it merge with the predecessor?
61798 */
61799 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
61800 /* cases 1, 6 */
61801 err = vma_adjust(prev, prev->vm_start,
61802 next->vm_end, prev->vm_pgoff, NULL);
61803 - } else /* cases 2, 5, 7 */
61804 +
61805 +#ifdef CONFIG_PAX_SEGMEXEC
61806 + if (!err && prev_m)
61807 + err = vma_adjust(prev_m, prev_m->vm_start,
61808 + next_m->vm_end, prev_m->vm_pgoff, NULL);
61809 +#endif
61810 +
61811 + } else { /* cases 2, 5, 7 */
61812 err = vma_adjust(prev, prev->vm_start,
61813 end, prev->vm_pgoff, NULL);
61814 +
61815 +#ifdef CONFIG_PAX_SEGMEXEC
61816 + if (!err && prev_m)
61817 + err = vma_adjust(prev_m, prev_m->vm_start,
61818 + end_m, prev_m->vm_pgoff, NULL);
61819 +#endif
61820 +
61821 + }
61822 if (err)
61823 return NULL;
61824 khugepaged_enter_vma_merge(prev);
61825 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
61826 mpol_equal(policy, vma_policy(next)) &&
61827 can_vma_merge_before(next, vm_flags,
61828 anon_vma, file, pgoff+pglen)) {
61829 - if (prev && addr < prev->vm_end) /* case 4 */
61830 + if (prev && addr < prev->vm_end) { /* case 4 */
61831 err = vma_adjust(prev, prev->vm_start,
61832 addr, prev->vm_pgoff, NULL);
61833 - else /* cases 3, 8 */
61834 +
61835 +#ifdef CONFIG_PAX_SEGMEXEC
61836 + if (!err && prev_m)
61837 + err = vma_adjust(prev_m, prev_m->vm_start,
61838 + addr_m, prev_m->vm_pgoff, NULL);
61839 +#endif
61840 +
61841 + } else { /* cases 3, 8 */
61842 err = vma_adjust(area, addr, next->vm_end,
61843 next->vm_pgoff - pglen, NULL);
61844 +
61845 +#ifdef CONFIG_PAX_SEGMEXEC
61846 + if (!err && area_m)
61847 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
61848 + next_m->vm_pgoff - pglen, NULL);
61849 +#endif
61850 +
61851 + }
61852 if (err)
61853 return NULL;
61854 khugepaged_enter_vma_merge(area);
61855 @@ -929,14 +1009,11 @@ none:
61856 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
61857 struct file *file, long pages)
61858 {
61859 - const unsigned long stack_flags
61860 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
61861 -
61862 if (file) {
61863 mm->shared_vm += pages;
61864 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
61865 mm->exec_vm += pages;
61866 - } else if (flags & stack_flags)
61867 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
61868 mm->stack_vm += pages;
61869 if (flags & (VM_RESERVED|VM_IO))
61870 mm->reserved_vm += pages;
61871 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
61872 * (the exception is when the underlying filesystem is noexec
61873 * mounted, in which case we dont add PROT_EXEC.)
61874 */
61875 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
61876 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
61877 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
61878 prot |= PROT_EXEC;
61879
61880 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
61881 /* Obtain the address to map to. we verify (or select) it and ensure
61882 * that it represents a valid section of the address space.
61883 */
61884 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
61885 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
61886 if (addr & ~PAGE_MASK)
61887 return addr;
61888
61889 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
61890 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
61891 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
61892
61893 +#ifdef CONFIG_PAX_MPROTECT
61894 + if (mm->pax_flags & MF_PAX_MPROTECT) {
61895 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
61896 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
61897 + gr_log_rwxmmap(file);
61898 +
61899 +#ifdef CONFIG_PAX_EMUPLT
61900 + vm_flags &= ~VM_EXEC;
61901 +#else
61902 + return -EPERM;
61903 +#endif
61904 +
61905 + }
61906 +
61907 + if (!(vm_flags & VM_EXEC))
61908 + vm_flags &= ~VM_MAYEXEC;
61909 +#else
61910 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
61911 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61912 +#endif
61913 + else
61914 + vm_flags &= ~VM_MAYWRITE;
61915 + }
61916 +#endif
61917 +
61918 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61919 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
61920 + vm_flags &= ~VM_PAGEEXEC;
61921 +#endif
61922 +
61923 if (flags & MAP_LOCKED)
61924 if (!can_do_mlock())
61925 return -EPERM;
61926 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
61927 locked += mm->locked_vm;
61928 lock_limit = rlimit(RLIMIT_MEMLOCK);
61929 lock_limit >>= PAGE_SHIFT;
61930 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
61931 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61932 return -EAGAIN;
61933 }
61934 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
61935 if (error)
61936 return error;
61937
61938 + if (!gr_acl_handle_mmap(file, prot))
61939 + return -EACCES;
61940 +
61941 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
61942 }
61943 EXPORT_SYMBOL(do_mmap_pgoff);
61944 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
61945 vm_flags_t vm_flags = vma->vm_flags;
61946
61947 /* If it was private or non-writable, the write bit is already clear */
61948 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
61949 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
61950 return 0;
61951
61952 /* The backer wishes to know when pages are first written to? */
61953 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
61954 unsigned long charged = 0;
61955 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
61956
61957 +#ifdef CONFIG_PAX_SEGMEXEC
61958 + struct vm_area_struct *vma_m = NULL;
61959 +#endif
61960 +
61961 + /*
61962 + * mm->mmap_sem is required to protect against another thread
61963 + * changing the mappings in case we sleep.
61964 + */
61965 + verify_mm_writelocked(mm);
61966 +
61967 /* Clear old maps */
61968 error = -ENOMEM;
61969 -munmap_back:
61970 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61971 if (vma && vma->vm_start < addr + len) {
61972 if (do_munmap(mm, addr, len))
61973 return -ENOMEM;
61974 - goto munmap_back;
61975 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61976 + BUG_ON(vma && vma->vm_start < addr + len);
61977 }
61978
61979 /* Check against address space limit. */
61980 @@ -1266,6 +1387,16 @@ munmap_back:
61981 goto unacct_error;
61982 }
61983
61984 +#ifdef CONFIG_PAX_SEGMEXEC
61985 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
61986 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
61987 + if (!vma_m) {
61988 + error = -ENOMEM;
61989 + goto free_vma;
61990 + }
61991 + }
61992 +#endif
61993 +
61994 vma->vm_mm = mm;
61995 vma->vm_start = addr;
61996 vma->vm_end = addr + len;
61997 @@ -1289,6 +1420,19 @@ munmap_back:
61998 error = file->f_op->mmap(file, vma);
61999 if (error)
62000 goto unmap_and_free_vma;
62001 +
62002 +#ifdef CONFIG_PAX_SEGMEXEC
62003 + if (vma_m && (vm_flags & VM_EXECUTABLE))
62004 + added_exe_file_vma(mm);
62005 +#endif
62006 +
62007 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62008 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62009 + vma->vm_flags |= VM_PAGEEXEC;
62010 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62011 + }
62012 +#endif
62013 +
62014 if (vm_flags & VM_EXECUTABLE)
62015 added_exe_file_vma(mm);
62016
62017 @@ -1324,6 +1468,11 @@ munmap_back:
62018 vma_link(mm, vma, prev, rb_link, rb_parent);
62019 file = vma->vm_file;
62020
62021 +#ifdef CONFIG_PAX_SEGMEXEC
62022 + if (vma_m)
62023 + BUG_ON(pax_mirror_vma(vma_m, vma));
62024 +#endif
62025 +
62026 /* Once vma denies write, undo our temporary denial count */
62027 if (correct_wcount)
62028 atomic_inc(&inode->i_writecount);
62029 @@ -1332,6 +1481,7 @@ out:
62030
62031 mm->total_vm += len >> PAGE_SHIFT;
62032 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62033 + track_exec_limit(mm, addr, addr + len, vm_flags);
62034 if (vm_flags & VM_LOCKED) {
62035 if (!mlock_vma_pages_range(vma, addr, addr + len))
62036 mm->locked_vm += (len >> PAGE_SHIFT);
62037 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
62038 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62039 charged = 0;
62040 free_vma:
62041 +
62042 +#ifdef CONFIG_PAX_SEGMEXEC
62043 + if (vma_m)
62044 + kmem_cache_free(vm_area_cachep, vma_m);
62045 +#endif
62046 +
62047 kmem_cache_free(vm_area_cachep, vma);
62048 unacct_error:
62049 if (charged)
62050 @@ -1356,6 +1512,44 @@ unacct_error:
62051 return error;
62052 }
62053
62054 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62055 +{
62056 + if (!vma) {
62057 +#ifdef CONFIG_STACK_GROWSUP
62058 + if (addr > sysctl_heap_stack_gap)
62059 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62060 + else
62061 + vma = find_vma(current->mm, 0);
62062 + if (vma && (vma->vm_flags & VM_GROWSUP))
62063 + return false;
62064 +#endif
62065 + return true;
62066 + }
62067 +
62068 + if (addr + len > vma->vm_start)
62069 + return false;
62070 +
62071 + if (vma->vm_flags & VM_GROWSDOWN)
62072 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62073 +#ifdef CONFIG_STACK_GROWSUP
62074 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62075 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62076 +#endif
62077 +
62078 + return true;
62079 +}
62080 +
62081 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62082 +{
62083 + if (vma->vm_start < len)
62084 + return -ENOMEM;
62085 + if (!(vma->vm_flags & VM_GROWSDOWN))
62086 + return vma->vm_start - len;
62087 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62088 + return vma->vm_start - len - sysctl_heap_stack_gap;
62089 + return -ENOMEM;
62090 +}
62091 +
62092 /* Get an address range which is currently unmapped.
62093 * For shmat() with addr=0.
62094 *
62095 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
62096 if (flags & MAP_FIXED)
62097 return addr;
62098
62099 +#ifdef CONFIG_PAX_RANDMMAP
62100 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62101 +#endif
62102 +
62103 if (addr) {
62104 addr = PAGE_ALIGN(addr);
62105 - vma = find_vma(mm, addr);
62106 - if (TASK_SIZE - len >= addr &&
62107 - (!vma || addr + len <= vma->vm_start))
62108 - return addr;
62109 + if (TASK_SIZE - len >= addr) {
62110 + vma = find_vma(mm, addr);
62111 + if (check_heap_stack_gap(vma, addr, len))
62112 + return addr;
62113 + }
62114 }
62115 if (len > mm->cached_hole_size) {
62116 - start_addr = addr = mm->free_area_cache;
62117 + start_addr = addr = mm->free_area_cache;
62118 } else {
62119 - start_addr = addr = TASK_UNMAPPED_BASE;
62120 - mm->cached_hole_size = 0;
62121 + start_addr = addr = mm->mmap_base;
62122 + mm->cached_hole_size = 0;
62123 }
62124
62125 full_search:
62126 @@ -1404,34 +1603,40 @@ full_search:
62127 * Start a new search - just in case we missed
62128 * some holes.
62129 */
62130 - if (start_addr != TASK_UNMAPPED_BASE) {
62131 - addr = TASK_UNMAPPED_BASE;
62132 - start_addr = addr;
62133 + if (start_addr != mm->mmap_base) {
62134 + start_addr = addr = mm->mmap_base;
62135 mm->cached_hole_size = 0;
62136 goto full_search;
62137 }
62138 return -ENOMEM;
62139 }
62140 - if (!vma || addr + len <= vma->vm_start) {
62141 - /*
62142 - * Remember the place where we stopped the search:
62143 - */
62144 - mm->free_area_cache = addr + len;
62145 - return addr;
62146 - }
62147 + if (check_heap_stack_gap(vma, addr, len))
62148 + break;
62149 if (addr + mm->cached_hole_size < vma->vm_start)
62150 mm->cached_hole_size = vma->vm_start - addr;
62151 addr = vma->vm_end;
62152 }
62153 +
62154 + /*
62155 + * Remember the place where we stopped the search:
62156 + */
62157 + mm->free_area_cache = addr + len;
62158 + return addr;
62159 }
62160 #endif
62161
62162 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62163 {
62164 +
62165 +#ifdef CONFIG_PAX_SEGMEXEC
62166 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62167 + return;
62168 +#endif
62169 +
62170 /*
62171 * Is this a new hole at the lowest possible address?
62172 */
62173 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62174 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62175 mm->free_area_cache = addr;
62176 mm->cached_hole_size = ~0UL;
62177 }
62178 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
62179 {
62180 struct vm_area_struct *vma;
62181 struct mm_struct *mm = current->mm;
62182 - unsigned long addr = addr0;
62183 + unsigned long base = mm->mmap_base, addr = addr0;
62184
62185 /* requested length too big for entire address space */
62186 if (len > TASK_SIZE)
62187 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
62188 if (flags & MAP_FIXED)
62189 return addr;
62190
62191 +#ifdef CONFIG_PAX_RANDMMAP
62192 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62193 +#endif
62194 +
62195 /* requesting a specific address */
62196 if (addr) {
62197 addr = PAGE_ALIGN(addr);
62198 - vma = find_vma(mm, addr);
62199 - if (TASK_SIZE - len >= addr &&
62200 - (!vma || addr + len <= vma->vm_start))
62201 - return addr;
62202 + if (TASK_SIZE - len >= addr) {
62203 + vma = find_vma(mm, addr);
62204 + if (check_heap_stack_gap(vma, addr, len))
62205 + return addr;
62206 + }
62207 }
62208
62209 /* check if free_area_cache is useful for us */
62210 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
62211 /* make sure it can fit in the remaining address space */
62212 if (addr > len) {
62213 vma = find_vma(mm, addr-len);
62214 - if (!vma || addr <= vma->vm_start)
62215 + if (check_heap_stack_gap(vma, addr - len, len))
62216 /* remember the address as a hint for next time */
62217 return (mm->free_area_cache = addr-len);
62218 }
62219 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
62220 * return with success:
62221 */
62222 vma = find_vma(mm, addr);
62223 - if (!vma || addr+len <= vma->vm_start)
62224 + if (check_heap_stack_gap(vma, addr, len))
62225 /* remember the address as a hint for next time */
62226 return (mm->free_area_cache = addr);
62227
62228 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
62229 mm->cached_hole_size = vma->vm_start - addr;
62230
62231 /* try just below the current vma->vm_start */
62232 - addr = vma->vm_start-len;
62233 - } while (len < vma->vm_start);
62234 + addr = skip_heap_stack_gap(vma, len);
62235 + } while (!IS_ERR_VALUE(addr));
62236
62237 bottomup:
62238 /*
62239 @@ -1515,13 +1725,21 @@ bottomup:
62240 * can happen with large stack limits and large mmap()
62241 * allocations.
62242 */
62243 + mm->mmap_base = TASK_UNMAPPED_BASE;
62244 +
62245 +#ifdef CONFIG_PAX_RANDMMAP
62246 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62247 + mm->mmap_base += mm->delta_mmap;
62248 +#endif
62249 +
62250 + mm->free_area_cache = mm->mmap_base;
62251 mm->cached_hole_size = ~0UL;
62252 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62253 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62254 /*
62255 * Restore the topdown base:
62256 */
62257 - mm->free_area_cache = mm->mmap_base;
62258 + mm->mmap_base = base;
62259 + mm->free_area_cache = base;
62260 mm->cached_hole_size = ~0UL;
62261
62262 return addr;
62263 @@ -1530,6 +1748,12 @@ bottomup:
62264
62265 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62266 {
62267 +
62268 +#ifdef CONFIG_PAX_SEGMEXEC
62269 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62270 + return;
62271 +#endif
62272 +
62273 /*
62274 * Is this a new hole at the highest possible address?
62275 */
62276 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
62277 mm->free_area_cache = addr;
62278
62279 /* dont allow allocations above current base */
62280 - if (mm->free_area_cache > mm->mmap_base)
62281 + if (mm->free_area_cache > mm->mmap_base) {
62282 mm->free_area_cache = mm->mmap_base;
62283 + mm->cached_hole_size = ~0UL;
62284 + }
62285 }
62286
62287 unsigned long
62288 @@ -1646,6 +1872,28 @@ out:
62289 return prev ? prev->vm_next : vma;
62290 }
62291
62292 +#ifdef CONFIG_PAX_SEGMEXEC
62293 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62294 +{
62295 + struct vm_area_struct *vma_m;
62296 +
62297 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62298 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62299 + BUG_ON(vma->vm_mirror);
62300 + return NULL;
62301 + }
62302 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62303 + vma_m = vma->vm_mirror;
62304 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62305 + BUG_ON(vma->vm_file != vma_m->vm_file);
62306 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62307 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62308 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62309 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62310 + return vma_m;
62311 +}
62312 +#endif
62313 +
62314 /*
62315 * Verify that the stack growth is acceptable and
62316 * update accounting. This is shared with both the
62317 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
62318 return -ENOMEM;
62319
62320 /* Stack limit test */
62321 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62322 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62323 return -ENOMEM;
62324
62325 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
62326 locked = mm->locked_vm + grow;
62327 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62328 limit >>= PAGE_SHIFT;
62329 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62330 if (locked > limit && !capable(CAP_IPC_LOCK))
62331 return -ENOMEM;
62332 }
62333 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
62334 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62335 * vma is the last one with address > vma->vm_end. Have to extend vma.
62336 */
62337 +#ifndef CONFIG_IA64
62338 +static
62339 +#endif
62340 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62341 {
62342 int error;
62343 + bool locknext;
62344
62345 if (!(vma->vm_flags & VM_GROWSUP))
62346 return -EFAULT;
62347
62348 + /* Also guard against wrapping around to address 0. */
62349 + if (address < PAGE_ALIGN(address+1))
62350 + address = PAGE_ALIGN(address+1);
62351 + else
62352 + return -ENOMEM;
62353 +
62354 /*
62355 * We must make sure the anon_vma is allocated
62356 * so that the anon_vma locking is not a noop.
62357 */
62358 if (unlikely(anon_vma_prepare(vma)))
62359 return -ENOMEM;
62360 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62361 + if (locknext && anon_vma_prepare(vma->vm_next))
62362 + return -ENOMEM;
62363 vma_lock_anon_vma(vma);
62364 + if (locknext)
62365 + vma_lock_anon_vma(vma->vm_next);
62366
62367 /*
62368 * vma->vm_start/vm_end cannot change under us because the caller
62369 * is required to hold the mmap_sem in read mode. We need the
62370 - * anon_vma lock to serialize against concurrent expand_stacks.
62371 - * Also guard against wrapping around to address 0.
62372 + * anon_vma locks to serialize against concurrent expand_stacks
62373 + * and expand_upwards.
62374 */
62375 - if (address < PAGE_ALIGN(address+4))
62376 - address = PAGE_ALIGN(address+4);
62377 - else {
62378 - vma_unlock_anon_vma(vma);
62379 - return -ENOMEM;
62380 - }
62381 error = 0;
62382
62383 /* Somebody else might have raced and expanded it already */
62384 - if (address > vma->vm_end) {
62385 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62386 + error = -ENOMEM;
62387 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62388 unsigned long size, grow;
62389
62390 size = address - vma->vm_start;
62391 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
62392 }
62393 }
62394 }
62395 + if (locknext)
62396 + vma_unlock_anon_vma(vma->vm_next);
62397 vma_unlock_anon_vma(vma);
62398 khugepaged_enter_vma_merge(vma);
62399 return error;
62400 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
62401 unsigned long address)
62402 {
62403 int error;
62404 + bool lockprev = false;
62405 + struct vm_area_struct *prev;
62406
62407 /*
62408 * We must make sure the anon_vma is allocated
62409 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
62410 if (error)
62411 return error;
62412
62413 + prev = vma->vm_prev;
62414 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62415 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62416 +#endif
62417 + if (lockprev && anon_vma_prepare(prev))
62418 + return -ENOMEM;
62419 + if (lockprev)
62420 + vma_lock_anon_vma(prev);
62421 +
62422 vma_lock_anon_vma(vma);
62423
62424 /*
62425 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
62426 */
62427
62428 /* Somebody else might have raced and expanded it already */
62429 - if (address < vma->vm_start) {
62430 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62431 + error = -ENOMEM;
62432 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62433 unsigned long size, grow;
62434
62435 +#ifdef CONFIG_PAX_SEGMEXEC
62436 + struct vm_area_struct *vma_m;
62437 +
62438 + vma_m = pax_find_mirror_vma(vma);
62439 +#endif
62440 +
62441 size = vma->vm_end - address;
62442 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62443
62444 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
62445 if (!error) {
62446 vma->vm_start = address;
62447 vma->vm_pgoff -= grow;
62448 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62449 +
62450 +#ifdef CONFIG_PAX_SEGMEXEC
62451 + if (vma_m) {
62452 + vma_m->vm_start -= grow << PAGE_SHIFT;
62453 + vma_m->vm_pgoff -= grow;
62454 + }
62455 +#endif
62456 +
62457 perf_event_mmap(vma);
62458 }
62459 }
62460 }
62461 vma_unlock_anon_vma(vma);
62462 + if (lockprev)
62463 + vma_unlock_anon_vma(prev);
62464 khugepaged_enter_vma_merge(vma);
62465 return error;
62466 }
62467 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
62468 do {
62469 long nrpages = vma_pages(vma);
62470
62471 +#ifdef CONFIG_PAX_SEGMEXEC
62472 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62473 + vma = remove_vma(vma);
62474 + continue;
62475 + }
62476 +#endif
62477 +
62478 mm->total_vm -= nrpages;
62479 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62480 vma = remove_vma(vma);
62481 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62482 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62483 vma->vm_prev = NULL;
62484 do {
62485 +
62486 +#ifdef CONFIG_PAX_SEGMEXEC
62487 + if (vma->vm_mirror) {
62488 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62489 + vma->vm_mirror->vm_mirror = NULL;
62490 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62491 + vma->vm_mirror = NULL;
62492 + }
62493 +#endif
62494 +
62495 rb_erase(&vma->vm_rb, &mm->mm_rb);
62496 mm->map_count--;
62497 tail_vma = vma;
62498 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
62499 struct vm_area_struct *new;
62500 int err = -ENOMEM;
62501
62502 +#ifdef CONFIG_PAX_SEGMEXEC
62503 + struct vm_area_struct *vma_m, *new_m = NULL;
62504 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62505 +#endif
62506 +
62507 if (is_vm_hugetlb_page(vma) && (addr &
62508 ~(huge_page_mask(hstate_vma(vma)))))
62509 return -EINVAL;
62510
62511 +#ifdef CONFIG_PAX_SEGMEXEC
62512 + vma_m = pax_find_mirror_vma(vma);
62513 +#endif
62514 +
62515 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62516 if (!new)
62517 goto out_err;
62518
62519 +#ifdef CONFIG_PAX_SEGMEXEC
62520 + if (vma_m) {
62521 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62522 + if (!new_m) {
62523 + kmem_cache_free(vm_area_cachep, new);
62524 + goto out_err;
62525 + }
62526 + }
62527 +#endif
62528 +
62529 /* most fields are the same, copy all, and then fixup */
62530 *new = *vma;
62531
62532 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
62533 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62534 }
62535
62536 +#ifdef CONFIG_PAX_SEGMEXEC
62537 + if (vma_m) {
62538 + *new_m = *vma_m;
62539 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62540 + new_m->vm_mirror = new;
62541 + new->vm_mirror = new_m;
62542 +
62543 + if (new_below)
62544 + new_m->vm_end = addr_m;
62545 + else {
62546 + new_m->vm_start = addr_m;
62547 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62548 + }
62549 + }
62550 +#endif
62551 +
62552 pol = mpol_dup(vma_policy(vma));
62553 if (IS_ERR(pol)) {
62554 err = PTR_ERR(pol);
62555 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
62556 else
62557 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62558
62559 +#ifdef CONFIG_PAX_SEGMEXEC
62560 + if (!err && vma_m) {
62561 + if (anon_vma_clone(new_m, vma_m))
62562 + goto out_free_mpol;
62563 +
62564 + mpol_get(pol);
62565 + vma_set_policy(new_m, pol);
62566 +
62567 + if (new_m->vm_file) {
62568 + get_file(new_m->vm_file);
62569 + if (vma_m->vm_flags & VM_EXECUTABLE)
62570 + added_exe_file_vma(mm);
62571 + }
62572 +
62573 + if (new_m->vm_ops && new_m->vm_ops->open)
62574 + new_m->vm_ops->open(new_m);
62575 +
62576 + if (new_below)
62577 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62578 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62579 + else
62580 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62581 +
62582 + if (err) {
62583 + if (new_m->vm_ops && new_m->vm_ops->close)
62584 + new_m->vm_ops->close(new_m);
62585 + if (new_m->vm_file) {
62586 + if (vma_m->vm_flags & VM_EXECUTABLE)
62587 + removed_exe_file_vma(mm);
62588 + fput(new_m->vm_file);
62589 + }
62590 + mpol_put(pol);
62591 + }
62592 + }
62593 +#endif
62594 +
62595 /* Success. */
62596 if (!err)
62597 return 0;
62598 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
62599 removed_exe_file_vma(mm);
62600 fput(new->vm_file);
62601 }
62602 - unlink_anon_vmas(new);
62603 out_free_mpol:
62604 mpol_put(pol);
62605 out_free_vma:
62606 +
62607 +#ifdef CONFIG_PAX_SEGMEXEC
62608 + if (new_m) {
62609 + unlink_anon_vmas(new_m);
62610 + kmem_cache_free(vm_area_cachep, new_m);
62611 + }
62612 +#endif
62613 +
62614 + unlink_anon_vmas(new);
62615 kmem_cache_free(vm_area_cachep, new);
62616 out_err:
62617 return err;
62618 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
62619 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62620 unsigned long addr, int new_below)
62621 {
62622 +
62623 +#ifdef CONFIG_PAX_SEGMEXEC
62624 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62625 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62626 + if (mm->map_count >= sysctl_max_map_count-1)
62627 + return -ENOMEM;
62628 + } else
62629 +#endif
62630 +
62631 if (mm->map_count >= sysctl_max_map_count)
62632 return -ENOMEM;
62633
62634 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
62635 * work. This now handles partial unmappings.
62636 * Jeremy Fitzhardinge <jeremy@goop.org>
62637 */
62638 +#ifdef CONFIG_PAX_SEGMEXEC
62639 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62640 {
62641 + int ret = __do_munmap(mm, start, len);
62642 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62643 + return ret;
62644 +
62645 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62646 +}
62647 +
62648 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62649 +#else
62650 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62651 +#endif
62652 +{
62653 unsigned long end;
62654 struct vm_area_struct *vma, *prev, *last;
62655
62656 + /*
62657 + * mm->mmap_sem is required to protect against another thread
62658 + * changing the mappings in case we sleep.
62659 + */
62660 + verify_mm_writelocked(mm);
62661 +
62662 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62663 return -EINVAL;
62664
62665 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
62666 /* Fix up all other VM information */
62667 remove_vma_list(mm, vma);
62668
62669 + track_exec_limit(mm, start, end, 0UL);
62670 +
62671 return 0;
62672 }
62673
62674 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62675
62676 profile_munmap(addr);
62677
62678 +#ifdef CONFIG_PAX_SEGMEXEC
62679 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62680 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62681 + return -EINVAL;
62682 +#endif
62683 +
62684 down_write(&mm->mmap_sem);
62685 ret = do_munmap(mm, addr, len);
62686 up_write(&mm->mmap_sem);
62687 return ret;
62688 }
62689
62690 -static inline void verify_mm_writelocked(struct mm_struct *mm)
62691 -{
62692 -#ifdef CONFIG_DEBUG_VM
62693 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62694 - WARN_ON(1);
62695 - up_read(&mm->mmap_sem);
62696 - }
62697 -#endif
62698 -}
62699 -
62700 /*
62701 * this is really a simplified "do_mmap". it only handles
62702 * anonymous maps. eventually we may be able to do some
62703 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
62704 struct rb_node ** rb_link, * rb_parent;
62705 pgoff_t pgoff = addr >> PAGE_SHIFT;
62706 int error;
62707 + unsigned long charged;
62708
62709 len = PAGE_ALIGN(len);
62710 if (!len)
62711 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
62712
62713 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62714
62715 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62716 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62717 + flags &= ~VM_EXEC;
62718 +
62719 +#ifdef CONFIG_PAX_MPROTECT
62720 + if (mm->pax_flags & MF_PAX_MPROTECT)
62721 + flags &= ~VM_MAYEXEC;
62722 +#endif
62723 +
62724 + }
62725 +#endif
62726 +
62727 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62728 if (error & ~PAGE_MASK)
62729 return error;
62730
62731 + charged = len >> PAGE_SHIFT;
62732 +
62733 /*
62734 * mlock MCL_FUTURE?
62735 */
62736 if (mm->def_flags & VM_LOCKED) {
62737 unsigned long locked, lock_limit;
62738 - locked = len >> PAGE_SHIFT;
62739 + locked = charged;
62740 locked += mm->locked_vm;
62741 lock_limit = rlimit(RLIMIT_MEMLOCK);
62742 lock_limit >>= PAGE_SHIFT;
62743 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
62744 /*
62745 * Clear old maps. this also does some error checking for us
62746 */
62747 - munmap_back:
62748 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62749 if (vma && vma->vm_start < addr + len) {
62750 if (do_munmap(mm, addr, len))
62751 return -ENOMEM;
62752 - goto munmap_back;
62753 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62754 + BUG_ON(vma && vma->vm_start < addr + len);
62755 }
62756
62757 /* Check against address space limits *after* clearing old maps... */
62758 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
62759 + if (!may_expand_vm(mm, charged))
62760 return -ENOMEM;
62761
62762 if (mm->map_count > sysctl_max_map_count)
62763 return -ENOMEM;
62764
62765 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
62766 + if (security_vm_enough_memory(charged))
62767 return -ENOMEM;
62768
62769 /* Can we just expand an old private anonymous mapping? */
62770 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
62771 */
62772 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62773 if (!vma) {
62774 - vm_unacct_memory(len >> PAGE_SHIFT);
62775 + vm_unacct_memory(charged);
62776 return -ENOMEM;
62777 }
62778
62779 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
62780 vma_link(mm, vma, prev, rb_link, rb_parent);
62781 out:
62782 perf_event_mmap(vma);
62783 - mm->total_vm += len >> PAGE_SHIFT;
62784 + mm->total_vm += charged;
62785 if (flags & VM_LOCKED) {
62786 if (!mlock_vma_pages_range(vma, addr, addr + len))
62787 - mm->locked_vm += (len >> PAGE_SHIFT);
62788 + mm->locked_vm += charged;
62789 }
62790 + track_exec_limit(mm, addr, addr + len, flags);
62791 return addr;
62792 }
62793
62794 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
62795 * Walk the list again, actually closing and freeing it,
62796 * with preemption enabled, without holding any MM locks.
62797 */
62798 - while (vma)
62799 + while (vma) {
62800 + vma->vm_mirror = NULL;
62801 vma = remove_vma(vma);
62802 + }
62803
62804 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
62805 }
62806 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
62807 struct vm_area_struct * __vma, * prev;
62808 struct rb_node ** rb_link, * rb_parent;
62809
62810 +#ifdef CONFIG_PAX_SEGMEXEC
62811 + struct vm_area_struct *vma_m = NULL;
62812 +#endif
62813 +
62814 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
62815 + return -EPERM;
62816 +
62817 /*
62818 * The vm_pgoff of a purely anonymous vma should be irrelevant
62819 * until its first write fault, when page's anon_vma and index
62820 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
62821 if ((vma->vm_flags & VM_ACCOUNT) &&
62822 security_vm_enough_memory_mm(mm, vma_pages(vma)))
62823 return -ENOMEM;
62824 +
62825 +#ifdef CONFIG_PAX_SEGMEXEC
62826 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
62827 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62828 + if (!vma_m)
62829 + return -ENOMEM;
62830 + }
62831 +#endif
62832 +
62833 vma_link(mm, vma, prev, rb_link, rb_parent);
62834 +
62835 +#ifdef CONFIG_PAX_SEGMEXEC
62836 + if (vma_m)
62837 + BUG_ON(pax_mirror_vma(vma_m, vma));
62838 +#endif
62839 +
62840 return 0;
62841 }
62842
62843 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
62844 struct rb_node **rb_link, *rb_parent;
62845 struct mempolicy *pol;
62846
62847 + BUG_ON(vma->vm_mirror);
62848 +
62849 /*
62850 * If anonymous vma has not yet been faulted, update new pgoff
62851 * to match new location, to increase its chance of merging.
62852 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
62853 return NULL;
62854 }
62855
62856 +#ifdef CONFIG_PAX_SEGMEXEC
62857 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
62858 +{
62859 + struct vm_area_struct *prev_m;
62860 + struct rb_node **rb_link_m, *rb_parent_m;
62861 + struct mempolicy *pol_m;
62862 +
62863 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
62864 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
62865 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
62866 + *vma_m = *vma;
62867 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
62868 + if (anon_vma_clone(vma_m, vma))
62869 + return -ENOMEM;
62870 + pol_m = vma_policy(vma_m);
62871 + mpol_get(pol_m);
62872 + vma_set_policy(vma_m, pol_m);
62873 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
62874 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
62875 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
62876 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
62877 + if (vma_m->vm_file)
62878 + get_file(vma_m->vm_file);
62879 + if (vma_m->vm_ops && vma_m->vm_ops->open)
62880 + vma_m->vm_ops->open(vma_m);
62881 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
62882 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
62883 + vma_m->vm_mirror = vma;
62884 + vma->vm_mirror = vma_m;
62885 + return 0;
62886 +}
62887 +#endif
62888 +
62889 /*
62890 * Return true if the calling process may expand its vm space by the passed
62891 * number of pages
62892 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
62893 unsigned long lim;
62894
62895 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
62896 -
62897 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
62898 if (cur + npages > lim)
62899 return 0;
62900 return 1;
62901 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
62902 vma->vm_start = addr;
62903 vma->vm_end = addr + len;
62904
62905 +#ifdef CONFIG_PAX_MPROTECT
62906 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62907 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62908 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
62909 + return -EPERM;
62910 + if (!(vm_flags & VM_EXEC))
62911 + vm_flags &= ~VM_MAYEXEC;
62912 +#else
62913 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62914 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62915 +#endif
62916 + else
62917 + vm_flags &= ~VM_MAYWRITE;
62918 + }
62919 +#endif
62920 +
62921 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
62922 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62923
62924 diff -urNp linux-3.0.3/mm/mprotect.c linux-3.0.3/mm/mprotect.c
62925 --- linux-3.0.3/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
62926 +++ linux-3.0.3/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
62927 @@ -23,10 +23,16 @@
62928 #include <linux/mmu_notifier.h>
62929 #include <linux/migrate.h>
62930 #include <linux/perf_event.h>
62931 +
62932 +#ifdef CONFIG_PAX_MPROTECT
62933 +#include <linux/elf.h>
62934 +#endif
62935 +
62936 #include <asm/uaccess.h>
62937 #include <asm/pgtable.h>
62938 #include <asm/cacheflush.h>
62939 #include <asm/tlbflush.h>
62940 +#include <asm/mmu_context.h>
62941
62942 #ifndef pgprot_modify
62943 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
62944 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
62945 flush_tlb_range(vma, start, end);
62946 }
62947
62948 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62949 +/* called while holding the mmap semaphor for writing except stack expansion */
62950 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
62951 +{
62952 + unsigned long oldlimit, newlimit = 0UL;
62953 +
62954 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
62955 + return;
62956 +
62957 + spin_lock(&mm->page_table_lock);
62958 + oldlimit = mm->context.user_cs_limit;
62959 + if ((prot & VM_EXEC) && oldlimit < end)
62960 + /* USER_CS limit moved up */
62961 + newlimit = end;
62962 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
62963 + /* USER_CS limit moved down */
62964 + newlimit = start;
62965 +
62966 + if (newlimit) {
62967 + mm->context.user_cs_limit = newlimit;
62968 +
62969 +#ifdef CONFIG_SMP
62970 + wmb();
62971 + cpus_clear(mm->context.cpu_user_cs_mask);
62972 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
62973 +#endif
62974 +
62975 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
62976 + }
62977 + spin_unlock(&mm->page_table_lock);
62978 + if (newlimit == end) {
62979 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
62980 +
62981 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
62982 + if (is_vm_hugetlb_page(vma))
62983 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
62984 + else
62985 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
62986 + }
62987 +}
62988 +#endif
62989 +
62990 int
62991 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
62992 unsigned long start, unsigned long end, unsigned long newflags)
62993 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
62994 int error;
62995 int dirty_accountable = 0;
62996
62997 +#ifdef CONFIG_PAX_SEGMEXEC
62998 + struct vm_area_struct *vma_m = NULL;
62999 + unsigned long start_m, end_m;
63000 +
63001 + start_m = start + SEGMEXEC_TASK_SIZE;
63002 + end_m = end + SEGMEXEC_TASK_SIZE;
63003 +#endif
63004 +
63005 if (newflags == oldflags) {
63006 *pprev = vma;
63007 return 0;
63008 }
63009
63010 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63011 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63012 +
63013 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63014 + return -ENOMEM;
63015 +
63016 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63017 + return -ENOMEM;
63018 + }
63019 +
63020 /*
63021 * If we make a private mapping writable we increase our commit;
63022 * but (without finer accounting) cannot reduce our commit if we
63023 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63024 }
63025 }
63026
63027 +#ifdef CONFIG_PAX_SEGMEXEC
63028 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63029 + if (start != vma->vm_start) {
63030 + error = split_vma(mm, vma, start, 1);
63031 + if (error)
63032 + goto fail;
63033 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63034 + *pprev = (*pprev)->vm_next;
63035 + }
63036 +
63037 + if (end != vma->vm_end) {
63038 + error = split_vma(mm, vma, end, 0);
63039 + if (error)
63040 + goto fail;
63041 + }
63042 +
63043 + if (pax_find_mirror_vma(vma)) {
63044 + error = __do_munmap(mm, start_m, end_m - start_m);
63045 + if (error)
63046 + goto fail;
63047 + } else {
63048 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63049 + if (!vma_m) {
63050 + error = -ENOMEM;
63051 + goto fail;
63052 + }
63053 + vma->vm_flags = newflags;
63054 + error = pax_mirror_vma(vma_m, vma);
63055 + if (error) {
63056 + vma->vm_flags = oldflags;
63057 + goto fail;
63058 + }
63059 + }
63060 + }
63061 +#endif
63062 +
63063 /*
63064 * First try to merge with previous and/or next vma.
63065 */
63066 @@ -204,9 +306,21 @@ success:
63067 * vm_flags and vm_page_prot are protected by the mmap_sem
63068 * held in write mode.
63069 */
63070 +
63071 +#ifdef CONFIG_PAX_SEGMEXEC
63072 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63073 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63074 +#endif
63075 +
63076 vma->vm_flags = newflags;
63077 +
63078 +#ifdef CONFIG_PAX_MPROTECT
63079 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63080 + mm->binfmt->handle_mprotect(vma, newflags);
63081 +#endif
63082 +
63083 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63084 - vm_get_page_prot(newflags));
63085 + vm_get_page_prot(vma->vm_flags));
63086
63087 if (vma_wants_writenotify(vma)) {
63088 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63089 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63090 end = start + len;
63091 if (end <= start)
63092 return -ENOMEM;
63093 +
63094 +#ifdef CONFIG_PAX_SEGMEXEC
63095 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63096 + if (end > SEGMEXEC_TASK_SIZE)
63097 + return -EINVAL;
63098 + } else
63099 +#endif
63100 +
63101 + if (end > TASK_SIZE)
63102 + return -EINVAL;
63103 +
63104 if (!arch_validate_prot(prot))
63105 return -EINVAL;
63106
63107 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63108 /*
63109 * Does the application expect PROT_READ to imply PROT_EXEC:
63110 */
63111 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63112 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63113 prot |= PROT_EXEC;
63114
63115 vm_flags = calc_vm_prot_bits(prot);
63116 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63117 if (start > vma->vm_start)
63118 prev = vma;
63119
63120 +#ifdef CONFIG_PAX_MPROTECT
63121 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63122 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63123 +#endif
63124 +
63125 for (nstart = start ; ; ) {
63126 unsigned long newflags;
63127
63128 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63129
63130 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63131 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63132 + if (prot & (PROT_WRITE | PROT_EXEC))
63133 + gr_log_rwxmprotect(vma->vm_file);
63134 +
63135 + error = -EACCES;
63136 + goto out;
63137 + }
63138 +
63139 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63140 error = -EACCES;
63141 goto out;
63142 }
63143 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63144 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63145 if (error)
63146 goto out;
63147 +
63148 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63149 +
63150 nstart = tmp;
63151
63152 if (nstart < prev->vm_end)
63153 diff -urNp linux-3.0.3/mm/mremap.c linux-3.0.3/mm/mremap.c
63154 --- linux-3.0.3/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
63155 +++ linux-3.0.3/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
63156 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
63157 continue;
63158 pte = ptep_clear_flush(vma, old_addr, old_pte);
63159 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63160 +
63161 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63162 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63163 + pte = pte_exprotect(pte);
63164 +#endif
63165 +
63166 set_pte_at(mm, new_addr, new_pte, pte);
63167 }
63168
63169 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
63170 if (is_vm_hugetlb_page(vma))
63171 goto Einval;
63172
63173 +#ifdef CONFIG_PAX_SEGMEXEC
63174 + if (pax_find_mirror_vma(vma))
63175 + goto Einval;
63176 +#endif
63177 +
63178 /* We can't remap across vm area boundaries */
63179 if (old_len > vma->vm_end - addr)
63180 goto Efault;
63181 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
63182 unsigned long ret = -EINVAL;
63183 unsigned long charged = 0;
63184 unsigned long map_flags;
63185 + unsigned long pax_task_size = TASK_SIZE;
63186
63187 if (new_addr & ~PAGE_MASK)
63188 goto out;
63189
63190 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63191 +#ifdef CONFIG_PAX_SEGMEXEC
63192 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63193 + pax_task_size = SEGMEXEC_TASK_SIZE;
63194 +#endif
63195 +
63196 + pax_task_size -= PAGE_SIZE;
63197 +
63198 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63199 goto out;
63200
63201 /* Check if the location we're moving into overlaps the
63202 * old location at all, and fail if it does.
63203 */
63204 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63205 - goto out;
63206 -
63207 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63208 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63209 goto out;
63210
63211 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63212 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
63213 struct vm_area_struct *vma;
63214 unsigned long ret = -EINVAL;
63215 unsigned long charged = 0;
63216 + unsigned long pax_task_size = TASK_SIZE;
63217
63218 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63219 goto out;
63220 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
63221 if (!new_len)
63222 goto out;
63223
63224 +#ifdef CONFIG_PAX_SEGMEXEC
63225 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63226 + pax_task_size = SEGMEXEC_TASK_SIZE;
63227 +#endif
63228 +
63229 + pax_task_size -= PAGE_SIZE;
63230 +
63231 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63232 + old_len > pax_task_size || addr > pax_task_size-old_len)
63233 + goto out;
63234 +
63235 if (flags & MREMAP_FIXED) {
63236 if (flags & MREMAP_MAYMOVE)
63237 ret = mremap_to(addr, old_len, new_addr, new_len);
63238 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
63239 addr + new_len);
63240 }
63241 ret = addr;
63242 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63243 goto out;
63244 }
63245 }
63246 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
63247 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63248 if (ret)
63249 goto out;
63250 +
63251 + map_flags = vma->vm_flags;
63252 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63253 + if (!(ret & ~PAGE_MASK)) {
63254 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63255 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63256 + }
63257 }
63258 out:
63259 if (ret & ~PAGE_MASK)
63260 diff -urNp linux-3.0.3/mm/nobootmem.c linux-3.0.3/mm/nobootmem.c
63261 --- linux-3.0.3/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
63262 +++ linux-3.0.3/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
63263 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63264 unsigned long __init free_all_memory_core_early(int nodeid)
63265 {
63266 int i;
63267 - u64 start, end;
63268 + u64 start, end, startrange, endrange;
63269 unsigned long count = 0;
63270 - struct range *range = NULL;
63271 + struct range *range = NULL, rangerange = { 0, 0 };
63272 int nr_range;
63273
63274 nr_range = get_free_all_memory_range(&range, nodeid);
63275 + startrange = __pa(range) >> PAGE_SHIFT;
63276 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63277
63278 for (i = 0; i < nr_range; i++) {
63279 start = range[i].start;
63280 end = range[i].end;
63281 + if (start <= endrange && startrange < end) {
63282 + BUG_ON(rangerange.start | rangerange.end);
63283 + rangerange = range[i];
63284 + continue;
63285 + }
63286 count += end - start;
63287 __free_pages_memory(start, end);
63288 }
63289 + start = rangerange.start;
63290 + end = rangerange.end;
63291 + count += end - start;
63292 + __free_pages_memory(start, end);
63293
63294 return count;
63295 }
63296 diff -urNp linux-3.0.3/mm/nommu.c linux-3.0.3/mm/nommu.c
63297 --- linux-3.0.3/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
63298 +++ linux-3.0.3/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
63299 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63300 int sysctl_overcommit_ratio = 50; /* default is 50% */
63301 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63302 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63303 -int heap_stack_gap = 0;
63304
63305 atomic_long_t mmap_pages_allocated;
63306
63307 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
63308 EXPORT_SYMBOL(find_vma);
63309
63310 /*
63311 - * find a VMA
63312 - * - we don't extend stack VMAs under NOMMU conditions
63313 - */
63314 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63315 -{
63316 - return find_vma(mm, addr);
63317 -}
63318 -
63319 -/*
63320 * expand a stack to a given address
63321 * - not supported under NOMMU conditions
63322 */
63323 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
63324
63325 /* most fields are the same, copy all, and then fixup */
63326 *new = *vma;
63327 + INIT_LIST_HEAD(&new->anon_vma_chain);
63328 *region = *vma->vm_region;
63329 new->vm_region = region;
63330
63331 diff -urNp linux-3.0.3/mm/page_alloc.c linux-3.0.3/mm/page_alloc.c
63332 --- linux-3.0.3/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
63333 +++ linux-3.0.3/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
63334 @@ -340,7 +340,7 @@ out:
63335 * This usage means that zero-order pages may not be compound.
63336 */
63337
63338 -static void free_compound_page(struct page *page)
63339 +void free_compound_page(struct page *page)
63340 {
63341 __free_pages_ok(page, compound_order(page));
63342 }
63343 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
63344 int i;
63345 int bad = 0;
63346
63347 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63348 + unsigned long index = 1UL << order;
63349 +#endif
63350 +
63351 trace_mm_page_free_direct(page, order);
63352 kmemcheck_free_shadow(page, order);
63353
63354 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
63355 debug_check_no_obj_freed(page_address(page),
63356 PAGE_SIZE << order);
63357 }
63358 +
63359 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63360 + for (; index; --index)
63361 + sanitize_highpage(page + index - 1);
63362 +#endif
63363 +
63364 arch_free_page(page, order);
63365 kernel_map_pages(page, 1 << order, 0);
63366
63367 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
63368 arch_alloc_page(page, order);
63369 kernel_map_pages(page, 1 << order, 1);
63370
63371 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63372 if (gfp_flags & __GFP_ZERO)
63373 prep_zero_page(page, order, gfp_flags);
63374 +#endif
63375
63376 if (order && (gfp_flags & __GFP_COMP))
63377 prep_compound_page(page, order);
63378 @@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
63379 int cpu;
63380 struct zone *zone;
63381
63382 + pax_track_stack();
63383 +
63384 for_each_populated_zone(zone) {
63385 if (skip_free_areas_node(filter, zone_to_nid(zone)))
63386 continue;
63387 diff -urNp linux-3.0.3/mm/percpu.c linux-3.0.3/mm/percpu.c
63388 --- linux-3.0.3/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
63389 +++ linux-3.0.3/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
63390 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63391 static unsigned int pcpu_last_unit_cpu __read_mostly;
63392
63393 /* the address of the first chunk which starts with the kernel static area */
63394 -void *pcpu_base_addr __read_mostly;
63395 +void *pcpu_base_addr __read_only;
63396 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63397
63398 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63399 diff -urNp linux-3.0.3/mm/rmap.c linux-3.0.3/mm/rmap.c
63400 --- linux-3.0.3/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
63401 +++ linux-3.0.3/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
63402 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
63403 struct anon_vma *anon_vma = vma->anon_vma;
63404 struct anon_vma_chain *avc;
63405
63406 +#ifdef CONFIG_PAX_SEGMEXEC
63407 + struct anon_vma_chain *avc_m = NULL;
63408 +#endif
63409 +
63410 might_sleep();
63411 if (unlikely(!anon_vma)) {
63412 struct mm_struct *mm = vma->vm_mm;
63413 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
63414 if (!avc)
63415 goto out_enomem;
63416
63417 +#ifdef CONFIG_PAX_SEGMEXEC
63418 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
63419 + if (!avc_m)
63420 + goto out_enomem_free_avc;
63421 +#endif
63422 +
63423 anon_vma = find_mergeable_anon_vma(vma);
63424 allocated = NULL;
63425 if (!anon_vma) {
63426 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
63427 /* page_table_lock to protect against threads */
63428 spin_lock(&mm->page_table_lock);
63429 if (likely(!vma->anon_vma)) {
63430 +
63431 +#ifdef CONFIG_PAX_SEGMEXEC
63432 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63433 +
63434 + if (vma_m) {
63435 + BUG_ON(vma_m->anon_vma);
63436 + vma_m->anon_vma = anon_vma;
63437 + avc_m->anon_vma = anon_vma;
63438 + avc_m->vma = vma;
63439 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63440 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63441 + avc_m = NULL;
63442 + }
63443 +#endif
63444 +
63445 vma->anon_vma = anon_vma;
63446 avc->anon_vma = anon_vma;
63447 avc->vma = vma;
63448 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
63449
63450 if (unlikely(allocated))
63451 put_anon_vma(allocated);
63452 +
63453 +#ifdef CONFIG_PAX_SEGMEXEC
63454 + if (unlikely(avc_m))
63455 + anon_vma_chain_free(avc_m);
63456 +#endif
63457 +
63458 if (unlikely(avc))
63459 anon_vma_chain_free(avc);
63460 }
63461 return 0;
63462
63463 out_enomem_free_avc:
63464 +
63465 +#ifdef CONFIG_PAX_SEGMEXEC
63466 + if (avc_m)
63467 + anon_vma_chain_free(avc_m);
63468 +#endif
63469 +
63470 anon_vma_chain_free(avc);
63471 out_enomem:
63472 return -ENOMEM;
63473 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
63474 * Attach the anon_vmas from src to dst.
63475 * Returns 0 on success, -ENOMEM on failure.
63476 */
63477 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63478 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63479 {
63480 struct anon_vma_chain *avc, *pavc;
63481 struct anon_vma *root = NULL;
63482 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
63483 * the corresponding VMA in the parent process is attached to.
63484 * Returns 0 on success, non-zero on failure.
63485 */
63486 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63487 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63488 {
63489 struct anon_vma_chain *avc;
63490 struct anon_vma *anon_vma;
63491 diff -urNp linux-3.0.3/mm/shmem.c linux-3.0.3/mm/shmem.c
63492 --- linux-3.0.3/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
63493 +++ linux-3.0.3/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
63494 @@ -31,7 +31,7 @@
63495 #include <linux/percpu_counter.h>
63496 #include <linux/swap.h>
63497
63498 -static struct vfsmount *shm_mnt;
63499 +struct vfsmount *shm_mnt;
63500
63501 #ifdef CONFIG_SHMEM
63502 /*
63503 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
63504 goto unlock;
63505 }
63506 entry = shmem_swp_entry(info, index, NULL);
63507 + if (!entry)
63508 + goto unlock;
63509 if (entry->val) {
63510 /*
63511 * The more uptodate page coming down from a stacked
63512 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
63513 struct vm_area_struct pvma;
63514 struct page *page;
63515
63516 + pax_track_stack();
63517 +
63518 spol = mpol_cond_copy(&mpol,
63519 mpol_shared_policy_lookup(&info->policy, idx));
63520
63521 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
63522 int err = -ENOMEM;
63523
63524 /* Round up to L1_CACHE_BYTES to resist false sharing */
63525 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63526 - L1_CACHE_BYTES), GFP_KERNEL);
63527 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63528 if (!sbinfo)
63529 return -ENOMEM;
63530
63531 diff -urNp linux-3.0.3/mm/slab.c linux-3.0.3/mm/slab.c
63532 --- linux-3.0.3/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
63533 +++ linux-3.0.3/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
63534 @@ -151,7 +151,7 @@
63535
63536 /* Legal flag mask for kmem_cache_create(). */
63537 #if DEBUG
63538 -# define CREATE_MASK (SLAB_RED_ZONE | \
63539 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63540 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63541 SLAB_CACHE_DMA | \
63542 SLAB_STORE_USER | \
63543 @@ -159,7 +159,7 @@
63544 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63545 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63546 #else
63547 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63548 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63549 SLAB_CACHE_DMA | \
63550 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63551 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63552 @@ -288,7 +288,7 @@ struct kmem_list3 {
63553 * Need this for bootstrapping a per node allocator.
63554 */
63555 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63556 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63557 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63558 #define CACHE_CACHE 0
63559 #define SIZE_AC MAX_NUMNODES
63560 #define SIZE_L3 (2 * MAX_NUMNODES)
63561 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
63562 if ((x)->max_freeable < i) \
63563 (x)->max_freeable = i; \
63564 } while (0)
63565 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63566 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63567 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63568 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63569 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63570 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63571 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63572 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63573 #else
63574 #define STATS_INC_ACTIVE(x) do { } while (0)
63575 #define STATS_DEC_ACTIVE(x) do { } while (0)
63576 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
63577 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63578 */
63579 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63580 - const struct slab *slab, void *obj)
63581 + const struct slab *slab, const void *obj)
63582 {
63583 u32 offset = (obj - slab->s_mem);
63584 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63585 @@ -564,7 +564,7 @@ struct cache_names {
63586 static struct cache_names __initdata cache_names[] = {
63587 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63588 #include <linux/kmalloc_sizes.h>
63589 - {NULL,}
63590 + {NULL}
63591 #undef CACHE
63592 };
63593
63594 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
63595 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63596 sizes[INDEX_AC].cs_size,
63597 ARCH_KMALLOC_MINALIGN,
63598 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63599 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63600 NULL);
63601
63602 if (INDEX_AC != INDEX_L3) {
63603 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
63604 kmem_cache_create(names[INDEX_L3].name,
63605 sizes[INDEX_L3].cs_size,
63606 ARCH_KMALLOC_MINALIGN,
63607 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63608 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63609 NULL);
63610 }
63611
63612 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
63613 sizes->cs_cachep = kmem_cache_create(names->name,
63614 sizes->cs_size,
63615 ARCH_KMALLOC_MINALIGN,
63616 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63617 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63618 NULL);
63619 }
63620 #ifdef CONFIG_ZONE_DMA
63621 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
63622 }
63623 /* cpu stats */
63624 {
63625 - unsigned long allochit = atomic_read(&cachep->allochit);
63626 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63627 - unsigned long freehit = atomic_read(&cachep->freehit);
63628 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63629 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63630 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63631 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63632 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63633
63634 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63635 allochit, allocmiss, freehit, freemiss);
63636 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
63637
63638 static int __init slab_proc_init(void)
63639 {
63640 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63641 + mode_t gr_mode = S_IRUGO;
63642 +
63643 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63644 + gr_mode = S_IRUSR;
63645 +#endif
63646 +
63647 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63648 #ifdef CONFIG_DEBUG_SLAB_LEAK
63649 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63650 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63651 #endif
63652 return 0;
63653 }
63654 module_init(slab_proc_init);
63655 #endif
63656
63657 +void check_object_size(const void *ptr, unsigned long n, bool to)
63658 +{
63659 +
63660 +#ifdef CONFIG_PAX_USERCOPY
63661 + struct page *page;
63662 + struct kmem_cache *cachep = NULL;
63663 + struct slab *slabp;
63664 + unsigned int objnr;
63665 + unsigned long offset;
63666 +
63667 + if (!n)
63668 + return;
63669 +
63670 + if (ZERO_OR_NULL_PTR(ptr))
63671 + goto report;
63672 +
63673 + if (!virt_addr_valid(ptr))
63674 + return;
63675 +
63676 + page = virt_to_head_page(ptr);
63677 +
63678 + if (!PageSlab(page)) {
63679 + if (object_is_on_stack(ptr, n) == -1)
63680 + goto report;
63681 + return;
63682 + }
63683 +
63684 + cachep = page_get_cache(page);
63685 + if (!(cachep->flags & SLAB_USERCOPY))
63686 + goto report;
63687 +
63688 + slabp = page_get_slab(page);
63689 + objnr = obj_to_index(cachep, slabp, ptr);
63690 + BUG_ON(objnr >= cachep->num);
63691 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63692 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63693 + return;
63694 +
63695 +report:
63696 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63697 +#endif
63698 +
63699 +}
63700 +EXPORT_SYMBOL(check_object_size);
63701 +
63702 /**
63703 * ksize - get the actual amount of memory allocated for a given object
63704 * @objp: Pointer to the object
63705 diff -urNp linux-3.0.3/mm/slob.c linux-3.0.3/mm/slob.c
63706 --- linux-3.0.3/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
63707 +++ linux-3.0.3/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
63708 @@ -29,7 +29,7 @@
63709 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63710 * alloc_pages() directly, allocating compound pages so the page order
63711 * does not have to be separately tracked, and also stores the exact
63712 - * allocation size in page->private so that it can be used to accurately
63713 + * allocation size in slob_page->size so that it can be used to accurately
63714 * provide ksize(). These objects are detected in kfree() because slob_page()
63715 * is false for them.
63716 *
63717 @@ -58,6 +58,7 @@
63718 */
63719
63720 #include <linux/kernel.h>
63721 +#include <linux/sched.h>
63722 #include <linux/slab.h>
63723 #include <linux/mm.h>
63724 #include <linux/swap.h> /* struct reclaim_state */
63725 @@ -102,7 +103,8 @@ struct slob_page {
63726 unsigned long flags; /* mandatory */
63727 atomic_t _count; /* mandatory */
63728 slobidx_t units; /* free units left in page */
63729 - unsigned long pad[2];
63730 + unsigned long pad[1];
63731 + unsigned long size; /* size when >=PAGE_SIZE */
63732 slob_t *free; /* first free slob_t in page */
63733 struct list_head list; /* linked list of free pages */
63734 };
63735 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63736 */
63737 static inline int is_slob_page(struct slob_page *sp)
63738 {
63739 - return PageSlab((struct page *)sp);
63740 + return PageSlab((struct page *)sp) && !sp->size;
63741 }
63742
63743 static inline void set_slob_page(struct slob_page *sp)
63744 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
63745
63746 static inline struct slob_page *slob_page(const void *addr)
63747 {
63748 - return (struct slob_page *)virt_to_page(addr);
63749 + return (struct slob_page *)virt_to_head_page(addr);
63750 }
63751
63752 /*
63753 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
63754 /*
63755 * Return the size of a slob block.
63756 */
63757 -static slobidx_t slob_units(slob_t *s)
63758 +static slobidx_t slob_units(const slob_t *s)
63759 {
63760 if (s->units > 0)
63761 return s->units;
63762 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
63763 /*
63764 * Return the next free slob block pointer after this one.
63765 */
63766 -static slob_t *slob_next(slob_t *s)
63767 +static slob_t *slob_next(const slob_t *s)
63768 {
63769 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
63770 slobidx_t next;
63771 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
63772 /*
63773 * Returns true if s is the last free block in its page.
63774 */
63775 -static int slob_last(slob_t *s)
63776 +static int slob_last(const slob_t *s)
63777 {
63778 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
63779 }
63780 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
63781 if (!page)
63782 return NULL;
63783
63784 + set_slob_page(page);
63785 return page_address(page);
63786 }
63787
63788 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
63789 if (!b)
63790 return NULL;
63791 sp = slob_page(b);
63792 - set_slob_page(sp);
63793
63794 spin_lock_irqsave(&slob_lock, flags);
63795 sp->units = SLOB_UNITS(PAGE_SIZE);
63796 sp->free = b;
63797 + sp->size = 0;
63798 INIT_LIST_HEAD(&sp->list);
63799 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
63800 set_slob_page_free(sp, slob_list);
63801 @@ -476,10 +479,9 @@ out:
63802 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
63803 */
63804
63805 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63806 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
63807 {
63808 - unsigned int *m;
63809 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63810 + slob_t *m;
63811 void *ret;
63812
63813 lockdep_trace_alloc(gfp);
63814 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
63815
63816 if (!m)
63817 return NULL;
63818 - *m = size;
63819 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
63820 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
63821 + m[0].units = size;
63822 + m[1].units = align;
63823 ret = (void *)m + align;
63824
63825 trace_kmalloc_node(_RET_IP_, ret,
63826 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
63827 gfp |= __GFP_COMP;
63828 ret = slob_new_pages(gfp, order, node);
63829 if (ret) {
63830 - struct page *page;
63831 - page = virt_to_page(ret);
63832 - page->private = size;
63833 + struct slob_page *sp;
63834 + sp = slob_page(ret);
63835 + sp->size = size;
63836 }
63837
63838 trace_kmalloc_node(_RET_IP_, ret,
63839 size, PAGE_SIZE << order, gfp, node);
63840 }
63841
63842 - kmemleak_alloc(ret, size, 1, gfp);
63843 + return ret;
63844 +}
63845 +
63846 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63847 +{
63848 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63849 + void *ret = __kmalloc_node_align(size, gfp, node, align);
63850 +
63851 + if (!ZERO_OR_NULL_PTR(ret))
63852 + kmemleak_alloc(ret, size, 1, gfp);
63853 return ret;
63854 }
63855 EXPORT_SYMBOL(__kmalloc_node);
63856 @@ -531,13 +545,88 @@ void kfree(const void *block)
63857 sp = slob_page(block);
63858 if (is_slob_page(sp)) {
63859 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63860 - unsigned int *m = (unsigned int *)(block - align);
63861 - slob_free(m, *m + align);
63862 - } else
63863 + slob_t *m = (slob_t *)(block - align);
63864 + slob_free(m, m[0].units + align);
63865 + } else {
63866 + clear_slob_page(sp);
63867 + free_slob_page(sp);
63868 + sp->size = 0;
63869 put_page(&sp->page);
63870 + }
63871 }
63872 EXPORT_SYMBOL(kfree);
63873
63874 +void check_object_size(const void *ptr, unsigned long n, bool to)
63875 +{
63876 +
63877 +#ifdef CONFIG_PAX_USERCOPY
63878 + struct slob_page *sp;
63879 + const slob_t *free;
63880 + const void *base;
63881 + unsigned long flags;
63882 +
63883 + if (!n)
63884 + return;
63885 +
63886 + if (ZERO_OR_NULL_PTR(ptr))
63887 + goto report;
63888 +
63889 + if (!virt_addr_valid(ptr))
63890 + return;
63891 +
63892 + sp = slob_page(ptr);
63893 + if (!PageSlab((struct page*)sp)) {
63894 + if (object_is_on_stack(ptr, n) == -1)
63895 + goto report;
63896 + return;
63897 + }
63898 +
63899 + if (sp->size) {
63900 + base = page_address(&sp->page);
63901 + if (base <= ptr && n <= sp->size - (ptr - base))
63902 + return;
63903 + goto report;
63904 + }
63905 +
63906 + /* some tricky double walking to find the chunk */
63907 + spin_lock_irqsave(&slob_lock, flags);
63908 + base = (void *)((unsigned long)ptr & PAGE_MASK);
63909 + free = sp->free;
63910 +
63911 + while (!slob_last(free) && (void *)free <= ptr) {
63912 + base = free + slob_units(free);
63913 + free = slob_next(free);
63914 + }
63915 +
63916 + while (base < (void *)free) {
63917 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
63918 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
63919 + int offset;
63920 +
63921 + if (ptr < base + align)
63922 + break;
63923 +
63924 + offset = ptr - base - align;
63925 + if (offset >= m) {
63926 + base += size;
63927 + continue;
63928 + }
63929 +
63930 + if (n > m - offset)
63931 + break;
63932 +
63933 + spin_unlock_irqrestore(&slob_lock, flags);
63934 + return;
63935 + }
63936 +
63937 + spin_unlock_irqrestore(&slob_lock, flags);
63938 +report:
63939 + pax_report_usercopy(ptr, n, to, NULL);
63940 +#endif
63941 +
63942 +}
63943 +EXPORT_SYMBOL(check_object_size);
63944 +
63945 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
63946 size_t ksize(const void *block)
63947 {
63948 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
63949 sp = slob_page(block);
63950 if (is_slob_page(sp)) {
63951 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63952 - unsigned int *m = (unsigned int *)(block - align);
63953 - return SLOB_UNITS(*m) * SLOB_UNIT;
63954 + slob_t *m = (slob_t *)(block - align);
63955 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
63956 } else
63957 - return sp->page.private;
63958 + return sp->size;
63959 }
63960 EXPORT_SYMBOL(ksize);
63961
63962 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
63963 {
63964 struct kmem_cache *c;
63965
63966 +#ifdef CONFIG_PAX_USERCOPY
63967 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
63968 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
63969 +#else
63970 c = slob_alloc(sizeof(struct kmem_cache),
63971 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
63972 +#endif
63973
63974 if (c) {
63975 c->name = name;
63976 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
63977 {
63978 void *b;
63979
63980 +#ifdef CONFIG_PAX_USERCOPY
63981 + b = __kmalloc_node_align(c->size, flags, node, c->align);
63982 +#else
63983 if (c->size < PAGE_SIZE) {
63984 b = slob_alloc(c->size, flags, c->align, node);
63985 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63986 SLOB_UNITS(c->size) * SLOB_UNIT,
63987 flags, node);
63988 } else {
63989 + struct slob_page *sp;
63990 +
63991 b = slob_new_pages(flags, get_order(c->size), node);
63992 + sp = slob_page(b);
63993 + sp->size = c->size;
63994 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63995 PAGE_SIZE << get_order(c->size),
63996 flags, node);
63997 }
63998 +#endif
63999
64000 if (c->ctor)
64001 c->ctor(b);
64002 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64003
64004 static void __kmem_cache_free(void *b, int size)
64005 {
64006 - if (size < PAGE_SIZE)
64007 + struct slob_page *sp = slob_page(b);
64008 +
64009 + if (is_slob_page(sp))
64010 slob_free(b, size);
64011 - else
64012 + else {
64013 + clear_slob_page(sp);
64014 + free_slob_page(sp);
64015 + sp->size = 0;
64016 slob_free_pages(b, get_order(size));
64017 + }
64018 }
64019
64020 static void kmem_rcu_free(struct rcu_head *head)
64021 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64022
64023 void kmem_cache_free(struct kmem_cache *c, void *b)
64024 {
64025 + int size = c->size;
64026 +
64027 +#ifdef CONFIG_PAX_USERCOPY
64028 + if (size + c->align < PAGE_SIZE) {
64029 + size += c->align;
64030 + b -= c->align;
64031 + }
64032 +#endif
64033 +
64034 kmemleak_free_recursive(b, c->flags);
64035 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64036 struct slob_rcu *slob_rcu;
64037 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64038 - slob_rcu->size = c->size;
64039 + slob_rcu = b + (size - sizeof(struct slob_rcu));
64040 + slob_rcu->size = size;
64041 call_rcu(&slob_rcu->head, kmem_rcu_free);
64042 } else {
64043 - __kmem_cache_free(b, c->size);
64044 + __kmem_cache_free(b, size);
64045 }
64046
64047 +#ifdef CONFIG_PAX_USERCOPY
64048 + trace_kfree(_RET_IP_, b);
64049 +#else
64050 trace_kmem_cache_free(_RET_IP_, b);
64051 +#endif
64052 +
64053 }
64054 EXPORT_SYMBOL(kmem_cache_free);
64055
64056 diff -urNp linux-3.0.3/mm/slub.c linux-3.0.3/mm/slub.c
64057 --- linux-3.0.3/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
64058 +++ linux-3.0.3/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
64059 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
64060 if (!t->addr)
64061 return;
64062
64063 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64064 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64065 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64066 }
64067
64068 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
64069
64070 page = virt_to_head_page(x);
64071
64072 + BUG_ON(!PageSlab(page));
64073 +
64074 slab_free(s, page, x, _RET_IP_);
64075
64076 trace_kmem_cache_free(_RET_IP_, x);
64077 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
64078 * Merge control. If this is set then no merging of slab caches will occur.
64079 * (Could be removed. This was introduced to pacify the merge skeptics.)
64080 */
64081 -static int slub_nomerge;
64082 +static int slub_nomerge = 1;
64083
64084 /*
64085 * Calculate the order of allocation given an slab object size.
64086 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
64087 * list to avoid pounding the page allocator excessively.
64088 */
64089 set_min_partial(s, ilog2(s->size));
64090 - s->refcount = 1;
64091 + atomic_set(&s->refcount, 1);
64092 #ifdef CONFIG_NUMA
64093 s->remote_node_defrag_ratio = 1000;
64094 #endif
64095 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
64096 void kmem_cache_destroy(struct kmem_cache *s)
64097 {
64098 down_write(&slub_lock);
64099 - s->refcount--;
64100 - if (!s->refcount) {
64101 + if (atomic_dec_and_test(&s->refcount)) {
64102 list_del(&s->list);
64103 if (kmem_cache_close(s)) {
64104 printk(KERN_ERR "SLUB %s: %s called for cache that "
64105 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
64106 EXPORT_SYMBOL(__kmalloc_node);
64107 #endif
64108
64109 +void check_object_size(const void *ptr, unsigned long n, bool to)
64110 +{
64111 +
64112 +#ifdef CONFIG_PAX_USERCOPY
64113 + struct page *page;
64114 + struct kmem_cache *s = NULL;
64115 + unsigned long offset;
64116 +
64117 + if (!n)
64118 + return;
64119 +
64120 + if (ZERO_OR_NULL_PTR(ptr))
64121 + goto report;
64122 +
64123 + if (!virt_addr_valid(ptr))
64124 + return;
64125 +
64126 + page = virt_to_head_page(ptr);
64127 +
64128 + if (!PageSlab(page)) {
64129 + if (object_is_on_stack(ptr, n) == -1)
64130 + goto report;
64131 + return;
64132 + }
64133 +
64134 + s = page->slab;
64135 + if (!(s->flags & SLAB_USERCOPY))
64136 + goto report;
64137 +
64138 + offset = (ptr - page_address(page)) % s->size;
64139 + if (offset <= s->objsize && n <= s->objsize - offset)
64140 + return;
64141 +
64142 +report:
64143 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64144 +#endif
64145 +
64146 +}
64147 +EXPORT_SYMBOL(check_object_size);
64148 +
64149 size_t ksize(const void *object)
64150 {
64151 struct page *page;
64152 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
64153 int node;
64154
64155 list_add(&s->list, &slab_caches);
64156 - s->refcount = -1;
64157 + atomic_set(&s->refcount, -1);
64158
64159 for_each_node_state(node, N_NORMAL_MEMORY) {
64160 struct kmem_cache_node *n = get_node(s, node);
64161 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
64162
64163 /* Caches that are not of the two-to-the-power-of size */
64164 if (KMALLOC_MIN_SIZE <= 32) {
64165 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64166 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64167 caches++;
64168 }
64169
64170 if (KMALLOC_MIN_SIZE <= 64) {
64171 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64172 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64173 caches++;
64174 }
64175
64176 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64177 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64178 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64179 caches++;
64180 }
64181
64182 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
64183 /*
64184 * We may have set a slab to be unmergeable during bootstrap.
64185 */
64186 - if (s->refcount < 0)
64187 + if (atomic_read(&s->refcount) < 0)
64188 return 1;
64189
64190 return 0;
64191 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
64192 down_write(&slub_lock);
64193 s = find_mergeable(size, align, flags, name, ctor);
64194 if (s) {
64195 - s->refcount++;
64196 + atomic_inc(&s->refcount);
64197 /*
64198 * Adjust the object sizes so that we clear
64199 * the complete object on kzalloc.
64200 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
64201 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64202
64203 if (sysfs_slab_alias(s, name)) {
64204 - s->refcount--;
64205 + atomic_dec(&s->refcount);
64206 goto err;
64207 }
64208 up_write(&slub_lock);
64209 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
64210
64211 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64212 {
64213 - return sprintf(buf, "%d\n", s->refcount - 1);
64214 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64215 }
64216 SLAB_ATTR_RO(aliases);
64217
64218 @@ -4894,7 +4935,13 @@ static const struct file_operations proc
64219
64220 static int __init slab_proc_init(void)
64221 {
64222 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64223 + mode_t gr_mode = S_IRUGO;
64224 +
64225 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64226 + gr_mode = S_IRUSR;
64227 +#endif
64228 +
64229 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64230 return 0;
64231 }
64232 module_init(slab_proc_init);
64233 diff -urNp linux-3.0.3/mm/swap.c linux-3.0.3/mm/swap.c
64234 --- linux-3.0.3/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
64235 +++ linux-3.0.3/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
64236 @@ -31,6 +31,7 @@
64237 #include <linux/backing-dev.h>
64238 #include <linux/memcontrol.h>
64239 #include <linux/gfp.h>
64240 +#include <linux/hugetlb.h>
64241
64242 #include "internal.h"
64243
64244 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64245
64246 __page_cache_release(page);
64247 dtor = get_compound_page_dtor(page);
64248 + if (!PageHuge(page))
64249 + BUG_ON(dtor != free_compound_page);
64250 (*dtor)(page);
64251 }
64252
64253 diff -urNp linux-3.0.3/mm/swapfile.c linux-3.0.3/mm/swapfile.c
64254 --- linux-3.0.3/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
64255 +++ linux-3.0.3/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
64256 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
64257
64258 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64259 /* Activity counter to indicate that a swapon or swapoff has occurred */
64260 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64261 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64262
64263 static inline unsigned char swap_count(unsigned char ent)
64264 {
64265 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64266 }
64267 filp_close(swap_file, NULL);
64268 err = 0;
64269 - atomic_inc(&proc_poll_event);
64270 + atomic_inc_unchecked(&proc_poll_event);
64271 wake_up_interruptible(&proc_poll_wait);
64272
64273 out_dput:
64274 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
64275
64276 poll_wait(file, &proc_poll_wait, wait);
64277
64278 - if (s->event != atomic_read(&proc_poll_event)) {
64279 - s->event = atomic_read(&proc_poll_event);
64280 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64281 + s->event = atomic_read_unchecked(&proc_poll_event);
64282 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64283 }
64284
64285 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
64286 }
64287
64288 s->seq.private = s;
64289 - s->event = atomic_read(&proc_poll_event);
64290 + s->event = atomic_read_unchecked(&proc_poll_event);
64291 return ret;
64292 }
64293
64294 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64295 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64296
64297 mutex_unlock(&swapon_mutex);
64298 - atomic_inc(&proc_poll_event);
64299 + atomic_inc_unchecked(&proc_poll_event);
64300 wake_up_interruptible(&proc_poll_wait);
64301
64302 if (S_ISREG(inode->i_mode))
64303 diff -urNp linux-3.0.3/mm/util.c linux-3.0.3/mm/util.c
64304 --- linux-3.0.3/mm/util.c 2011-07-21 22:17:23.000000000 -0400
64305 +++ linux-3.0.3/mm/util.c 2011-08-23 21:47:56.000000000 -0400
64306 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
64307 * allocated buffer. Use this if you don't want to free the buffer immediately
64308 * like, for example, with RCU.
64309 */
64310 +#undef __krealloc
64311 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64312 {
64313 void *ret;
64314 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
64315 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64316 * %NULL pointer, the object pointed to is freed.
64317 */
64318 +#undef krealloc
64319 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64320 {
64321 void *ret;
64322 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
64323 void arch_pick_mmap_layout(struct mm_struct *mm)
64324 {
64325 mm->mmap_base = TASK_UNMAPPED_BASE;
64326 +
64327 +#ifdef CONFIG_PAX_RANDMMAP
64328 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64329 + mm->mmap_base += mm->delta_mmap;
64330 +#endif
64331 +
64332 mm->get_unmapped_area = arch_get_unmapped_area;
64333 mm->unmap_area = arch_unmap_area;
64334 }
64335 diff -urNp linux-3.0.3/mm/vmalloc.c linux-3.0.3/mm/vmalloc.c
64336 --- linux-3.0.3/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
64337 +++ linux-3.0.3/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
64338 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64339
64340 pte = pte_offset_kernel(pmd, addr);
64341 do {
64342 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64343 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64344 +
64345 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64346 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64347 + BUG_ON(!pte_exec(*pte));
64348 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64349 + continue;
64350 + }
64351 +#endif
64352 +
64353 + {
64354 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64355 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64356 + }
64357 } while (pte++, addr += PAGE_SIZE, addr != end);
64358 }
64359
64360 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64361 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64362 {
64363 pte_t *pte;
64364 + int ret = -ENOMEM;
64365
64366 /*
64367 * nr is a running index into the array which helps higher level
64368 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64369 pte = pte_alloc_kernel(pmd, addr);
64370 if (!pte)
64371 return -ENOMEM;
64372 +
64373 + pax_open_kernel();
64374 do {
64375 struct page *page = pages[*nr];
64376
64377 - if (WARN_ON(!pte_none(*pte)))
64378 - return -EBUSY;
64379 - if (WARN_ON(!page))
64380 - return -ENOMEM;
64381 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64382 + if (pgprot_val(prot) & _PAGE_NX)
64383 +#endif
64384 +
64385 + if (WARN_ON(!pte_none(*pte))) {
64386 + ret = -EBUSY;
64387 + goto out;
64388 + }
64389 + if (WARN_ON(!page)) {
64390 + ret = -ENOMEM;
64391 + goto out;
64392 + }
64393 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64394 (*nr)++;
64395 } while (pte++, addr += PAGE_SIZE, addr != end);
64396 - return 0;
64397 + ret = 0;
64398 +out:
64399 + pax_close_kernel();
64400 + return ret;
64401 }
64402
64403 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64404 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64405 * and fall back on vmalloc() if that fails. Others
64406 * just put it in the vmalloc space.
64407 */
64408 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64409 +#ifdef CONFIG_MODULES
64410 +#ifdef MODULES_VADDR
64411 unsigned long addr = (unsigned long)x;
64412 if (addr >= MODULES_VADDR && addr < MODULES_END)
64413 return 1;
64414 #endif
64415 +
64416 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64417 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64418 + return 1;
64419 +#endif
64420 +
64421 +#endif
64422 +
64423 return is_vmalloc_addr(x);
64424 }
64425
64426 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64427
64428 if (!pgd_none(*pgd)) {
64429 pud_t *pud = pud_offset(pgd, addr);
64430 +#ifdef CONFIG_X86
64431 + if (!pud_large(*pud))
64432 +#endif
64433 if (!pud_none(*pud)) {
64434 pmd_t *pmd = pmd_offset(pud, addr);
64435 +#ifdef CONFIG_X86
64436 + if (!pmd_large(*pmd))
64437 +#endif
64438 if (!pmd_none(*pmd)) {
64439 pte_t *ptep, pte;
64440
64441 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
64442 struct vm_struct *area;
64443
64444 BUG_ON(in_interrupt());
64445 +
64446 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64447 + if (flags & VM_KERNEXEC) {
64448 + if (start != VMALLOC_START || end != VMALLOC_END)
64449 + return NULL;
64450 + start = (unsigned long)MODULES_EXEC_VADDR;
64451 + end = (unsigned long)MODULES_EXEC_END;
64452 + }
64453 +#endif
64454 +
64455 if (flags & VM_IOREMAP) {
64456 int bit = fls(size);
64457
64458 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
64459 if (count > totalram_pages)
64460 return NULL;
64461
64462 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64463 + if (!(pgprot_val(prot) & _PAGE_NX))
64464 + flags |= VM_KERNEXEC;
64465 +#endif
64466 +
64467 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64468 __builtin_return_address(0));
64469 if (!area)
64470 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
64471 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64472 return NULL;
64473
64474 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64475 + if (!(pgprot_val(prot) & _PAGE_NX))
64476 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64477 + node, gfp_mask, caller);
64478 + else
64479 +#endif
64480 +
64481 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64482 gfp_mask, caller);
64483
64484 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
64485 gfp_mask, prot, node, caller);
64486 }
64487
64488 +#undef __vmalloc
64489 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64490 {
64491 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64492 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
64493 * For tight control over page level allocator and protection flags
64494 * use __vmalloc() instead.
64495 */
64496 +#undef vmalloc
64497 void *vmalloc(unsigned long size)
64498 {
64499 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64500 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
64501 * For tight control over page level allocator and protection flags
64502 * use __vmalloc() instead.
64503 */
64504 +#undef vzalloc
64505 void *vzalloc(unsigned long size)
64506 {
64507 return __vmalloc_node_flags(size, -1,
64508 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
64509 * The resulting memory area is zeroed so it can be mapped to userspace
64510 * without leaking data.
64511 */
64512 +#undef vmalloc_user
64513 void *vmalloc_user(unsigned long size)
64514 {
64515 struct vm_struct *area;
64516 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
64517 * For tight control over page level allocator and protection flags
64518 * use __vmalloc() instead.
64519 */
64520 +#undef vmalloc_node
64521 void *vmalloc_node(unsigned long size, int node)
64522 {
64523 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64524 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
64525 * For tight control over page level allocator and protection flags
64526 * use __vmalloc_node() instead.
64527 */
64528 +#undef vzalloc_node
64529 void *vzalloc_node(unsigned long size, int node)
64530 {
64531 return __vmalloc_node_flags(size, node,
64532 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
64533 * For tight control over page level allocator and protection flags
64534 * use __vmalloc() instead.
64535 */
64536 -
64537 +#undef vmalloc_exec
64538 void *vmalloc_exec(unsigned long size)
64539 {
64540 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64541 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64542 -1, __builtin_return_address(0));
64543 }
64544
64545 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
64546 * Allocate enough 32bit PA addressable pages to cover @size from the
64547 * page level allocator and map them into contiguous kernel virtual space.
64548 */
64549 +#undef vmalloc_32
64550 void *vmalloc_32(unsigned long size)
64551 {
64552 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64553 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
64554 * The resulting memory area is 32bit addressable and zeroed so it can be
64555 * mapped to userspace without leaking data.
64556 */
64557 +#undef vmalloc_32_user
64558 void *vmalloc_32_user(unsigned long size)
64559 {
64560 struct vm_struct *area;
64561 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
64562 unsigned long uaddr = vma->vm_start;
64563 unsigned long usize = vma->vm_end - vma->vm_start;
64564
64565 + BUG_ON(vma->vm_mirror);
64566 +
64567 if ((PAGE_SIZE-1) & (unsigned long)addr)
64568 return -EINVAL;
64569
64570 diff -urNp linux-3.0.3/mm/vmstat.c linux-3.0.3/mm/vmstat.c
64571 --- linux-3.0.3/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
64572 +++ linux-3.0.3/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
64573 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64574 *
64575 * vm_stat contains the global counters
64576 */
64577 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64578 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64579 EXPORT_SYMBOL(vm_stat);
64580
64581 #ifdef CONFIG_SMP
64582 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64583 v = p->vm_stat_diff[i];
64584 p->vm_stat_diff[i] = 0;
64585 local_irq_restore(flags);
64586 - atomic_long_add(v, &zone->vm_stat[i]);
64587 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64588 global_diff[i] += v;
64589 #ifdef CONFIG_NUMA
64590 /* 3 seconds idle till flush */
64591 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64592
64593 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64594 if (global_diff[i])
64595 - atomic_long_add(global_diff[i], &vm_stat[i]);
64596 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64597 }
64598
64599 #endif
64600 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
64601 start_cpu_timer(cpu);
64602 #endif
64603 #ifdef CONFIG_PROC_FS
64604 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64605 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64606 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64607 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64608 + {
64609 + mode_t gr_mode = S_IRUGO;
64610 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64611 + gr_mode = S_IRUSR;
64612 +#endif
64613 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64614 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64615 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64616 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64617 +#else
64618 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64619 +#endif
64620 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64621 + }
64622 #endif
64623 return 0;
64624 }
64625 diff -urNp linux-3.0.3/net/8021q/vlan.c linux-3.0.3/net/8021q/vlan.c
64626 --- linux-3.0.3/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
64627 +++ linux-3.0.3/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
64628 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
64629 err = -EPERM;
64630 if (!capable(CAP_NET_ADMIN))
64631 break;
64632 - if ((args.u.name_type >= 0) &&
64633 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64634 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64635 struct vlan_net *vn;
64636
64637 vn = net_generic(net, vlan_net_id);
64638 diff -urNp linux-3.0.3/net/atm/atm_misc.c linux-3.0.3/net/atm/atm_misc.c
64639 --- linux-3.0.3/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
64640 +++ linux-3.0.3/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
64641 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64642 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64643 return 1;
64644 atm_return(vcc, truesize);
64645 - atomic_inc(&vcc->stats->rx_drop);
64646 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64647 return 0;
64648 }
64649 EXPORT_SYMBOL(atm_charge);
64650 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64651 }
64652 }
64653 atm_return(vcc, guess);
64654 - atomic_inc(&vcc->stats->rx_drop);
64655 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64656 return NULL;
64657 }
64658 EXPORT_SYMBOL(atm_alloc_charge);
64659 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64660
64661 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64662 {
64663 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64664 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64665 __SONET_ITEMS
64666 #undef __HANDLE_ITEM
64667 }
64668 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64669
64670 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64671 {
64672 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64673 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64674 __SONET_ITEMS
64675 #undef __HANDLE_ITEM
64676 }
64677 diff -urNp linux-3.0.3/net/atm/lec.h linux-3.0.3/net/atm/lec.h
64678 --- linux-3.0.3/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
64679 +++ linux-3.0.3/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
64680 @@ -48,7 +48,7 @@ struct lane2_ops {
64681 const u8 *tlvs, u32 sizeoftlvs);
64682 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64683 const u8 *tlvs, u32 sizeoftlvs);
64684 -};
64685 +} __no_const;
64686
64687 /*
64688 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64689 diff -urNp linux-3.0.3/net/atm/mpc.h linux-3.0.3/net/atm/mpc.h
64690 --- linux-3.0.3/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
64691 +++ linux-3.0.3/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
64692 @@ -33,7 +33,7 @@ struct mpoa_client {
64693 struct mpc_parameters parameters; /* parameters for this client */
64694
64695 const struct net_device_ops *old_ops;
64696 - struct net_device_ops new_ops;
64697 + net_device_ops_no_const new_ops;
64698 };
64699
64700
64701 diff -urNp linux-3.0.3/net/atm/mpoa_caches.c linux-3.0.3/net/atm/mpoa_caches.c
64702 --- linux-3.0.3/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
64703 +++ linux-3.0.3/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
64704 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64705 struct timeval now;
64706 struct k_message msg;
64707
64708 + pax_track_stack();
64709 +
64710 do_gettimeofday(&now);
64711
64712 read_lock_bh(&client->ingress_lock);
64713 diff -urNp linux-3.0.3/net/atm/proc.c linux-3.0.3/net/atm/proc.c
64714 --- linux-3.0.3/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
64715 +++ linux-3.0.3/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
64716 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64717 const struct k_atm_aal_stats *stats)
64718 {
64719 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64720 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64721 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64722 - atomic_read(&stats->rx_drop));
64723 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64724 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64725 + atomic_read_unchecked(&stats->rx_drop));
64726 }
64727
64728 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64729 diff -urNp linux-3.0.3/net/atm/resources.c linux-3.0.3/net/atm/resources.c
64730 --- linux-3.0.3/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
64731 +++ linux-3.0.3/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
64732 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
64733 static void copy_aal_stats(struct k_atm_aal_stats *from,
64734 struct atm_aal_stats *to)
64735 {
64736 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64737 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64738 __AAL_STAT_ITEMS
64739 #undef __HANDLE_ITEM
64740 }
64741 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
64742 static void subtract_aal_stats(struct k_atm_aal_stats *from,
64743 struct atm_aal_stats *to)
64744 {
64745 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64746 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
64747 __AAL_STAT_ITEMS
64748 #undef __HANDLE_ITEM
64749 }
64750 diff -urNp linux-3.0.3/net/batman-adv/hard-interface.c linux-3.0.3/net/batman-adv/hard-interface.c
64751 --- linux-3.0.3/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
64752 +++ linux-3.0.3/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
64753 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
64754 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
64755 dev_add_pack(&hard_iface->batman_adv_ptype);
64756
64757 - atomic_set(&hard_iface->seqno, 1);
64758 - atomic_set(&hard_iface->frag_seqno, 1);
64759 + atomic_set_unchecked(&hard_iface->seqno, 1);
64760 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
64761 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
64762 hard_iface->net_dev->name);
64763
64764 diff -urNp linux-3.0.3/net/batman-adv/routing.c linux-3.0.3/net/batman-adv/routing.c
64765 --- linux-3.0.3/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
64766 +++ linux-3.0.3/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
64767 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
64768 return;
64769
64770 /* could be changed by schedule_own_packet() */
64771 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
64772 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
64773
64774 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
64775
64776 diff -urNp linux-3.0.3/net/batman-adv/send.c linux-3.0.3/net/batman-adv/send.c
64777 --- linux-3.0.3/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
64778 +++ linux-3.0.3/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
64779 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
64780
64781 /* change sequence number to network order */
64782 batman_packet->seqno =
64783 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
64784 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
64785
64786 if (vis_server == VIS_TYPE_SERVER_SYNC)
64787 batman_packet->flags |= VIS_SERVER;
64788 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
64789 else
64790 batman_packet->gw_flags = 0;
64791
64792 - atomic_inc(&hard_iface->seqno);
64793 + atomic_inc_unchecked(&hard_iface->seqno);
64794
64795 slide_own_bcast_window(hard_iface);
64796 send_time = own_send_time(bat_priv);
64797 diff -urNp linux-3.0.3/net/batman-adv/soft-interface.c linux-3.0.3/net/batman-adv/soft-interface.c
64798 --- linux-3.0.3/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
64799 +++ linux-3.0.3/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
64800 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
64801
64802 /* set broadcast sequence number */
64803 bcast_packet->seqno =
64804 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
64805 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
64806
64807 add_bcast_packet_to_list(bat_priv, skb);
64808
64809 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
64810 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
64811
64812 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
64813 - atomic_set(&bat_priv->bcast_seqno, 1);
64814 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
64815 atomic_set(&bat_priv->tt_local_changed, 0);
64816
64817 bat_priv->primary_if = NULL;
64818 diff -urNp linux-3.0.3/net/batman-adv/types.h linux-3.0.3/net/batman-adv/types.h
64819 --- linux-3.0.3/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
64820 +++ linux-3.0.3/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
64821 @@ -38,8 +38,8 @@ struct hard_iface {
64822 int16_t if_num;
64823 char if_status;
64824 struct net_device *net_dev;
64825 - atomic_t seqno;
64826 - atomic_t frag_seqno;
64827 + atomic_unchecked_t seqno;
64828 + atomic_unchecked_t frag_seqno;
64829 unsigned char *packet_buff;
64830 int packet_len;
64831 struct kobject *hardif_obj;
64832 @@ -142,7 +142,7 @@ struct bat_priv {
64833 atomic_t orig_interval; /* uint */
64834 atomic_t hop_penalty; /* uint */
64835 atomic_t log_level; /* uint */
64836 - atomic_t bcast_seqno;
64837 + atomic_unchecked_t bcast_seqno;
64838 atomic_t bcast_queue_left;
64839 atomic_t batman_queue_left;
64840 char num_ifaces;
64841 diff -urNp linux-3.0.3/net/batman-adv/unicast.c linux-3.0.3/net/batman-adv/unicast.c
64842 --- linux-3.0.3/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
64843 +++ linux-3.0.3/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
64844 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
64845 frag1->flags = UNI_FRAG_HEAD | large_tail;
64846 frag2->flags = large_tail;
64847
64848 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
64849 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
64850 frag1->seqno = htons(seqno - 1);
64851 frag2->seqno = htons(seqno);
64852
64853 diff -urNp linux-3.0.3/net/bridge/br_multicast.c linux-3.0.3/net/bridge/br_multicast.c
64854 --- linux-3.0.3/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
64855 +++ linux-3.0.3/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
64856 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
64857 nexthdr = ip6h->nexthdr;
64858 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
64859
64860 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
64861 + if (nexthdr != IPPROTO_ICMPV6)
64862 return 0;
64863
64864 /* Okay, we found ICMPv6 header */
64865 diff -urNp linux-3.0.3/net/bridge/netfilter/ebtables.c linux-3.0.3/net/bridge/netfilter/ebtables.c
64866 --- linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
64867 +++ linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
64868 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
64869 tmp.valid_hooks = t->table->valid_hooks;
64870 }
64871 mutex_unlock(&ebt_mutex);
64872 - if (copy_to_user(user, &tmp, *len) != 0){
64873 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
64874 BUGPRINT("c2u Didn't work\n");
64875 ret = -EFAULT;
64876 break;
64877 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
64878 int ret;
64879 void __user *pos;
64880
64881 + pax_track_stack();
64882 +
64883 memset(&tinfo, 0, sizeof(tinfo));
64884
64885 if (cmd == EBT_SO_GET_ENTRIES) {
64886 diff -urNp linux-3.0.3/net/caif/caif_socket.c linux-3.0.3/net/caif/caif_socket.c
64887 --- linux-3.0.3/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
64888 +++ linux-3.0.3/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
64889 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
64890 #ifdef CONFIG_DEBUG_FS
64891 struct debug_fs_counter {
64892 atomic_t caif_nr_socks;
64893 - atomic_t caif_sock_create;
64894 - atomic_t num_connect_req;
64895 - atomic_t num_connect_resp;
64896 - atomic_t num_connect_fail_resp;
64897 - atomic_t num_disconnect;
64898 - atomic_t num_remote_shutdown_ind;
64899 - atomic_t num_tx_flow_off_ind;
64900 - atomic_t num_tx_flow_on_ind;
64901 - atomic_t num_rx_flow_off;
64902 - atomic_t num_rx_flow_on;
64903 + atomic_unchecked_t caif_sock_create;
64904 + atomic_unchecked_t num_connect_req;
64905 + atomic_unchecked_t num_connect_resp;
64906 + atomic_unchecked_t num_connect_fail_resp;
64907 + atomic_unchecked_t num_disconnect;
64908 + atomic_unchecked_t num_remote_shutdown_ind;
64909 + atomic_unchecked_t num_tx_flow_off_ind;
64910 + atomic_unchecked_t num_tx_flow_on_ind;
64911 + atomic_unchecked_t num_rx_flow_off;
64912 + atomic_unchecked_t num_rx_flow_on;
64913 };
64914 static struct debug_fs_counter cnt;
64915 #define dbfs_atomic_inc(v) atomic_inc_return(v)
64916 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
64917 #define dbfs_atomic_dec(v) atomic_dec_return(v)
64918 #else
64919 #define dbfs_atomic_inc(v) 0
64920 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
64921 atomic_read(&cf_sk->sk.sk_rmem_alloc),
64922 sk_rcvbuf_lowwater(cf_sk));
64923 set_rx_flow_off(cf_sk);
64924 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64925 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64926 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64927 }
64928
64929 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
64930 set_rx_flow_off(cf_sk);
64931 if (net_ratelimit())
64932 pr_debug("sending flow OFF due to rmem_schedule\n");
64933 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64934 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64935 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64936 }
64937 skb->dev = NULL;
64938 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
64939 switch (flow) {
64940 case CAIF_CTRLCMD_FLOW_ON_IND:
64941 /* OK from modem to start sending again */
64942 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
64943 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
64944 set_tx_flow_on(cf_sk);
64945 cf_sk->sk.sk_state_change(&cf_sk->sk);
64946 break;
64947
64948 case CAIF_CTRLCMD_FLOW_OFF_IND:
64949 /* Modem asks us to shut up */
64950 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
64951 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
64952 set_tx_flow_off(cf_sk);
64953 cf_sk->sk.sk_state_change(&cf_sk->sk);
64954 break;
64955 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
64956 /* We're now connected */
64957 caif_client_register_refcnt(&cf_sk->layer,
64958 cfsk_hold, cfsk_put);
64959 - dbfs_atomic_inc(&cnt.num_connect_resp);
64960 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
64961 cf_sk->sk.sk_state = CAIF_CONNECTED;
64962 set_tx_flow_on(cf_sk);
64963 cf_sk->sk.sk_state_change(&cf_sk->sk);
64964 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
64965
64966 case CAIF_CTRLCMD_INIT_FAIL_RSP:
64967 /* Connect request failed */
64968 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
64969 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
64970 cf_sk->sk.sk_err = ECONNREFUSED;
64971 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
64972 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64973 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
64974
64975 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
64976 /* Modem has closed this connection, or device is down. */
64977 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
64978 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
64979 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64980 cf_sk->sk.sk_err = ECONNRESET;
64981 set_rx_flow_on(cf_sk);
64982 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
64983 return;
64984
64985 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
64986 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
64987 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
64988 set_rx_flow_on(cf_sk);
64989 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
64990 }
64991 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
64992 /*ifindex = id of the interface.*/
64993 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
64994
64995 - dbfs_atomic_inc(&cnt.num_connect_req);
64996 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
64997 cf_sk->layer.receive = caif_sktrecv_cb;
64998
64999 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
65000 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
65001 spin_unlock_bh(&sk->sk_receive_queue.lock);
65002 sock->sk = NULL;
65003
65004 - dbfs_atomic_inc(&cnt.num_disconnect);
65005 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65006
65007 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
65008 if (cf_sk->debugfs_socket_dir != NULL)
65009 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
65010 cf_sk->conn_req.protocol = protocol;
65011 /* Increase the number of sockets created. */
65012 dbfs_atomic_inc(&cnt.caif_nr_socks);
65013 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
65014 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
65015 #ifdef CONFIG_DEBUG_FS
65016 if (!IS_ERR(debugfsdir)) {
65017
65018 diff -urNp linux-3.0.3/net/caif/cfctrl.c linux-3.0.3/net/caif/cfctrl.c
65019 --- linux-3.0.3/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
65020 +++ linux-3.0.3/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
65021 @@ -9,6 +9,7 @@
65022 #include <linux/stddef.h>
65023 #include <linux/spinlock.h>
65024 #include <linux/slab.h>
65025 +#include <linux/sched.h>
65026 #include <net/caif/caif_layer.h>
65027 #include <net/caif/cfpkt.h>
65028 #include <net/caif/cfctrl.h>
65029 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
65030 dev_info.id = 0xff;
65031 memset(this, 0, sizeof(*this));
65032 cfsrvl_init(&this->serv, 0, &dev_info, false);
65033 - atomic_set(&this->req_seq_no, 1);
65034 - atomic_set(&this->rsp_seq_no, 1);
65035 + atomic_set_unchecked(&this->req_seq_no, 1);
65036 + atomic_set_unchecked(&this->rsp_seq_no, 1);
65037 this->serv.layer.receive = cfctrl_recv;
65038 sprintf(this->serv.layer.name, "ctrl");
65039 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65040 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
65041 struct cfctrl_request_info *req)
65042 {
65043 spin_lock_bh(&ctrl->info_list_lock);
65044 - atomic_inc(&ctrl->req_seq_no);
65045 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
65046 + atomic_inc_unchecked(&ctrl->req_seq_no);
65047 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65048 list_add_tail(&req->list, &ctrl->list);
65049 spin_unlock_bh(&ctrl->info_list_lock);
65050 }
65051 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
65052 if (p != first)
65053 pr_warn("Requests are not received in order\n");
65054
65055 - atomic_set(&ctrl->rsp_seq_no,
65056 + atomic_set_unchecked(&ctrl->rsp_seq_no,
65057 p->sequence_no);
65058 list_del(&p->list);
65059 goto out;
65060 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
65061 struct cfctrl *cfctrl = container_obj(layer);
65062 struct cfctrl_request_info rsp, *req;
65063
65064 + pax_track_stack();
65065
65066 cfpkt_extr_head(pkt, &cmdrsp, 1);
65067 cmd = cmdrsp & CFCTRL_CMD_MASK;
65068 diff -urNp linux-3.0.3/net/core/datagram.c linux-3.0.3/net/core/datagram.c
65069 --- linux-3.0.3/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
65070 +++ linux-3.0.3/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
65071 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65072 }
65073
65074 kfree_skb(skb);
65075 - atomic_inc(&sk->sk_drops);
65076 + atomic_inc_unchecked(&sk->sk_drops);
65077 sk_mem_reclaim_partial(sk);
65078
65079 return err;
65080 diff -urNp linux-3.0.3/net/core/dev.c linux-3.0.3/net/core/dev.c
65081 --- linux-3.0.3/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
65082 +++ linux-3.0.3/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
65083 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65084 if (no_module && capable(CAP_NET_ADMIN))
65085 no_module = request_module("netdev-%s", name);
65086 if (no_module && capable(CAP_SYS_MODULE)) {
65087 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65088 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65089 +#else
65090 if (!request_module("%s", name))
65091 pr_err("Loading kernel module for a network device "
65092 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65093 "instead\n", name);
65094 +#endif
65095 }
65096 }
65097 EXPORT_SYMBOL(dev_load);
65098 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
65099
65100 struct dev_gso_cb {
65101 void (*destructor)(struct sk_buff *skb);
65102 -};
65103 +} __no_const;
65104
65105 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65106
65107 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
65108 }
65109 EXPORT_SYMBOL(netif_rx_ni);
65110
65111 -static void net_tx_action(struct softirq_action *h)
65112 +static void net_tx_action(void)
65113 {
65114 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65115
65116 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
65117 }
65118 EXPORT_SYMBOL(netif_napi_del);
65119
65120 -static void net_rx_action(struct softirq_action *h)
65121 +static void net_rx_action(void)
65122 {
65123 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65124 unsigned long time_limit = jiffies + 2;
65125 diff -urNp linux-3.0.3/net/core/flow.c linux-3.0.3/net/core/flow.c
65126 --- linux-3.0.3/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
65127 +++ linux-3.0.3/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
65128 @@ -60,7 +60,7 @@ struct flow_cache {
65129 struct timer_list rnd_timer;
65130 };
65131
65132 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65133 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65134 EXPORT_SYMBOL(flow_cache_genid);
65135 static struct flow_cache flow_cache_global;
65136 static struct kmem_cache *flow_cachep __read_mostly;
65137 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65138
65139 static int flow_entry_valid(struct flow_cache_entry *fle)
65140 {
65141 - if (atomic_read(&flow_cache_genid) != fle->genid)
65142 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65143 return 0;
65144 if (fle->object && !fle->object->ops->check(fle->object))
65145 return 0;
65146 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65147 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65148 fcp->hash_count++;
65149 }
65150 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65151 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65152 flo = fle->object;
65153 if (!flo)
65154 goto ret_object;
65155 @@ -274,7 +274,7 @@ nocache:
65156 }
65157 flo = resolver(net, key, family, dir, flo, ctx);
65158 if (fle) {
65159 - fle->genid = atomic_read(&flow_cache_genid);
65160 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65161 if (!IS_ERR(flo))
65162 fle->object = flo;
65163 else
65164 diff -urNp linux-3.0.3/net/core/rtnetlink.c linux-3.0.3/net/core/rtnetlink.c
65165 --- linux-3.0.3/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
65166 +++ linux-3.0.3/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
65167 @@ -56,7 +56,7 @@
65168 struct rtnl_link {
65169 rtnl_doit_func doit;
65170 rtnl_dumpit_func dumpit;
65171 -};
65172 +} __no_const;
65173
65174 static DEFINE_MUTEX(rtnl_mutex);
65175
65176 diff -urNp linux-3.0.3/net/core/skbuff.c linux-3.0.3/net/core/skbuff.c
65177 --- linux-3.0.3/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
65178 +++ linux-3.0.3/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
65179 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
65180 struct sock *sk = skb->sk;
65181 int ret = 0;
65182
65183 + pax_track_stack();
65184 +
65185 if (splice_grow_spd(pipe, &spd))
65186 return -ENOMEM;
65187
65188 diff -urNp linux-3.0.3/net/core/sock.c linux-3.0.3/net/core/sock.c
65189 --- linux-3.0.3/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
65190 +++ linux-3.0.3/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
65191 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65192 */
65193 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65194 (unsigned)sk->sk_rcvbuf) {
65195 - atomic_inc(&sk->sk_drops);
65196 + atomic_inc_unchecked(&sk->sk_drops);
65197 return -ENOMEM;
65198 }
65199
65200 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65201 return err;
65202
65203 if (!sk_rmem_schedule(sk, skb->truesize)) {
65204 - atomic_inc(&sk->sk_drops);
65205 + atomic_inc_unchecked(&sk->sk_drops);
65206 return -ENOBUFS;
65207 }
65208
65209 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65210 skb_dst_force(skb);
65211
65212 spin_lock_irqsave(&list->lock, flags);
65213 - skb->dropcount = atomic_read(&sk->sk_drops);
65214 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65215 __skb_queue_tail(list, skb);
65216 spin_unlock_irqrestore(&list->lock, flags);
65217
65218 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65219 skb->dev = NULL;
65220
65221 if (sk_rcvqueues_full(sk, skb)) {
65222 - atomic_inc(&sk->sk_drops);
65223 + atomic_inc_unchecked(&sk->sk_drops);
65224 goto discard_and_relse;
65225 }
65226 if (nested)
65227 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65228 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65229 } else if (sk_add_backlog(sk, skb)) {
65230 bh_unlock_sock(sk);
65231 - atomic_inc(&sk->sk_drops);
65232 + atomic_inc_unchecked(&sk->sk_drops);
65233 goto discard_and_relse;
65234 }
65235
65236 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
65237 if (len > sizeof(peercred))
65238 len = sizeof(peercred);
65239 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
65240 - if (copy_to_user(optval, &peercred, len))
65241 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
65242 return -EFAULT;
65243 goto lenout;
65244 }
65245 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65246 return -ENOTCONN;
65247 if (lv < len)
65248 return -EINVAL;
65249 - if (copy_to_user(optval, address, len))
65250 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65251 return -EFAULT;
65252 goto lenout;
65253 }
65254 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65255
65256 if (len > lv)
65257 len = lv;
65258 - if (copy_to_user(optval, &v, len))
65259 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65260 return -EFAULT;
65261 lenout:
65262 if (put_user(len, optlen))
65263 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65264 */
65265 smp_wmb();
65266 atomic_set(&sk->sk_refcnt, 1);
65267 - atomic_set(&sk->sk_drops, 0);
65268 + atomic_set_unchecked(&sk->sk_drops, 0);
65269 }
65270 EXPORT_SYMBOL(sock_init_data);
65271
65272 diff -urNp linux-3.0.3/net/decnet/sysctl_net_decnet.c linux-3.0.3/net/decnet/sysctl_net_decnet.c
65273 --- linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
65274 +++ linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
65275 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65276
65277 if (len > *lenp) len = *lenp;
65278
65279 - if (copy_to_user(buffer, addr, len))
65280 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65281 return -EFAULT;
65282
65283 *lenp = len;
65284 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65285
65286 if (len > *lenp) len = *lenp;
65287
65288 - if (copy_to_user(buffer, devname, len))
65289 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65290 return -EFAULT;
65291
65292 *lenp = len;
65293 diff -urNp linux-3.0.3/net/econet/Kconfig linux-3.0.3/net/econet/Kconfig
65294 --- linux-3.0.3/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
65295 +++ linux-3.0.3/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
65296 @@ -4,7 +4,7 @@
65297
65298 config ECONET
65299 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65300 - depends on EXPERIMENTAL && INET
65301 + depends on EXPERIMENTAL && INET && BROKEN
65302 ---help---
65303 Econet is a fairly old and slow networking protocol mainly used by
65304 Acorn computers to access file and print servers. It uses native
65305 diff -urNp linux-3.0.3/net/ipv4/fib_frontend.c linux-3.0.3/net/ipv4/fib_frontend.c
65306 --- linux-3.0.3/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
65307 +++ linux-3.0.3/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
65308 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
65309 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65310 fib_sync_up(dev);
65311 #endif
65312 - atomic_inc(&net->ipv4.dev_addr_genid);
65313 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65314 rt_cache_flush(dev_net(dev), -1);
65315 break;
65316 case NETDEV_DOWN:
65317 fib_del_ifaddr(ifa, NULL);
65318 - atomic_inc(&net->ipv4.dev_addr_genid);
65319 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65320 if (ifa->ifa_dev->ifa_list == NULL) {
65321 /* Last address was deleted from this interface.
65322 * Disable IP.
65323 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
65324 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65325 fib_sync_up(dev);
65326 #endif
65327 - atomic_inc(&net->ipv4.dev_addr_genid);
65328 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65329 rt_cache_flush(dev_net(dev), -1);
65330 break;
65331 case NETDEV_DOWN:
65332 diff -urNp linux-3.0.3/net/ipv4/fib_semantics.c linux-3.0.3/net/ipv4/fib_semantics.c
65333 --- linux-3.0.3/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
65334 +++ linux-3.0.3/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
65335 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
65336 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65337 nh->nh_gw,
65338 nh->nh_parent->fib_scope);
65339 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65340 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65341
65342 return nh->nh_saddr;
65343 }
65344 diff -urNp linux-3.0.3/net/ipv4/inet_diag.c linux-3.0.3/net/ipv4/inet_diag.c
65345 --- linux-3.0.3/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
65346 +++ linux-3.0.3/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
65347 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65348 r->idiag_retrans = 0;
65349
65350 r->id.idiag_if = sk->sk_bound_dev_if;
65351 +
65352 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65353 + r->id.idiag_cookie[0] = 0;
65354 + r->id.idiag_cookie[1] = 0;
65355 +#else
65356 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65357 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65358 +#endif
65359
65360 r->id.idiag_sport = inet->inet_sport;
65361 r->id.idiag_dport = inet->inet_dport;
65362 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65363 r->idiag_family = tw->tw_family;
65364 r->idiag_retrans = 0;
65365 r->id.idiag_if = tw->tw_bound_dev_if;
65366 +
65367 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65368 + r->id.idiag_cookie[0] = 0;
65369 + r->id.idiag_cookie[1] = 0;
65370 +#else
65371 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65372 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65373 +#endif
65374 +
65375 r->id.idiag_sport = tw->tw_sport;
65376 r->id.idiag_dport = tw->tw_dport;
65377 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65378 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65379 if (sk == NULL)
65380 goto unlock;
65381
65382 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65383 err = -ESTALE;
65384 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65385 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65386 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65387 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65388 goto out;
65389 +#endif
65390
65391 err = -ENOMEM;
65392 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65393 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65394 r->idiag_retrans = req->retrans;
65395
65396 r->id.idiag_if = sk->sk_bound_dev_if;
65397 +
65398 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65399 + r->id.idiag_cookie[0] = 0;
65400 + r->id.idiag_cookie[1] = 0;
65401 +#else
65402 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65403 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65404 +#endif
65405
65406 tmo = req->expires - jiffies;
65407 if (tmo < 0)
65408 diff -urNp linux-3.0.3/net/ipv4/inet_hashtables.c linux-3.0.3/net/ipv4/inet_hashtables.c
65409 --- linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
65410 +++ linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
65411 @@ -18,12 +18,15 @@
65412 #include <linux/sched.h>
65413 #include <linux/slab.h>
65414 #include <linux/wait.h>
65415 +#include <linux/security.h>
65416
65417 #include <net/inet_connection_sock.h>
65418 #include <net/inet_hashtables.h>
65419 #include <net/secure_seq.h>
65420 #include <net/ip.h>
65421
65422 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65423 +
65424 /*
65425 * Allocate and initialize a new local port bind bucket.
65426 * The bindhash mutex for snum's hash chain must be held here.
65427 @@ -530,6 +533,8 @@ ok:
65428 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65429 spin_unlock(&head->lock);
65430
65431 + gr_update_task_in_ip_table(current, inet_sk(sk));
65432 +
65433 if (tw) {
65434 inet_twsk_deschedule(tw, death_row);
65435 while (twrefcnt) {
65436 diff -urNp linux-3.0.3/net/ipv4/inetpeer.c linux-3.0.3/net/ipv4/inetpeer.c
65437 --- linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
65438 +++ linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
65439 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
65440 unsigned int sequence;
65441 int invalidated, newrefcnt = 0;
65442
65443 + pax_track_stack();
65444 +
65445 /* Look up for the address quickly, lockless.
65446 * Because of a concurrent writer, we might not find an existing entry.
65447 */
65448 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
65449 if (p) {
65450 p->daddr = *daddr;
65451 atomic_set(&p->refcnt, 1);
65452 - atomic_set(&p->rid, 0);
65453 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65454 + atomic_set_unchecked(&p->rid, 0);
65455 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65456 p->tcp_ts_stamp = 0;
65457 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65458 p->rate_tokens = 0;
65459 diff -urNp linux-3.0.3/net/ipv4/ip_fragment.c linux-3.0.3/net/ipv4/ip_fragment.c
65460 --- linux-3.0.3/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
65461 +++ linux-3.0.3/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
65462 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
65463 return 0;
65464
65465 start = qp->rid;
65466 - end = atomic_inc_return(&peer->rid);
65467 + end = atomic_inc_return_unchecked(&peer->rid);
65468 qp->rid = end;
65469
65470 rc = qp->q.fragments && (end - start) > max;
65471 diff -urNp linux-3.0.3/net/ipv4/ip_sockglue.c linux-3.0.3/net/ipv4/ip_sockglue.c
65472 --- linux-3.0.3/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65473 +++ linux-3.0.3/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65474 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
65475 int val;
65476 int len;
65477
65478 + pax_track_stack();
65479 +
65480 if (level != SOL_IP)
65481 return -EOPNOTSUPP;
65482
65483 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
65484 len = min_t(unsigned int, len, opt->optlen);
65485 if (put_user(len, optlen))
65486 return -EFAULT;
65487 - if (copy_to_user(optval, opt->__data, len))
65488 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
65489 + copy_to_user(optval, opt->__data, len))
65490 return -EFAULT;
65491 return 0;
65492 }
65493 diff -urNp linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c
65494 --- linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
65495 +++ linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
65496 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65497
65498 *len = 0;
65499
65500 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65501 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65502 if (*octets == NULL) {
65503 if (net_ratelimit())
65504 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65505 diff -urNp linux-3.0.3/net/ipv4/ping.c linux-3.0.3/net/ipv4/ping.c
65506 --- linux-3.0.3/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
65507 +++ linux-3.0.3/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
65508 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
65509 sk_rmem_alloc_get(sp),
65510 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65511 atomic_read(&sp->sk_refcnt), sp,
65512 - atomic_read(&sp->sk_drops), len);
65513 + atomic_read_unchecked(&sp->sk_drops), len);
65514 }
65515
65516 static int ping_seq_show(struct seq_file *seq, void *v)
65517 diff -urNp linux-3.0.3/net/ipv4/raw.c linux-3.0.3/net/ipv4/raw.c
65518 --- linux-3.0.3/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
65519 +++ linux-3.0.3/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
65520 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65521 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65522 {
65523 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65524 - atomic_inc(&sk->sk_drops);
65525 + atomic_inc_unchecked(&sk->sk_drops);
65526 kfree_skb(skb);
65527 return NET_RX_DROP;
65528 }
65529 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
65530
65531 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65532 {
65533 + struct icmp_filter filter;
65534 +
65535 if (optlen > sizeof(struct icmp_filter))
65536 optlen = sizeof(struct icmp_filter);
65537 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65538 + if (copy_from_user(&filter, optval, optlen))
65539 return -EFAULT;
65540 + raw_sk(sk)->filter = filter;
65541 return 0;
65542 }
65543
65544 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65545 {
65546 int len, ret = -EFAULT;
65547 + struct icmp_filter filter;
65548
65549 if (get_user(len, optlen))
65550 goto out;
65551 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
65552 if (len > sizeof(struct icmp_filter))
65553 len = sizeof(struct icmp_filter);
65554 ret = -EFAULT;
65555 - if (put_user(len, optlen) ||
65556 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65557 + filter = raw_sk(sk)->filter;
65558 + if (put_user(len, optlen) || len > sizeof filter ||
65559 + copy_to_user(optval, &filter, len))
65560 goto out;
65561 ret = 0;
65562 out: return ret;
65563 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
65564 sk_wmem_alloc_get(sp),
65565 sk_rmem_alloc_get(sp),
65566 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65567 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65568 + atomic_read(&sp->sk_refcnt),
65569 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65570 + NULL,
65571 +#else
65572 + sp,
65573 +#endif
65574 + atomic_read_unchecked(&sp->sk_drops));
65575 }
65576
65577 static int raw_seq_show(struct seq_file *seq, void *v)
65578 diff -urNp linux-3.0.3/net/ipv4/route.c linux-3.0.3/net/ipv4/route.c
65579 --- linux-3.0.3/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
65580 +++ linux-3.0.3/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
65581 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
65582
65583 static inline int rt_genid(struct net *net)
65584 {
65585 - return atomic_read(&net->ipv4.rt_genid);
65586 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65587 }
65588
65589 #ifdef CONFIG_PROC_FS
65590 @@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
65591 unsigned char shuffle;
65592
65593 get_random_bytes(&shuffle, sizeof(shuffle));
65594 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65595 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65596 }
65597
65598 /*
65599 @@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
65600 error = rt->dst.error;
65601 if (peer) {
65602 inet_peer_refcheck(rt->peer);
65603 - id = atomic_read(&peer->ip_id_count) & 0xffff;
65604 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
65605 if (peer->tcp_ts_stamp) {
65606 ts = peer->tcp_ts;
65607 tsage = get_seconds() - peer->tcp_ts_stamp;
65608 diff -urNp linux-3.0.3/net/ipv4/tcp.c linux-3.0.3/net/ipv4/tcp.c
65609 --- linux-3.0.3/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
65610 +++ linux-3.0.3/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
65611 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
65612 int val;
65613 int err = 0;
65614
65615 + pax_track_stack();
65616 +
65617 /* These are data/string values, all the others are ints */
65618 switch (optname) {
65619 case TCP_CONGESTION: {
65620 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
65621 struct tcp_sock *tp = tcp_sk(sk);
65622 int val, len;
65623
65624 + pax_track_stack();
65625 +
65626 if (get_user(len, optlen))
65627 return -EFAULT;
65628
65629 diff -urNp linux-3.0.3/net/ipv4/tcp_ipv4.c linux-3.0.3/net/ipv4/tcp_ipv4.c
65630 --- linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
65631 +++ linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
65632 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65633 int sysctl_tcp_low_latency __read_mostly;
65634 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65635
65636 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65637 +extern int grsec_enable_blackhole;
65638 +#endif
65639
65640 #ifdef CONFIG_TCP_MD5SIG
65641 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65642 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65643 return 0;
65644
65645 reset:
65646 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65647 + if (!grsec_enable_blackhole)
65648 +#endif
65649 tcp_v4_send_reset(rsk, skb);
65650 discard:
65651 kfree_skb(skb);
65652 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65653 TCP_SKB_CB(skb)->sacked = 0;
65654
65655 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65656 - if (!sk)
65657 + if (!sk) {
65658 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65659 + ret = 1;
65660 +#endif
65661 goto no_tcp_socket;
65662 -
65663 + }
65664 process:
65665 - if (sk->sk_state == TCP_TIME_WAIT)
65666 + if (sk->sk_state == TCP_TIME_WAIT) {
65667 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65668 + ret = 2;
65669 +#endif
65670 goto do_time_wait;
65671 + }
65672
65673 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65674 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65675 @@ -1724,6 +1737,10 @@ no_tcp_socket:
65676 bad_packet:
65677 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65678 } else {
65679 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65680 + if (!grsec_enable_blackhole || (ret == 1 &&
65681 + (skb->dev->flags & IFF_LOOPBACK)))
65682 +#endif
65683 tcp_v4_send_reset(NULL, skb);
65684 }
65685
65686 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
65687 0, /* non standard timer */
65688 0, /* open_requests have no inode */
65689 atomic_read(&sk->sk_refcnt),
65690 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65691 + NULL,
65692 +#else
65693 req,
65694 +#endif
65695 len);
65696 }
65697
65698 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
65699 sock_i_uid(sk),
65700 icsk->icsk_probes_out,
65701 sock_i_ino(sk),
65702 - atomic_read(&sk->sk_refcnt), sk,
65703 + atomic_read(&sk->sk_refcnt),
65704 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65705 + NULL,
65706 +#else
65707 + sk,
65708 +#endif
65709 jiffies_to_clock_t(icsk->icsk_rto),
65710 jiffies_to_clock_t(icsk->icsk_ack.ato),
65711 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
65712 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
65713 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
65714 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
65715 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65716 - atomic_read(&tw->tw_refcnt), tw, len);
65717 + atomic_read(&tw->tw_refcnt),
65718 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65719 + NULL,
65720 +#else
65721 + tw,
65722 +#endif
65723 + len);
65724 }
65725
65726 #define TMPSZ 150
65727 diff -urNp linux-3.0.3/net/ipv4/tcp_minisocks.c linux-3.0.3/net/ipv4/tcp_minisocks.c
65728 --- linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
65729 +++ linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
65730 @@ -27,6 +27,10 @@
65731 #include <net/inet_common.h>
65732 #include <net/xfrm.h>
65733
65734 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65735 +extern int grsec_enable_blackhole;
65736 +#endif
65737 +
65738 int sysctl_tcp_syncookies __read_mostly = 1;
65739 EXPORT_SYMBOL(sysctl_tcp_syncookies);
65740
65741 @@ -745,6 +749,10 @@ listen_overflow:
65742
65743 embryonic_reset:
65744 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
65745 +
65746 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65747 + if (!grsec_enable_blackhole)
65748 +#endif
65749 if (!(flg & TCP_FLAG_RST))
65750 req->rsk_ops->send_reset(sk, skb);
65751
65752 diff -urNp linux-3.0.3/net/ipv4/tcp_output.c linux-3.0.3/net/ipv4/tcp_output.c
65753 --- linux-3.0.3/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
65754 +++ linux-3.0.3/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
65755 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
65756 int mss;
65757 int s_data_desired = 0;
65758
65759 + pax_track_stack();
65760 +
65761 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
65762 s_data_desired = cvp->s_data_desired;
65763 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
65764 diff -urNp linux-3.0.3/net/ipv4/tcp_probe.c linux-3.0.3/net/ipv4/tcp_probe.c
65765 --- linux-3.0.3/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
65766 +++ linux-3.0.3/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
65767 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
65768 if (cnt + width >= len)
65769 break;
65770
65771 - if (copy_to_user(buf + cnt, tbuf, width))
65772 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
65773 return -EFAULT;
65774 cnt += width;
65775 }
65776 diff -urNp linux-3.0.3/net/ipv4/tcp_timer.c linux-3.0.3/net/ipv4/tcp_timer.c
65777 --- linux-3.0.3/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
65778 +++ linux-3.0.3/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
65779 @@ -22,6 +22,10 @@
65780 #include <linux/gfp.h>
65781 #include <net/tcp.h>
65782
65783 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65784 +extern int grsec_lastack_retries;
65785 +#endif
65786 +
65787 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
65788 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
65789 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
65790 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
65791 }
65792 }
65793
65794 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65795 + if ((sk->sk_state == TCP_LAST_ACK) &&
65796 + (grsec_lastack_retries > 0) &&
65797 + (grsec_lastack_retries < retry_until))
65798 + retry_until = grsec_lastack_retries;
65799 +#endif
65800 +
65801 if (retransmits_timed_out(sk, retry_until,
65802 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
65803 /* Has it gone just too far? */
65804 diff -urNp linux-3.0.3/net/ipv4/udp.c linux-3.0.3/net/ipv4/udp.c
65805 --- linux-3.0.3/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
65806 +++ linux-3.0.3/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
65807 @@ -86,6 +86,7 @@
65808 #include <linux/types.h>
65809 #include <linux/fcntl.h>
65810 #include <linux/module.h>
65811 +#include <linux/security.h>
65812 #include <linux/socket.h>
65813 #include <linux/sockios.h>
65814 #include <linux/igmp.h>
65815 @@ -107,6 +108,10 @@
65816 #include <net/xfrm.h>
65817 #include "udp_impl.h"
65818
65819 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65820 +extern int grsec_enable_blackhole;
65821 +#endif
65822 +
65823 struct udp_table udp_table __read_mostly;
65824 EXPORT_SYMBOL(udp_table);
65825
65826 @@ -564,6 +569,9 @@ found:
65827 return s;
65828 }
65829
65830 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
65831 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
65832 +
65833 /*
65834 * This routine is called by the ICMP module when it gets some
65835 * sort of error condition. If err < 0 then the socket should
65836 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
65837 dport = usin->sin_port;
65838 if (dport == 0)
65839 return -EINVAL;
65840 +
65841 + err = gr_search_udp_sendmsg(sk, usin);
65842 + if (err)
65843 + return err;
65844 } else {
65845 if (sk->sk_state != TCP_ESTABLISHED)
65846 return -EDESTADDRREQ;
65847 +
65848 + err = gr_search_udp_sendmsg(sk, NULL);
65849 + if (err)
65850 + return err;
65851 +
65852 daddr = inet->inet_daddr;
65853 dport = inet->inet_dport;
65854 /* Open fast path for connected socket.
65855 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
65856 udp_lib_checksum_complete(skb)) {
65857 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65858 IS_UDPLITE(sk));
65859 - atomic_inc(&sk->sk_drops);
65860 + atomic_inc_unchecked(&sk->sk_drops);
65861 __skb_unlink(skb, rcvq);
65862 __skb_queue_tail(&list_kill, skb);
65863 }
65864 @@ -1184,6 +1201,10 @@ try_again:
65865 if (!skb)
65866 goto out;
65867
65868 + err = gr_search_udp_recvmsg(sk, skb);
65869 + if (err)
65870 + goto out_free;
65871 +
65872 ulen = skb->len - sizeof(struct udphdr);
65873 if (len > ulen)
65874 len = ulen;
65875 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
65876
65877 drop:
65878 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
65879 - atomic_inc(&sk->sk_drops);
65880 + atomic_inc_unchecked(&sk->sk_drops);
65881 kfree_skb(skb);
65882 return -1;
65883 }
65884 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
65885 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
65886
65887 if (!skb1) {
65888 - atomic_inc(&sk->sk_drops);
65889 + atomic_inc_unchecked(&sk->sk_drops);
65890 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
65891 IS_UDPLITE(sk));
65892 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65893 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
65894 goto csum_error;
65895
65896 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
65897 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65898 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
65899 +#endif
65900 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
65901
65902 /*
65903 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
65904 sk_wmem_alloc_get(sp),
65905 sk_rmem_alloc_get(sp),
65906 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65907 - atomic_read(&sp->sk_refcnt), sp,
65908 - atomic_read(&sp->sk_drops), len);
65909 + atomic_read(&sp->sk_refcnt),
65910 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65911 + NULL,
65912 +#else
65913 + sp,
65914 +#endif
65915 + atomic_read_unchecked(&sp->sk_drops), len);
65916 }
65917
65918 int udp4_seq_show(struct seq_file *seq, void *v)
65919 diff -urNp linux-3.0.3/net/ipv6/inet6_connection_sock.c linux-3.0.3/net/ipv6/inet6_connection_sock.c
65920 --- linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
65921 +++ linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
65922 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
65923 #ifdef CONFIG_XFRM
65924 {
65925 struct rt6_info *rt = (struct rt6_info *)dst;
65926 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
65927 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
65928 }
65929 #endif
65930 }
65931 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
65932 #ifdef CONFIG_XFRM
65933 if (dst) {
65934 struct rt6_info *rt = (struct rt6_info *)dst;
65935 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
65936 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
65937 __sk_dst_reset(sk);
65938 dst = NULL;
65939 }
65940 diff -urNp linux-3.0.3/net/ipv6/ipv6_sockglue.c linux-3.0.3/net/ipv6/ipv6_sockglue.c
65941 --- linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65942 +++ linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65943 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
65944 int val, valbool;
65945 int retv = -ENOPROTOOPT;
65946
65947 + pax_track_stack();
65948 +
65949 if (optval == NULL)
65950 val=0;
65951 else {
65952 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
65953 int len;
65954 int val;
65955
65956 + pax_track_stack();
65957 +
65958 if (ip6_mroute_opt(optname))
65959 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
65960
65961 diff -urNp linux-3.0.3/net/ipv6/raw.c linux-3.0.3/net/ipv6/raw.c
65962 --- linux-3.0.3/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
65963 +++ linux-3.0.3/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
65964 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
65965 {
65966 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
65967 skb_checksum_complete(skb)) {
65968 - atomic_inc(&sk->sk_drops);
65969 + atomic_inc_unchecked(&sk->sk_drops);
65970 kfree_skb(skb);
65971 return NET_RX_DROP;
65972 }
65973 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65974 struct raw6_sock *rp = raw6_sk(sk);
65975
65976 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
65977 - atomic_inc(&sk->sk_drops);
65978 + atomic_inc_unchecked(&sk->sk_drops);
65979 kfree_skb(skb);
65980 return NET_RX_DROP;
65981 }
65982 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65983
65984 if (inet->hdrincl) {
65985 if (skb_checksum_complete(skb)) {
65986 - atomic_inc(&sk->sk_drops);
65987 + atomic_inc_unchecked(&sk->sk_drops);
65988 kfree_skb(skb);
65989 return NET_RX_DROP;
65990 }
65991 @@ -601,7 +601,7 @@ out:
65992 return err;
65993 }
65994
65995 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
65996 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
65997 struct flowi6 *fl6, struct dst_entry **dstp,
65998 unsigned int flags)
65999 {
66000 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66001 u16 proto;
66002 int err;
66003
66004 + pax_track_stack();
66005 +
66006 /* Rough check on arithmetic overflow,
66007 better check is made in ip6_append_data().
66008 */
66009 @@ -909,12 +911,15 @@ do_confirm:
66010 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66011 char __user *optval, int optlen)
66012 {
66013 + struct icmp6_filter filter;
66014 +
66015 switch (optname) {
66016 case ICMPV6_FILTER:
66017 if (optlen > sizeof(struct icmp6_filter))
66018 optlen = sizeof(struct icmp6_filter);
66019 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66020 + if (copy_from_user(&filter, optval, optlen))
66021 return -EFAULT;
66022 + raw6_sk(sk)->filter = filter;
66023 return 0;
66024 default:
66025 return -ENOPROTOOPT;
66026 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66027 char __user *optval, int __user *optlen)
66028 {
66029 int len;
66030 + struct icmp6_filter filter;
66031
66032 switch (optname) {
66033 case ICMPV6_FILTER:
66034 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66035 len = sizeof(struct icmp6_filter);
66036 if (put_user(len, optlen))
66037 return -EFAULT;
66038 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66039 + filter = raw6_sk(sk)->filter;
66040 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
66041 return -EFAULT;
66042 return 0;
66043 default:
66044 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66045 0, 0L, 0,
66046 sock_i_uid(sp), 0,
66047 sock_i_ino(sp),
66048 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66049 + atomic_read(&sp->sk_refcnt),
66050 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66051 + NULL,
66052 +#else
66053 + sp,
66054 +#endif
66055 + atomic_read_unchecked(&sp->sk_drops));
66056 }
66057
66058 static int raw6_seq_show(struct seq_file *seq, void *v)
66059 diff -urNp linux-3.0.3/net/ipv6/tcp_ipv6.c linux-3.0.3/net/ipv6/tcp_ipv6.c
66060 --- linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
66061 +++ linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
66062 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66063 }
66064 #endif
66065
66066 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66067 +extern int grsec_enable_blackhole;
66068 +#endif
66069 +
66070 static void tcp_v6_hash(struct sock *sk)
66071 {
66072 if (sk->sk_state != TCP_CLOSE) {
66073 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66074 return 0;
66075
66076 reset:
66077 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66078 + if (!grsec_enable_blackhole)
66079 +#endif
66080 tcp_v6_send_reset(sk, skb);
66081 discard:
66082 if (opt_skb)
66083 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66084 TCP_SKB_CB(skb)->sacked = 0;
66085
66086 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66087 - if (!sk)
66088 + if (!sk) {
66089 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66090 + ret = 1;
66091 +#endif
66092 goto no_tcp_socket;
66093 + }
66094
66095 process:
66096 - if (sk->sk_state == TCP_TIME_WAIT)
66097 + if (sk->sk_state == TCP_TIME_WAIT) {
66098 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66099 + ret = 2;
66100 +#endif
66101 goto do_time_wait;
66102 + }
66103
66104 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66105 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66106 @@ -1794,6 +1809,10 @@ no_tcp_socket:
66107 bad_packet:
66108 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66109 } else {
66110 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66111 + if (!grsec_enable_blackhole || (ret == 1 &&
66112 + (skb->dev->flags & IFF_LOOPBACK)))
66113 +#endif
66114 tcp_v6_send_reset(NULL, skb);
66115 }
66116
66117 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
66118 uid,
66119 0, /* non standard timer */
66120 0, /* open_requests have no inode */
66121 - 0, req);
66122 + 0,
66123 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66124 + NULL
66125 +#else
66126 + req
66127 +#endif
66128 + );
66129 }
66130
66131 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66132 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
66133 sock_i_uid(sp),
66134 icsk->icsk_probes_out,
66135 sock_i_ino(sp),
66136 - atomic_read(&sp->sk_refcnt), sp,
66137 + atomic_read(&sp->sk_refcnt),
66138 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66139 + NULL,
66140 +#else
66141 + sp,
66142 +#endif
66143 jiffies_to_clock_t(icsk->icsk_rto),
66144 jiffies_to_clock_t(icsk->icsk_ack.ato),
66145 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66146 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
66147 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66148 tw->tw_substate, 0, 0,
66149 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66150 - atomic_read(&tw->tw_refcnt), tw);
66151 + atomic_read(&tw->tw_refcnt),
66152 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66153 + NULL
66154 +#else
66155 + tw
66156 +#endif
66157 + );
66158 }
66159
66160 static int tcp6_seq_show(struct seq_file *seq, void *v)
66161 diff -urNp linux-3.0.3/net/ipv6/udp.c linux-3.0.3/net/ipv6/udp.c
66162 --- linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
66163 +++ linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
66164 @@ -50,6 +50,10 @@
66165 #include <linux/seq_file.h>
66166 #include "udp_impl.h"
66167
66168 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66169 +extern int grsec_enable_blackhole;
66170 +#endif
66171 +
66172 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66173 {
66174 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66175 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66176
66177 return 0;
66178 drop:
66179 - atomic_inc(&sk->sk_drops);
66180 + atomic_inc_unchecked(&sk->sk_drops);
66181 drop_no_sk_drops_inc:
66182 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66183 kfree_skb(skb);
66184 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66185 continue;
66186 }
66187 drop:
66188 - atomic_inc(&sk->sk_drops);
66189 + atomic_inc_unchecked(&sk->sk_drops);
66190 UDP6_INC_STATS_BH(sock_net(sk),
66191 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66192 UDP6_INC_STATS_BH(sock_net(sk),
66193 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66194 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66195 proto == IPPROTO_UDPLITE);
66196
66197 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66198 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66199 +#endif
66200 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66201
66202 kfree_skb(skb);
66203 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66204 if (!sock_owned_by_user(sk))
66205 udpv6_queue_rcv_skb(sk, skb);
66206 else if (sk_add_backlog(sk, skb)) {
66207 - atomic_inc(&sk->sk_drops);
66208 + atomic_inc_unchecked(&sk->sk_drops);
66209 bh_unlock_sock(sk);
66210 sock_put(sk);
66211 goto discard;
66212 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66213 0, 0L, 0,
66214 sock_i_uid(sp), 0,
66215 sock_i_ino(sp),
66216 - atomic_read(&sp->sk_refcnt), sp,
66217 - atomic_read(&sp->sk_drops));
66218 + atomic_read(&sp->sk_refcnt),
66219 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66220 + NULL,
66221 +#else
66222 + sp,
66223 +#endif
66224 + atomic_read_unchecked(&sp->sk_drops));
66225 }
66226
66227 int udp6_seq_show(struct seq_file *seq, void *v)
66228 diff -urNp linux-3.0.3/net/irda/ircomm/ircomm_tty.c linux-3.0.3/net/irda/ircomm/ircomm_tty.c
66229 --- linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
66230 +++ linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
66231 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
66232 add_wait_queue(&self->open_wait, &wait);
66233
66234 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66235 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66236 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66237
66238 /* As far as I can see, we protect open_count - Jean II */
66239 spin_lock_irqsave(&self->spinlock, flags);
66240 if (!tty_hung_up_p(filp)) {
66241 extra_count = 1;
66242 - self->open_count--;
66243 + local_dec(&self->open_count);
66244 }
66245 spin_unlock_irqrestore(&self->spinlock, flags);
66246 - self->blocked_open++;
66247 + local_inc(&self->blocked_open);
66248
66249 while (1) {
66250 if (tty->termios->c_cflag & CBAUD) {
66251 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
66252 }
66253
66254 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66255 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66256 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66257
66258 schedule();
66259 }
66260 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
66261 if (extra_count) {
66262 /* ++ is not atomic, so this should be protected - Jean II */
66263 spin_lock_irqsave(&self->spinlock, flags);
66264 - self->open_count++;
66265 + local_inc(&self->open_count);
66266 spin_unlock_irqrestore(&self->spinlock, flags);
66267 }
66268 - self->blocked_open--;
66269 + local_dec(&self->blocked_open);
66270
66271 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66272 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66273 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66274
66275 if (!retval)
66276 self->flags |= ASYNC_NORMAL_ACTIVE;
66277 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
66278 }
66279 /* ++ is not atomic, so this should be protected - Jean II */
66280 spin_lock_irqsave(&self->spinlock, flags);
66281 - self->open_count++;
66282 + local_inc(&self->open_count);
66283
66284 tty->driver_data = self;
66285 self->tty = tty;
66286 spin_unlock_irqrestore(&self->spinlock, flags);
66287
66288 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66289 - self->line, self->open_count);
66290 + self->line, local_read(&self->open_count));
66291
66292 /* Not really used by us, but lets do it anyway */
66293 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66294 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
66295 return;
66296 }
66297
66298 - if ((tty->count == 1) && (self->open_count != 1)) {
66299 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66300 /*
66301 * Uh, oh. tty->count is 1, which means that the tty
66302 * structure will be freed. state->count should always
66303 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
66304 */
66305 IRDA_DEBUG(0, "%s(), bad serial port count; "
66306 "tty->count is 1, state->count is %d\n", __func__ ,
66307 - self->open_count);
66308 - self->open_count = 1;
66309 + local_read(&self->open_count));
66310 + local_set(&self->open_count, 1);
66311 }
66312
66313 - if (--self->open_count < 0) {
66314 + if (local_dec_return(&self->open_count) < 0) {
66315 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66316 - __func__, self->line, self->open_count);
66317 - self->open_count = 0;
66318 + __func__, self->line, local_read(&self->open_count));
66319 + local_set(&self->open_count, 0);
66320 }
66321 - if (self->open_count) {
66322 + if (local_read(&self->open_count)) {
66323 spin_unlock_irqrestore(&self->spinlock, flags);
66324
66325 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66326 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
66327 tty->closing = 0;
66328 self->tty = NULL;
66329
66330 - if (self->blocked_open) {
66331 + if (local_read(&self->blocked_open)) {
66332 if (self->close_delay)
66333 schedule_timeout_interruptible(self->close_delay);
66334 wake_up_interruptible(&self->open_wait);
66335 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
66336 spin_lock_irqsave(&self->spinlock, flags);
66337 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66338 self->tty = NULL;
66339 - self->open_count = 0;
66340 + local_set(&self->open_count, 0);
66341 spin_unlock_irqrestore(&self->spinlock, flags);
66342
66343 wake_up_interruptible(&self->open_wait);
66344 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
66345 seq_putc(m, '\n');
66346
66347 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66348 - seq_printf(m, "Open count: %d\n", self->open_count);
66349 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66350 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66351 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66352
66353 diff -urNp linux-3.0.3/net/iucv/af_iucv.c linux-3.0.3/net/iucv/af_iucv.c
66354 --- linux-3.0.3/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
66355 +++ linux-3.0.3/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
66356 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
66357
66358 write_lock_bh(&iucv_sk_list.lock);
66359
66360 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66361 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66362 while (__iucv_get_sock_by_name(name)) {
66363 sprintf(name, "%08x",
66364 - atomic_inc_return(&iucv_sk_list.autobind_name));
66365 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66366 }
66367
66368 write_unlock_bh(&iucv_sk_list.lock);
66369 diff -urNp linux-3.0.3/net/key/af_key.c linux-3.0.3/net/key/af_key.c
66370 --- linux-3.0.3/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
66371 +++ linux-3.0.3/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
66372 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66373 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66374 struct xfrm_kmaddress k;
66375
66376 + pax_track_stack();
66377 +
66378 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66379 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66380 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66381 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66382 static u32 get_acqseq(void)
66383 {
66384 u32 res;
66385 - static atomic_t acqseq;
66386 + static atomic_unchecked_t acqseq;
66387
66388 do {
66389 - res = atomic_inc_return(&acqseq);
66390 + res = atomic_inc_return_unchecked(&acqseq);
66391 } while (!res);
66392 return res;
66393 }
66394 diff -urNp linux-3.0.3/net/lapb/lapb_iface.c linux-3.0.3/net/lapb/lapb_iface.c
66395 --- linux-3.0.3/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
66396 +++ linux-3.0.3/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
66397 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66398 goto out;
66399
66400 lapb->dev = dev;
66401 - lapb->callbacks = *callbacks;
66402 + lapb->callbacks = callbacks;
66403
66404 __lapb_insert_cb(lapb);
66405
66406 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66407
66408 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66409 {
66410 - if (lapb->callbacks.connect_confirmation)
66411 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66412 + if (lapb->callbacks->connect_confirmation)
66413 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66414 }
66415
66416 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66417 {
66418 - if (lapb->callbacks.connect_indication)
66419 - lapb->callbacks.connect_indication(lapb->dev, reason);
66420 + if (lapb->callbacks->connect_indication)
66421 + lapb->callbacks->connect_indication(lapb->dev, reason);
66422 }
66423
66424 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66425 {
66426 - if (lapb->callbacks.disconnect_confirmation)
66427 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66428 + if (lapb->callbacks->disconnect_confirmation)
66429 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66430 }
66431
66432 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66433 {
66434 - if (lapb->callbacks.disconnect_indication)
66435 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66436 + if (lapb->callbacks->disconnect_indication)
66437 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66438 }
66439
66440 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66441 {
66442 - if (lapb->callbacks.data_indication)
66443 - return lapb->callbacks.data_indication(lapb->dev, skb);
66444 + if (lapb->callbacks->data_indication)
66445 + return lapb->callbacks->data_indication(lapb->dev, skb);
66446
66447 kfree_skb(skb);
66448 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66449 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66450 {
66451 int used = 0;
66452
66453 - if (lapb->callbacks.data_transmit) {
66454 - lapb->callbacks.data_transmit(lapb->dev, skb);
66455 + if (lapb->callbacks->data_transmit) {
66456 + lapb->callbacks->data_transmit(lapb->dev, skb);
66457 used = 1;
66458 }
66459
66460 diff -urNp linux-3.0.3/net/mac80211/debugfs_sta.c linux-3.0.3/net/mac80211/debugfs_sta.c
66461 --- linux-3.0.3/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
66462 +++ linux-3.0.3/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
66463 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
66464 struct tid_ampdu_rx *tid_rx;
66465 struct tid_ampdu_tx *tid_tx;
66466
66467 + pax_track_stack();
66468 +
66469 rcu_read_lock();
66470
66471 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66472 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
66473 struct sta_info *sta = file->private_data;
66474 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66475
66476 + pax_track_stack();
66477 +
66478 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66479 htc->ht_supported ? "" : "not ");
66480 if (htc->ht_supported) {
66481 diff -urNp linux-3.0.3/net/mac80211/ieee80211_i.h linux-3.0.3/net/mac80211/ieee80211_i.h
66482 --- linux-3.0.3/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
66483 +++ linux-3.0.3/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
66484 @@ -27,6 +27,7 @@
66485 #include <net/ieee80211_radiotap.h>
66486 #include <net/cfg80211.h>
66487 #include <net/mac80211.h>
66488 +#include <asm/local.h>
66489 #include "key.h"
66490 #include "sta_info.h"
66491
66492 @@ -721,7 +722,7 @@ struct ieee80211_local {
66493 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66494 spinlock_t queue_stop_reason_lock;
66495
66496 - int open_count;
66497 + local_t open_count;
66498 int monitors, cooked_mntrs;
66499 /* number of interfaces with corresponding FIF_ flags */
66500 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66501 diff -urNp linux-3.0.3/net/mac80211/iface.c linux-3.0.3/net/mac80211/iface.c
66502 --- linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
66503 +++ linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
66504 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66505 break;
66506 }
66507
66508 - if (local->open_count == 0) {
66509 + if (local_read(&local->open_count) == 0) {
66510 res = drv_start(local);
66511 if (res)
66512 goto err_del_bss;
66513 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66514 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66515
66516 if (!is_valid_ether_addr(dev->dev_addr)) {
66517 - if (!local->open_count)
66518 + if (!local_read(&local->open_count))
66519 drv_stop(local);
66520 return -EADDRNOTAVAIL;
66521 }
66522 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66523 mutex_unlock(&local->mtx);
66524
66525 if (coming_up)
66526 - local->open_count++;
66527 + local_inc(&local->open_count);
66528
66529 if (hw_reconf_flags) {
66530 ieee80211_hw_config(local, hw_reconf_flags);
66531 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66532 err_del_interface:
66533 drv_remove_interface(local, &sdata->vif);
66534 err_stop:
66535 - if (!local->open_count)
66536 + if (!local_read(&local->open_count))
66537 drv_stop(local);
66538 err_del_bss:
66539 sdata->bss = NULL;
66540 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
66541 }
66542
66543 if (going_down)
66544 - local->open_count--;
66545 + local_dec(&local->open_count);
66546
66547 switch (sdata->vif.type) {
66548 case NL80211_IFTYPE_AP_VLAN:
66549 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
66550
66551 ieee80211_recalc_ps(local, -1);
66552
66553 - if (local->open_count == 0) {
66554 + if (local_read(&local->open_count) == 0) {
66555 if (local->ops->napi_poll)
66556 napi_disable(&local->napi);
66557 ieee80211_clear_tx_pending(local);
66558 diff -urNp linux-3.0.3/net/mac80211/main.c linux-3.0.3/net/mac80211/main.c
66559 --- linux-3.0.3/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
66560 +++ linux-3.0.3/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
66561 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
66562 local->hw.conf.power_level = power;
66563 }
66564
66565 - if (changed && local->open_count) {
66566 + if (changed && local_read(&local->open_count)) {
66567 ret = drv_config(local, changed);
66568 /*
66569 * Goal:
66570 diff -urNp linux-3.0.3/net/mac80211/mlme.c linux-3.0.3/net/mac80211/mlme.c
66571 --- linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
66572 +++ linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
66573 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
66574 bool have_higher_than_11mbit = false;
66575 u16 ap_ht_cap_flags;
66576
66577 + pax_track_stack();
66578 +
66579 /* AssocResp and ReassocResp have identical structure */
66580
66581 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66582 diff -urNp linux-3.0.3/net/mac80211/pm.c linux-3.0.3/net/mac80211/pm.c
66583 --- linux-3.0.3/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
66584 +++ linux-3.0.3/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
66585 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
66586 cancel_work_sync(&local->dynamic_ps_enable_work);
66587 del_timer_sync(&local->dynamic_ps_timer);
66588
66589 - local->wowlan = wowlan && local->open_count;
66590 + local->wowlan = wowlan && local_read(&local->open_count);
66591 if (local->wowlan) {
66592 int err = drv_suspend(local, wowlan);
66593 if (err) {
66594 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
66595 }
66596
66597 /* stop hardware - this must stop RX */
66598 - if (local->open_count)
66599 + if (local_read(&local->open_count))
66600 ieee80211_stop_device(local);
66601
66602 suspend:
66603 diff -urNp linux-3.0.3/net/mac80211/rate.c linux-3.0.3/net/mac80211/rate.c
66604 --- linux-3.0.3/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
66605 +++ linux-3.0.3/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
66606 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66607
66608 ASSERT_RTNL();
66609
66610 - if (local->open_count)
66611 + if (local_read(&local->open_count))
66612 return -EBUSY;
66613
66614 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66615 diff -urNp linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c
66616 --- linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
66617 +++ linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
66618 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66619
66620 spin_unlock_irqrestore(&events->lock, status);
66621
66622 - if (copy_to_user(buf, pb, p))
66623 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66624 return -EFAULT;
66625
66626 return p;
66627 diff -urNp linux-3.0.3/net/mac80211/util.c linux-3.0.3/net/mac80211/util.c
66628 --- linux-3.0.3/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
66629 +++ linux-3.0.3/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
66630 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
66631 #endif
66632
66633 /* restart hardware */
66634 - if (local->open_count) {
66635 + if (local_read(&local->open_count)) {
66636 /*
66637 * Upon resume hardware can sometimes be goofy due to
66638 * various platform / driver / bus issues, so restarting
66639 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c
66640 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
66641 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
66642 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66643 /* Increase the refcnt counter of the dest */
66644 atomic_inc(&dest->refcnt);
66645
66646 - conn_flags = atomic_read(&dest->conn_flags);
66647 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
66648 if (cp->protocol != IPPROTO_UDP)
66649 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66650 /* Bind with the destination and its corresponding transmitter */
66651 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66652 atomic_set(&cp->refcnt, 1);
66653
66654 atomic_set(&cp->n_control, 0);
66655 - atomic_set(&cp->in_pkts, 0);
66656 + atomic_set_unchecked(&cp->in_pkts, 0);
66657
66658 atomic_inc(&ipvs->conn_count);
66659 if (flags & IP_VS_CONN_F_NO_CPORT)
66660 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66661
66662 /* Don't drop the entry if its number of incoming packets is not
66663 located in [0, 8] */
66664 - i = atomic_read(&cp->in_pkts);
66665 + i = atomic_read_unchecked(&cp->in_pkts);
66666 if (i > 8 || i < 0) return 0;
66667
66668 if (!todrop_rate[i]) return 0;
66669 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c
66670 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
66671 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
66672 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66673 ret = cp->packet_xmit(skb, cp, pd->pp);
66674 /* do not touch skb anymore */
66675
66676 - atomic_inc(&cp->in_pkts);
66677 + atomic_inc_unchecked(&cp->in_pkts);
66678 ip_vs_conn_put(cp);
66679 return ret;
66680 }
66681 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66682 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66683 pkts = sysctl_sync_threshold(ipvs);
66684 else
66685 - pkts = atomic_add_return(1, &cp->in_pkts);
66686 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66687
66688 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66689 cp->protocol == IPPROTO_SCTP) {
66690 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c
66691 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
66692 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
66693 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66694 ip_vs_rs_hash(ipvs, dest);
66695 write_unlock_bh(&ipvs->rs_lock);
66696 }
66697 - atomic_set(&dest->conn_flags, conn_flags);
66698 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
66699
66700 /* bind the service */
66701 if (!dest->svc) {
66702 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
66703 " %-7s %-6d %-10d %-10d\n",
66704 &dest->addr.in6,
66705 ntohs(dest->port),
66706 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66707 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66708 atomic_read(&dest->weight),
66709 atomic_read(&dest->activeconns),
66710 atomic_read(&dest->inactconns));
66711 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
66712 "%-7s %-6d %-10d %-10d\n",
66713 ntohl(dest->addr.ip),
66714 ntohs(dest->port),
66715 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66716 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66717 atomic_read(&dest->weight),
66718 atomic_read(&dest->activeconns),
66719 atomic_read(&dest->inactconns));
66720 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
66721 struct ip_vs_dest_user *udest_compat;
66722 struct ip_vs_dest_user_kern udest;
66723
66724 + pax_track_stack();
66725 +
66726 if (!capable(CAP_NET_ADMIN))
66727 return -EPERM;
66728
66729 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
66730
66731 entry.addr = dest->addr.ip;
66732 entry.port = dest->port;
66733 - entry.conn_flags = atomic_read(&dest->conn_flags);
66734 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
66735 entry.weight = atomic_read(&dest->weight);
66736 entry.u_threshold = dest->u_threshold;
66737 entry.l_threshold = dest->l_threshold;
66738 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
66739 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
66740
66741 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
66742 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66743 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66744 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
66745 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
66746 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
66747 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c
66748 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
66749 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
66750 @@ -648,7 +648,7 @@ control:
66751 * i.e only increment in_pkts for Templates.
66752 */
66753 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
66754 - int pkts = atomic_add_return(1, &cp->in_pkts);
66755 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66756
66757 if (pkts % sysctl_sync_period(ipvs) != 1)
66758 return;
66759 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
66760
66761 if (opt)
66762 memcpy(&cp->in_seq, opt, sizeof(*opt));
66763 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66764 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66765 cp->state = state;
66766 cp->old_state = cp->state;
66767 /*
66768 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c
66769 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
66770 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
66771 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
66772 else
66773 rc = NF_ACCEPT;
66774 /* do not touch skb anymore */
66775 - atomic_inc(&cp->in_pkts);
66776 + atomic_inc_unchecked(&cp->in_pkts);
66777 goto out;
66778 }
66779
66780 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
66781 else
66782 rc = NF_ACCEPT;
66783 /* do not touch skb anymore */
66784 - atomic_inc(&cp->in_pkts);
66785 + atomic_inc_unchecked(&cp->in_pkts);
66786 goto out;
66787 }
66788
66789 diff -urNp linux-3.0.3/net/netfilter/Kconfig linux-3.0.3/net/netfilter/Kconfig
66790 --- linux-3.0.3/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
66791 +++ linux-3.0.3/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
66792 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
66793
66794 To compile it as a module, choose M here. If unsure, say N.
66795
66796 +config NETFILTER_XT_MATCH_GRADM
66797 + tristate '"gradm" match support'
66798 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
66799 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
66800 + ---help---
66801 + The gradm match allows to match on grsecurity RBAC being enabled.
66802 + It is useful when iptables rules are applied early on bootup to
66803 + prevent connections to the machine (except from a trusted host)
66804 + while the RBAC system is disabled.
66805 +
66806 config NETFILTER_XT_MATCH_HASHLIMIT
66807 tristate '"hashlimit" match support'
66808 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
66809 diff -urNp linux-3.0.3/net/netfilter/Makefile linux-3.0.3/net/netfilter/Makefile
66810 --- linux-3.0.3/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
66811 +++ linux-3.0.3/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
66812 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
66813 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
66814 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
66815 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
66816 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
66817 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
66818 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
66819 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
66820 diff -urNp linux-3.0.3/net/netfilter/nfnetlink_log.c linux-3.0.3/net/netfilter/nfnetlink_log.c
66821 --- linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
66822 +++ linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
66823 @@ -70,7 +70,7 @@ struct nfulnl_instance {
66824 };
66825
66826 static DEFINE_SPINLOCK(instances_lock);
66827 -static atomic_t global_seq;
66828 +static atomic_unchecked_t global_seq;
66829
66830 #define INSTANCE_BUCKETS 16
66831 static struct hlist_head instance_table[INSTANCE_BUCKETS];
66832 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
66833 /* global sequence number */
66834 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
66835 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
66836 - htonl(atomic_inc_return(&global_seq)));
66837 + htonl(atomic_inc_return_unchecked(&global_seq)));
66838
66839 if (data_len) {
66840 struct nlattr *nla;
66841 diff -urNp linux-3.0.3/net/netfilter/nfnetlink_queue.c linux-3.0.3/net/netfilter/nfnetlink_queue.c
66842 --- linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
66843 +++ linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
66844 @@ -58,7 +58,7 @@ struct nfqnl_instance {
66845 */
66846 spinlock_t lock;
66847 unsigned int queue_total;
66848 - atomic_t id_sequence; /* 'sequence' of pkt ids */
66849 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
66850 struct list_head queue_list; /* packets in queue */
66851 };
66852
66853 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
66854 nfmsg->version = NFNETLINK_V0;
66855 nfmsg->res_id = htons(queue->queue_num);
66856
66857 - entry->id = atomic_inc_return(&queue->id_sequence);
66858 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
66859 pmsg.packet_id = htonl(entry->id);
66860 pmsg.hw_protocol = entskb->protocol;
66861 pmsg.hook = entry->hook;
66862 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
66863 inst->peer_pid, inst->queue_total,
66864 inst->copy_mode, inst->copy_range,
66865 inst->queue_dropped, inst->queue_user_dropped,
66866 - atomic_read(&inst->id_sequence), 1);
66867 + atomic_read_unchecked(&inst->id_sequence), 1);
66868 }
66869
66870 static const struct seq_operations nfqnl_seq_ops = {
66871 diff -urNp linux-3.0.3/net/netfilter/xt_gradm.c linux-3.0.3/net/netfilter/xt_gradm.c
66872 --- linux-3.0.3/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
66873 +++ linux-3.0.3/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
66874 @@ -0,0 +1,51 @@
66875 +/*
66876 + * gradm match for netfilter
66877 + * Copyright © Zbigniew Krzystolik, 2010
66878 + *
66879 + * This program is free software; you can redistribute it and/or modify
66880 + * it under the terms of the GNU General Public License; either version
66881 + * 2 or 3 as published by the Free Software Foundation.
66882 + */
66883 +#include <linux/module.h>
66884 +#include <linux/moduleparam.h>
66885 +#include <linux/skbuff.h>
66886 +#include <linux/netfilter/x_tables.h>
66887 +#include <linux/grsecurity.h>
66888 +#include <linux/netfilter/xt_gradm.h>
66889 +
66890 +static bool
66891 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
66892 +{
66893 + const struct xt_gradm_mtinfo *info = par->matchinfo;
66894 + bool retval = false;
66895 + if (gr_acl_is_enabled())
66896 + retval = true;
66897 + return retval ^ info->invflags;
66898 +}
66899 +
66900 +static struct xt_match gradm_mt_reg __read_mostly = {
66901 + .name = "gradm",
66902 + .revision = 0,
66903 + .family = NFPROTO_UNSPEC,
66904 + .match = gradm_mt,
66905 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
66906 + .me = THIS_MODULE,
66907 +};
66908 +
66909 +static int __init gradm_mt_init(void)
66910 +{
66911 + return xt_register_match(&gradm_mt_reg);
66912 +}
66913 +
66914 +static void __exit gradm_mt_exit(void)
66915 +{
66916 + xt_unregister_match(&gradm_mt_reg);
66917 +}
66918 +
66919 +module_init(gradm_mt_init);
66920 +module_exit(gradm_mt_exit);
66921 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
66922 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
66923 +MODULE_LICENSE("GPL");
66924 +MODULE_ALIAS("ipt_gradm");
66925 +MODULE_ALIAS("ip6t_gradm");
66926 diff -urNp linux-3.0.3/net/netfilter/xt_statistic.c linux-3.0.3/net/netfilter/xt_statistic.c
66927 --- linux-3.0.3/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
66928 +++ linux-3.0.3/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
66929 @@ -18,7 +18,7 @@
66930 #include <linux/netfilter/x_tables.h>
66931
66932 struct xt_statistic_priv {
66933 - atomic_t count;
66934 + atomic_unchecked_t count;
66935 } ____cacheline_aligned_in_smp;
66936
66937 MODULE_LICENSE("GPL");
66938 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
66939 break;
66940 case XT_STATISTIC_MODE_NTH:
66941 do {
66942 - oval = atomic_read(&info->master->count);
66943 + oval = atomic_read_unchecked(&info->master->count);
66944 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
66945 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
66946 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
66947 if (nval == 0)
66948 ret = !ret;
66949 break;
66950 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
66951 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
66952 if (info->master == NULL)
66953 return -ENOMEM;
66954 - atomic_set(&info->master->count, info->u.nth.count);
66955 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
66956
66957 return 0;
66958 }
66959 diff -urNp linux-3.0.3/net/netlink/af_netlink.c linux-3.0.3/net/netlink/af_netlink.c
66960 --- linux-3.0.3/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
66961 +++ linux-3.0.3/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
66962 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
66963 sk->sk_error_report(sk);
66964 }
66965 }
66966 - atomic_inc(&sk->sk_drops);
66967 + atomic_inc_unchecked(&sk->sk_drops);
66968 }
66969
66970 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
66971 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
66972 sk_wmem_alloc_get(s),
66973 nlk->cb,
66974 atomic_read(&s->sk_refcnt),
66975 - atomic_read(&s->sk_drops),
66976 + atomic_read_unchecked(&s->sk_drops),
66977 sock_i_ino(s)
66978 );
66979
66980 diff -urNp linux-3.0.3/net/netrom/af_netrom.c linux-3.0.3/net/netrom/af_netrom.c
66981 --- linux-3.0.3/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
66982 +++ linux-3.0.3/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
66983 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
66984 struct sock *sk = sock->sk;
66985 struct nr_sock *nr = nr_sk(sk);
66986
66987 + memset(sax, 0, sizeof(*sax));
66988 lock_sock(sk);
66989 if (peer != 0) {
66990 if (sk->sk_state != TCP_ESTABLISHED) {
66991 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
66992 *uaddr_len = sizeof(struct full_sockaddr_ax25);
66993 } else {
66994 sax->fsa_ax25.sax25_family = AF_NETROM;
66995 - sax->fsa_ax25.sax25_ndigis = 0;
66996 sax->fsa_ax25.sax25_call = nr->source_addr;
66997 *uaddr_len = sizeof(struct sockaddr_ax25);
66998 }
66999 diff -urNp linux-3.0.3/net/packet/af_packet.c linux-3.0.3/net/packet/af_packet.c
67000 --- linux-3.0.3/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
67001 +++ linux-3.0.3/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
67002 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
67003
67004 spin_lock(&sk->sk_receive_queue.lock);
67005 po->stats.tp_packets++;
67006 - skb->dropcount = atomic_read(&sk->sk_drops);
67007 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67008 __skb_queue_tail(&sk->sk_receive_queue, skb);
67009 spin_unlock(&sk->sk_receive_queue.lock);
67010 sk->sk_data_ready(sk, skb->len);
67011 return 0;
67012
67013 drop_n_acct:
67014 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
67015 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
67016
67017 drop_n_restore:
67018 if (skb_head != skb->data && skb_shared(skb)) {
67019 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
67020 case PACKET_HDRLEN:
67021 if (len > sizeof(int))
67022 len = sizeof(int);
67023 - if (copy_from_user(&val, optval, len))
67024 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
67025 return -EFAULT;
67026 switch (val) {
67027 case TPACKET_V1:
67028 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
67029
67030 if (put_user(len, optlen))
67031 return -EFAULT;
67032 - if (copy_to_user(optval, data, len))
67033 + if (len > sizeof(st) || copy_to_user(optval, data, len))
67034 return -EFAULT;
67035 return 0;
67036 }
67037 diff -urNp linux-3.0.3/net/phonet/af_phonet.c linux-3.0.3/net/phonet/af_phonet.c
67038 --- linux-3.0.3/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
67039 +++ linux-3.0.3/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
67040 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
67041 {
67042 struct phonet_protocol *pp;
67043
67044 - if (protocol >= PHONET_NPROTO)
67045 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67046 return NULL;
67047
67048 rcu_read_lock();
67049 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
67050 {
67051 int err = 0;
67052
67053 - if (protocol >= PHONET_NPROTO)
67054 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67055 return -EINVAL;
67056
67057 err = proto_register(pp->prot, 1);
67058 diff -urNp linux-3.0.3/net/phonet/pep.c linux-3.0.3/net/phonet/pep.c
67059 --- linux-3.0.3/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
67060 +++ linux-3.0.3/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
67061 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67062
67063 case PNS_PEP_CTRL_REQ:
67064 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67065 - atomic_inc(&sk->sk_drops);
67066 + atomic_inc_unchecked(&sk->sk_drops);
67067 break;
67068 }
67069 __skb_pull(skb, 4);
67070 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67071 }
67072
67073 if (pn->rx_credits == 0) {
67074 - atomic_inc(&sk->sk_drops);
67075 + atomic_inc_unchecked(&sk->sk_drops);
67076 err = -ENOBUFS;
67077 break;
67078 }
67079 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67080 }
67081
67082 if (pn->rx_credits == 0) {
67083 - atomic_inc(&sk->sk_drops);
67084 + atomic_inc_unchecked(&sk->sk_drops);
67085 err = NET_RX_DROP;
67086 break;
67087 }
67088 diff -urNp linux-3.0.3/net/phonet/socket.c linux-3.0.3/net/phonet/socket.c
67089 --- linux-3.0.3/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
67090 +++ linux-3.0.3/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
67091 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
67092 pn->resource, sk->sk_state,
67093 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67094 sock_i_uid(sk), sock_i_ino(sk),
67095 - atomic_read(&sk->sk_refcnt), sk,
67096 - atomic_read(&sk->sk_drops), &len);
67097 + atomic_read(&sk->sk_refcnt),
67098 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67099 + NULL,
67100 +#else
67101 + sk,
67102 +#endif
67103 + atomic_read_unchecked(&sk->sk_drops), &len);
67104 }
67105 seq_printf(seq, "%*s\n", 127 - len, "");
67106 return 0;
67107 diff -urNp linux-3.0.3/net/rds/cong.c linux-3.0.3/net/rds/cong.c
67108 --- linux-3.0.3/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
67109 +++ linux-3.0.3/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
67110 @@ -77,7 +77,7 @@
67111 * finds that the saved generation number is smaller than the global generation
67112 * number, it wakes up the process.
67113 */
67114 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67115 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67116
67117 /*
67118 * Congestion monitoring
67119 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67120 rdsdebug("waking map %p for %pI4\n",
67121 map, &map->m_addr);
67122 rds_stats_inc(s_cong_update_received);
67123 - atomic_inc(&rds_cong_generation);
67124 + atomic_inc_unchecked(&rds_cong_generation);
67125 if (waitqueue_active(&map->m_waitq))
67126 wake_up(&map->m_waitq);
67127 if (waitqueue_active(&rds_poll_waitq))
67128 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67129
67130 int rds_cong_updated_since(unsigned long *recent)
67131 {
67132 - unsigned long gen = atomic_read(&rds_cong_generation);
67133 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67134
67135 if (likely(*recent == gen))
67136 return 0;
67137 diff -urNp linux-3.0.3/net/rds/ib_cm.c linux-3.0.3/net/rds/ib_cm.c
67138 --- linux-3.0.3/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
67139 +++ linux-3.0.3/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
67140 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67141 /* Clear the ACK state */
67142 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67143 #ifdef KERNEL_HAS_ATOMIC64
67144 - atomic64_set(&ic->i_ack_next, 0);
67145 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67146 #else
67147 ic->i_ack_next = 0;
67148 #endif
67149 diff -urNp linux-3.0.3/net/rds/ib.h linux-3.0.3/net/rds/ib.h
67150 --- linux-3.0.3/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
67151 +++ linux-3.0.3/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
67152 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67153 /* sending acks */
67154 unsigned long i_ack_flags;
67155 #ifdef KERNEL_HAS_ATOMIC64
67156 - atomic64_t i_ack_next; /* next ACK to send */
67157 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67158 #else
67159 spinlock_t i_ack_lock; /* protect i_ack_next */
67160 u64 i_ack_next; /* next ACK to send */
67161 diff -urNp linux-3.0.3/net/rds/ib_recv.c linux-3.0.3/net/rds/ib_recv.c
67162 --- linux-3.0.3/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
67163 +++ linux-3.0.3/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
67164 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67165 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67166 int ack_required)
67167 {
67168 - atomic64_set(&ic->i_ack_next, seq);
67169 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67170 if (ack_required) {
67171 smp_mb__before_clear_bit();
67172 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67173 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67174 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67175 smp_mb__after_clear_bit();
67176
67177 - return atomic64_read(&ic->i_ack_next);
67178 + return atomic64_read_unchecked(&ic->i_ack_next);
67179 }
67180 #endif
67181
67182 diff -urNp linux-3.0.3/net/rds/iw_cm.c linux-3.0.3/net/rds/iw_cm.c
67183 --- linux-3.0.3/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
67184 +++ linux-3.0.3/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
67185 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67186 /* Clear the ACK state */
67187 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67188 #ifdef KERNEL_HAS_ATOMIC64
67189 - atomic64_set(&ic->i_ack_next, 0);
67190 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67191 #else
67192 ic->i_ack_next = 0;
67193 #endif
67194 diff -urNp linux-3.0.3/net/rds/iw.h linux-3.0.3/net/rds/iw.h
67195 --- linux-3.0.3/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
67196 +++ linux-3.0.3/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
67197 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67198 /* sending acks */
67199 unsigned long i_ack_flags;
67200 #ifdef KERNEL_HAS_ATOMIC64
67201 - atomic64_t i_ack_next; /* next ACK to send */
67202 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67203 #else
67204 spinlock_t i_ack_lock; /* protect i_ack_next */
67205 u64 i_ack_next; /* next ACK to send */
67206 diff -urNp linux-3.0.3/net/rds/iw_rdma.c linux-3.0.3/net/rds/iw_rdma.c
67207 --- linux-3.0.3/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
67208 +++ linux-3.0.3/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
67209 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67210 struct rdma_cm_id *pcm_id;
67211 int rc;
67212
67213 + pax_track_stack();
67214 +
67215 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67216 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67217
67218 diff -urNp linux-3.0.3/net/rds/iw_recv.c linux-3.0.3/net/rds/iw_recv.c
67219 --- linux-3.0.3/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
67220 +++ linux-3.0.3/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
67221 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67222 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67223 int ack_required)
67224 {
67225 - atomic64_set(&ic->i_ack_next, seq);
67226 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67227 if (ack_required) {
67228 smp_mb__before_clear_bit();
67229 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67230 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67231 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67232 smp_mb__after_clear_bit();
67233
67234 - return atomic64_read(&ic->i_ack_next);
67235 + return atomic64_read_unchecked(&ic->i_ack_next);
67236 }
67237 #endif
67238
67239 diff -urNp linux-3.0.3/net/rxrpc/af_rxrpc.c linux-3.0.3/net/rxrpc/af_rxrpc.c
67240 --- linux-3.0.3/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
67241 +++ linux-3.0.3/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
67242 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67243 __be32 rxrpc_epoch;
67244
67245 /* current debugging ID */
67246 -atomic_t rxrpc_debug_id;
67247 +atomic_unchecked_t rxrpc_debug_id;
67248
67249 /* count of skbs currently in use */
67250 atomic_t rxrpc_n_skbs;
67251 diff -urNp linux-3.0.3/net/rxrpc/ar-ack.c linux-3.0.3/net/rxrpc/ar-ack.c
67252 --- linux-3.0.3/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
67253 +++ linux-3.0.3/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
67254 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67255
67256 _enter("{%d,%d,%d,%d},",
67257 call->acks_hard, call->acks_unacked,
67258 - atomic_read(&call->sequence),
67259 + atomic_read_unchecked(&call->sequence),
67260 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67261
67262 stop = 0;
67263 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67264
67265 /* each Tx packet has a new serial number */
67266 sp->hdr.serial =
67267 - htonl(atomic_inc_return(&call->conn->serial));
67268 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67269
67270 hdr = (struct rxrpc_header *) txb->head;
67271 hdr->serial = sp->hdr.serial;
67272 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
67273 */
67274 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67275 {
67276 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67277 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67278 }
67279
67280 /*
67281 @@ -629,7 +629,7 @@ process_further:
67282
67283 latest = ntohl(sp->hdr.serial);
67284 hard = ntohl(ack.firstPacket);
67285 - tx = atomic_read(&call->sequence);
67286 + tx = atomic_read_unchecked(&call->sequence);
67287
67288 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67289 latest,
67290 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
67291 u32 abort_code = RX_PROTOCOL_ERROR;
67292 u8 *acks = NULL;
67293
67294 + pax_track_stack();
67295 +
67296 //printk("\n--------------------\n");
67297 _enter("{%d,%s,%lx} [%lu]",
67298 call->debug_id, rxrpc_call_states[call->state], call->events,
67299 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
67300 goto maybe_reschedule;
67301
67302 send_ACK_with_skew:
67303 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67304 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67305 ntohl(ack.serial));
67306 send_ACK:
67307 mtu = call->conn->trans->peer->if_mtu;
67308 @@ -1173,7 +1175,7 @@ send_ACK:
67309 ackinfo.rxMTU = htonl(5692);
67310 ackinfo.jumbo_max = htonl(4);
67311
67312 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67313 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67314 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67315 ntohl(hdr.serial),
67316 ntohs(ack.maxSkew),
67317 @@ -1191,7 +1193,7 @@ send_ACK:
67318 send_message:
67319 _debug("send message");
67320
67321 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67322 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67323 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67324 send_message_2:
67325
67326 diff -urNp linux-3.0.3/net/rxrpc/ar-call.c linux-3.0.3/net/rxrpc/ar-call.c
67327 --- linux-3.0.3/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
67328 +++ linux-3.0.3/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
67329 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67330 spin_lock_init(&call->lock);
67331 rwlock_init(&call->state_lock);
67332 atomic_set(&call->usage, 1);
67333 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67334 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67335 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67336
67337 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67338 diff -urNp linux-3.0.3/net/rxrpc/ar-connection.c linux-3.0.3/net/rxrpc/ar-connection.c
67339 --- linux-3.0.3/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
67340 +++ linux-3.0.3/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
67341 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67342 rwlock_init(&conn->lock);
67343 spin_lock_init(&conn->state_lock);
67344 atomic_set(&conn->usage, 1);
67345 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67346 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67347 conn->avail_calls = RXRPC_MAXCALLS;
67348 conn->size_align = 4;
67349 conn->header_size = sizeof(struct rxrpc_header);
67350 diff -urNp linux-3.0.3/net/rxrpc/ar-connevent.c linux-3.0.3/net/rxrpc/ar-connevent.c
67351 --- linux-3.0.3/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
67352 +++ linux-3.0.3/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
67353 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67354
67355 len = iov[0].iov_len + iov[1].iov_len;
67356
67357 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67358 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67359 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67360
67361 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67362 diff -urNp linux-3.0.3/net/rxrpc/ar-input.c linux-3.0.3/net/rxrpc/ar-input.c
67363 --- linux-3.0.3/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
67364 +++ linux-3.0.3/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
67365 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67366 /* track the latest serial number on this connection for ACK packet
67367 * information */
67368 serial = ntohl(sp->hdr.serial);
67369 - hi_serial = atomic_read(&call->conn->hi_serial);
67370 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67371 while (serial > hi_serial)
67372 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67373 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67374 serial);
67375
67376 /* request ACK generation for any ACK or DATA packet that requests
67377 diff -urNp linux-3.0.3/net/rxrpc/ar-internal.h linux-3.0.3/net/rxrpc/ar-internal.h
67378 --- linux-3.0.3/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
67379 +++ linux-3.0.3/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
67380 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67381 int error; /* error code for local abort */
67382 int debug_id; /* debug ID for printks */
67383 unsigned call_counter; /* call ID counter */
67384 - atomic_t serial; /* packet serial number counter */
67385 - atomic_t hi_serial; /* highest serial number received */
67386 + atomic_unchecked_t serial; /* packet serial number counter */
67387 + atomic_unchecked_t hi_serial; /* highest serial number received */
67388 u8 avail_calls; /* number of calls available */
67389 u8 size_align; /* data size alignment (for security) */
67390 u8 header_size; /* rxrpc + security header size */
67391 @@ -346,7 +346,7 @@ struct rxrpc_call {
67392 spinlock_t lock;
67393 rwlock_t state_lock; /* lock for state transition */
67394 atomic_t usage;
67395 - atomic_t sequence; /* Tx data packet sequence counter */
67396 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67397 u32 abort_code; /* local/remote abort code */
67398 enum { /* current state of call */
67399 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67400 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67401 */
67402 extern atomic_t rxrpc_n_skbs;
67403 extern __be32 rxrpc_epoch;
67404 -extern atomic_t rxrpc_debug_id;
67405 +extern atomic_unchecked_t rxrpc_debug_id;
67406 extern struct workqueue_struct *rxrpc_workqueue;
67407
67408 /*
67409 diff -urNp linux-3.0.3/net/rxrpc/ar-local.c linux-3.0.3/net/rxrpc/ar-local.c
67410 --- linux-3.0.3/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
67411 +++ linux-3.0.3/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
67412 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67413 spin_lock_init(&local->lock);
67414 rwlock_init(&local->services_lock);
67415 atomic_set(&local->usage, 1);
67416 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67417 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67418 memcpy(&local->srx, srx, sizeof(*srx));
67419 }
67420
67421 diff -urNp linux-3.0.3/net/rxrpc/ar-output.c linux-3.0.3/net/rxrpc/ar-output.c
67422 --- linux-3.0.3/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
67423 +++ linux-3.0.3/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
67424 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67425 sp->hdr.cid = call->cid;
67426 sp->hdr.callNumber = call->call_id;
67427 sp->hdr.seq =
67428 - htonl(atomic_inc_return(&call->sequence));
67429 + htonl(atomic_inc_return_unchecked(&call->sequence));
67430 sp->hdr.serial =
67431 - htonl(atomic_inc_return(&conn->serial));
67432 + htonl(atomic_inc_return_unchecked(&conn->serial));
67433 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67434 sp->hdr.userStatus = 0;
67435 sp->hdr.securityIndex = conn->security_ix;
67436 diff -urNp linux-3.0.3/net/rxrpc/ar-peer.c linux-3.0.3/net/rxrpc/ar-peer.c
67437 --- linux-3.0.3/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
67438 +++ linux-3.0.3/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
67439 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67440 INIT_LIST_HEAD(&peer->error_targets);
67441 spin_lock_init(&peer->lock);
67442 atomic_set(&peer->usage, 1);
67443 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67444 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67445 memcpy(&peer->srx, srx, sizeof(*srx));
67446
67447 rxrpc_assess_MTU_size(peer);
67448 diff -urNp linux-3.0.3/net/rxrpc/ar-proc.c linux-3.0.3/net/rxrpc/ar-proc.c
67449 --- linux-3.0.3/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
67450 +++ linux-3.0.3/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
67451 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67452 atomic_read(&conn->usage),
67453 rxrpc_conn_states[conn->state],
67454 key_serial(conn->key),
67455 - atomic_read(&conn->serial),
67456 - atomic_read(&conn->hi_serial));
67457 + atomic_read_unchecked(&conn->serial),
67458 + atomic_read_unchecked(&conn->hi_serial));
67459
67460 return 0;
67461 }
67462 diff -urNp linux-3.0.3/net/rxrpc/ar-transport.c linux-3.0.3/net/rxrpc/ar-transport.c
67463 --- linux-3.0.3/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
67464 +++ linux-3.0.3/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
67465 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67466 spin_lock_init(&trans->client_lock);
67467 rwlock_init(&trans->conn_lock);
67468 atomic_set(&trans->usage, 1);
67469 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67470 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67471
67472 if (peer->srx.transport.family == AF_INET) {
67473 switch (peer->srx.transport_type) {
67474 diff -urNp linux-3.0.3/net/rxrpc/rxkad.c linux-3.0.3/net/rxrpc/rxkad.c
67475 --- linux-3.0.3/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
67476 +++ linux-3.0.3/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
67477 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67478 u16 check;
67479 int nsg;
67480
67481 + pax_track_stack();
67482 +
67483 sp = rxrpc_skb(skb);
67484
67485 _enter("");
67486 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67487 u16 check;
67488 int nsg;
67489
67490 + pax_track_stack();
67491 +
67492 _enter("");
67493
67494 sp = rxrpc_skb(skb);
67495 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67496
67497 len = iov[0].iov_len + iov[1].iov_len;
67498
67499 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67500 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67501 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67502
67503 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67504 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67505
67506 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67507
67508 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67509 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67510 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67511
67512 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67513 diff -urNp linux-3.0.3/net/sctp/proc.c linux-3.0.3/net/sctp/proc.c
67514 --- linux-3.0.3/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
67515 +++ linux-3.0.3/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
67516 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
67517 seq_printf(seq,
67518 "%8pK %8pK %-3d %-3d %-2d %-4d "
67519 "%4d %8d %8d %7d %5lu %-5d %5d ",
67520 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67521 + assoc, sk,
67522 + sctp_sk(sk)->type, sk->sk_state,
67523 assoc->state, hash,
67524 assoc->assoc_id,
67525 assoc->sndbuf_used,
67526 diff -urNp linux-3.0.3/net/sctp/socket.c linux-3.0.3/net/sctp/socket.c
67527 --- linux-3.0.3/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
67528 +++ linux-3.0.3/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
67529 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
67530 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67531 if (space_left < addrlen)
67532 return -ENOMEM;
67533 - if (copy_to_user(to, &temp, addrlen))
67534 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67535 return -EFAULT;
67536 to += addrlen;
67537 cnt++;
67538 diff -urNp linux-3.0.3/net/socket.c linux-3.0.3/net/socket.c
67539 --- linux-3.0.3/net/socket.c 2011-08-23 21:44:40.000000000 -0400
67540 +++ linux-3.0.3/net/socket.c 2011-08-23 21:48:14.000000000 -0400
67541 @@ -88,6 +88,7 @@
67542 #include <linux/nsproxy.h>
67543 #include <linux/magic.h>
67544 #include <linux/slab.h>
67545 +#include <linux/in.h>
67546
67547 #include <asm/uaccess.h>
67548 #include <asm/unistd.h>
67549 @@ -105,6 +106,8 @@
67550 #include <linux/sockios.h>
67551 #include <linux/atalk.h>
67552
67553 +#include <linux/grsock.h>
67554 +
67555 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67556 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67557 unsigned long nr_segs, loff_t pos);
67558 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
67559 &sockfs_dentry_operations, SOCKFS_MAGIC);
67560 }
67561
67562 -static struct vfsmount *sock_mnt __read_mostly;
67563 +struct vfsmount *sock_mnt __read_mostly;
67564
67565 static struct file_system_type sock_fs_type = {
67566 .name = "sockfs",
67567 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
67568 return -EAFNOSUPPORT;
67569 if (type < 0 || type >= SOCK_MAX)
67570 return -EINVAL;
67571 + if (protocol < 0)
67572 + return -EINVAL;
67573
67574 /* Compatibility.
67575
67576 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
67577 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
67578 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
67579
67580 + if(!gr_search_socket(family, type, protocol)) {
67581 + retval = -EACCES;
67582 + goto out;
67583 + }
67584 +
67585 + if (gr_handle_sock_all(family, type, protocol)) {
67586 + retval = -EACCES;
67587 + goto out;
67588 + }
67589 +
67590 retval = sock_create(family, type, protocol, &sock);
67591 if (retval < 0)
67592 goto out;
67593 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67594 if (sock) {
67595 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
67596 if (err >= 0) {
67597 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
67598 + err = -EACCES;
67599 + goto error;
67600 + }
67601 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
67602 + if (err)
67603 + goto error;
67604 +
67605 err = security_socket_bind(sock,
67606 (struct sockaddr *)&address,
67607 addrlen);
67608 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67609 (struct sockaddr *)
67610 &address, addrlen);
67611 }
67612 +error:
67613 fput_light(sock->file, fput_needed);
67614 }
67615 return err;
67616 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
67617 if ((unsigned)backlog > somaxconn)
67618 backlog = somaxconn;
67619
67620 + if (gr_handle_sock_server_other(sock->sk)) {
67621 + err = -EPERM;
67622 + goto error;
67623 + }
67624 +
67625 + err = gr_search_listen(sock);
67626 + if (err)
67627 + goto error;
67628 +
67629 err = security_socket_listen(sock, backlog);
67630 if (!err)
67631 err = sock->ops->listen(sock, backlog);
67632
67633 +error:
67634 fput_light(sock->file, fput_needed);
67635 }
67636 return err;
67637 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67638 newsock->type = sock->type;
67639 newsock->ops = sock->ops;
67640
67641 + if (gr_handle_sock_server_other(sock->sk)) {
67642 + err = -EPERM;
67643 + sock_release(newsock);
67644 + goto out_put;
67645 + }
67646 +
67647 + err = gr_search_accept(sock);
67648 + if (err) {
67649 + sock_release(newsock);
67650 + goto out_put;
67651 + }
67652 +
67653 /*
67654 * We don't need try_module_get here, as the listening socket (sock)
67655 * has the protocol module (sock->ops->owner) held.
67656 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67657 fd_install(newfd, newfile);
67658 err = newfd;
67659
67660 + gr_attach_curr_ip(newsock->sk);
67661 +
67662 out_put:
67663 fput_light(sock->file, fput_needed);
67664 out:
67665 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67666 int, addrlen)
67667 {
67668 struct socket *sock;
67669 + struct sockaddr *sck;
67670 struct sockaddr_storage address;
67671 int err, fput_needed;
67672
67673 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67674 if (err < 0)
67675 goto out_put;
67676
67677 + sck = (struct sockaddr *)&address;
67678 +
67679 + if (gr_handle_sock_client(sck)) {
67680 + err = -EACCES;
67681 + goto out_put;
67682 + }
67683 +
67684 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
67685 + if (err)
67686 + goto out_put;
67687 +
67688 err =
67689 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
67690 if (err)
67691 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
67692 unsigned char *ctl_buf = ctl;
67693 int err, ctl_len, iov_size, total_len;
67694
67695 + pax_track_stack();
67696 +
67697 err = -EFAULT;
67698 if (MSG_CMSG_COMPAT & flags) {
67699 if (get_compat_msghdr(msg_sys, msg_compat))
67700 diff -urNp linux-3.0.3/net/sunrpc/sched.c linux-3.0.3/net/sunrpc/sched.c
67701 --- linux-3.0.3/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
67702 +++ linux-3.0.3/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
67703 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
67704 #ifdef RPC_DEBUG
67705 static void rpc_task_set_debuginfo(struct rpc_task *task)
67706 {
67707 - static atomic_t rpc_pid;
67708 + static atomic_unchecked_t rpc_pid;
67709
67710 - task->tk_pid = atomic_inc_return(&rpc_pid);
67711 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
67712 }
67713 #else
67714 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
67715 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c
67716 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
67717 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
67718 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
67719 static unsigned int min_max_inline = 4096;
67720 static unsigned int max_max_inline = 65536;
67721
67722 -atomic_t rdma_stat_recv;
67723 -atomic_t rdma_stat_read;
67724 -atomic_t rdma_stat_write;
67725 -atomic_t rdma_stat_sq_starve;
67726 -atomic_t rdma_stat_rq_starve;
67727 -atomic_t rdma_stat_rq_poll;
67728 -atomic_t rdma_stat_rq_prod;
67729 -atomic_t rdma_stat_sq_poll;
67730 -atomic_t rdma_stat_sq_prod;
67731 +atomic_unchecked_t rdma_stat_recv;
67732 +atomic_unchecked_t rdma_stat_read;
67733 +atomic_unchecked_t rdma_stat_write;
67734 +atomic_unchecked_t rdma_stat_sq_starve;
67735 +atomic_unchecked_t rdma_stat_rq_starve;
67736 +atomic_unchecked_t rdma_stat_rq_poll;
67737 +atomic_unchecked_t rdma_stat_rq_prod;
67738 +atomic_unchecked_t rdma_stat_sq_poll;
67739 +atomic_unchecked_t rdma_stat_sq_prod;
67740
67741 /* Temporary NFS request map and context caches */
67742 struct kmem_cache *svc_rdma_map_cachep;
67743 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
67744 len -= *ppos;
67745 if (len > *lenp)
67746 len = *lenp;
67747 - if (len && copy_to_user(buffer, str_buf, len))
67748 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
67749 return -EFAULT;
67750 *lenp = len;
67751 *ppos += len;
67752 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
67753 {
67754 .procname = "rdma_stat_read",
67755 .data = &rdma_stat_read,
67756 - .maxlen = sizeof(atomic_t),
67757 + .maxlen = sizeof(atomic_unchecked_t),
67758 .mode = 0644,
67759 .proc_handler = read_reset_stat,
67760 },
67761 {
67762 .procname = "rdma_stat_recv",
67763 .data = &rdma_stat_recv,
67764 - .maxlen = sizeof(atomic_t),
67765 + .maxlen = sizeof(atomic_unchecked_t),
67766 .mode = 0644,
67767 .proc_handler = read_reset_stat,
67768 },
67769 {
67770 .procname = "rdma_stat_write",
67771 .data = &rdma_stat_write,
67772 - .maxlen = sizeof(atomic_t),
67773 + .maxlen = sizeof(atomic_unchecked_t),
67774 .mode = 0644,
67775 .proc_handler = read_reset_stat,
67776 },
67777 {
67778 .procname = "rdma_stat_sq_starve",
67779 .data = &rdma_stat_sq_starve,
67780 - .maxlen = sizeof(atomic_t),
67781 + .maxlen = sizeof(atomic_unchecked_t),
67782 .mode = 0644,
67783 .proc_handler = read_reset_stat,
67784 },
67785 {
67786 .procname = "rdma_stat_rq_starve",
67787 .data = &rdma_stat_rq_starve,
67788 - .maxlen = sizeof(atomic_t),
67789 + .maxlen = sizeof(atomic_unchecked_t),
67790 .mode = 0644,
67791 .proc_handler = read_reset_stat,
67792 },
67793 {
67794 .procname = "rdma_stat_rq_poll",
67795 .data = &rdma_stat_rq_poll,
67796 - .maxlen = sizeof(atomic_t),
67797 + .maxlen = sizeof(atomic_unchecked_t),
67798 .mode = 0644,
67799 .proc_handler = read_reset_stat,
67800 },
67801 {
67802 .procname = "rdma_stat_rq_prod",
67803 .data = &rdma_stat_rq_prod,
67804 - .maxlen = sizeof(atomic_t),
67805 + .maxlen = sizeof(atomic_unchecked_t),
67806 .mode = 0644,
67807 .proc_handler = read_reset_stat,
67808 },
67809 {
67810 .procname = "rdma_stat_sq_poll",
67811 .data = &rdma_stat_sq_poll,
67812 - .maxlen = sizeof(atomic_t),
67813 + .maxlen = sizeof(atomic_unchecked_t),
67814 .mode = 0644,
67815 .proc_handler = read_reset_stat,
67816 },
67817 {
67818 .procname = "rdma_stat_sq_prod",
67819 .data = &rdma_stat_sq_prod,
67820 - .maxlen = sizeof(atomic_t),
67821 + .maxlen = sizeof(atomic_unchecked_t),
67822 .mode = 0644,
67823 .proc_handler = read_reset_stat,
67824 },
67825 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
67826 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
67827 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
67828 @@ -499,7 +499,7 @@ next_sge:
67829 svc_rdma_put_context(ctxt, 0);
67830 goto out;
67831 }
67832 - atomic_inc(&rdma_stat_read);
67833 + atomic_inc_unchecked(&rdma_stat_read);
67834
67835 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
67836 chl_map->ch[ch_no].count -= read_wr.num_sge;
67837 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67838 dto_q);
67839 list_del_init(&ctxt->dto_q);
67840 } else {
67841 - atomic_inc(&rdma_stat_rq_starve);
67842 + atomic_inc_unchecked(&rdma_stat_rq_starve);
67843 clear_bit(XPT_DATA, &xprt->xpt_flags);
67844 ctxt = NULL;
67845 }
67846 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67847 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
67848 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
67849 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
67850 - atomic_inc(&rdma_stat_recv);
67851 + atomic_inc_unchecked(&rdma_stat_recv);
67852
67853 /* Build up the XDR from the receive buffers. */
67854 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
67855 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c
67856 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
67857 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
67858 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
67859 write_wr.wr.rdma.remote_addr = to;
67860
67861 /* Post It */
67862 - atomic_inc(&rdma_stat_write);
67863 + atomic_inc_unchecked(&rdma_stat_write);
67864 if (svc_rdma_send(xprt, &write_wr))
67865 goto err;
67866 return 0;
67867 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c
67868 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
67869 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
67870 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
67871 return;
67872
67873 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
67874 - atomic_inc(&rdma_stat_rq_poll);
67875 + atomic_inc_unchecked(&rdma_stat_rq_poll);
67876
67877 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
67878 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
67879 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
67880 }
67881
67882 if (ctxt)
67883 - atomic_inc(&rdma_stat_rq_prod);
67884 + atomic_inc_unchecked(&rdma_stat_rq_prod);
67885
67886 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
67887 /*
67888 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
67889 return;
67890
67891 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
67892 - atomic_inc(&rdma_stat_sq_poll);
67893 + atomic_inc_unchecked(&rdma_stat_sq_poll);
67894 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
67895 if (wc.status != IB_WC_SUCCESS)
67896 /* Close the transport */
67897 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
67898 }
67899
67900 if (ctxt)
67901 - atomic_inc(&rdma_stat_sq_prod);
67902 + atomic_inc_unchecked(&rdma_stat_sq_prod);
67903 }
67904
67905 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
67906 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
67907 spin_lock_bh(&xprt->sc_lock);
67908 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
67909 spin_unlock_bh(&xprt->sc_lock);
67910 - atomic_inc(&rdma_stat_sq_starve);
67911 + atomic_inc_unchecked(&rdma_stat_sq_starve);
67912
67913 /* See if we can opportunistically reap SQ WR to make room */
67914 sq_cq_reap(xprt);
67915 diff -urNp linux-3.0.3/net/sysctl_net.c linux-3.0.3/net/sysctl_net.c
67916 --- linux-3.0.3/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
67917 +++ linux-3.0.3/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
67918 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
67919 struct ctl_table *table)
67920 {
67921 /* Allow network administrator to have same access as root. */
67922 - if (capable(CAP_NET_ADMIN)) {
67923 + if (capable_nolog(CAP_NET_ADMIN)) {
67924 int mode = (table->mode >> 6) & 7;
67925 return (mode << 6) | (mode << 3) | mode;
67926 }
67927 diff -urNp linux-3.0.3/net/unix/af_unix.c linux-3.0.3/net/unix/af_unix.c
67928 --- linux-3.0.3/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
67929 +++ linux-3.0.3/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
67930 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
67931 err = -ECONNREFUSED;
67932 if (!S_ISSOCK(inode->i_mode))
67933 goto put_fail;
67934 +
67935 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
67936 + err = -EACCES;
67937 + goto put_fail;
67938 + }
67939 +
67940 u = unix_find_socket_byinode(inode);
67941 if (!u)
67942 goto put_fail;
67943 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
67944 if (u) {
67945 struct dentry *dentry;
67946 dentry = unix_sk(u)->dentry;
67947 +
67948 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
67949 + err = -EPERM;
67950 + sock_put(u);
67951 + goto fail;
67952 + }
67953 +
67954 if (dentry)
67955 touch_atime(unix_sk(u)->mnt, dentry);
67956 } else
67957 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
67958 err = security_path_mknod(&nd.path, dentry, mode, 0);
67959 if (err)
67960 goto out_mknod_drop_write;
67961 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
67962 + err = -EACCES;
67963 + goto out_mknod_drop_write;
67964 + }
67965 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
67966 out_mknod_drop_write:
67967 mnt_drop_write(nd.path.mnt);
67968 if (err)
67969 goto out_mknod_dput;
67970 +
67971 + gr_handle_create(dentry, nd.path.mnt);
67972 +
67973 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
67974 dput(nd.path.dentry);
67975 nd.path.dentry = dentry;
67976 diff -urNp linux-3.0.3/net/wireless/core.h linux-3.0.3/net/wireless/core.h
67977 --- linux-3.0.3/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
67978 +++ linux-3.0.3/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
67979 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
67980 struct mutex mtx;
67981
67982 /* rfkill support */
67983 - struct rfkill_ops rfkill_ops;
67984 + rfkill_ops_no_const rfkill_ops;
67985 struct rfkill *rfkill;
67986 struct work_struct rfkill_sync;
67987
67988 diff -urNp linux-3.0.3/net/wireless/wext-core.c linux-3.0.3/net/wireless/wext-core.c
67989 --- linux-3.0.3/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
67990 +++ linux-3.0.3/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
67991 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
67992 */
67993
67994 /* Support for very large requests */
67995 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
67996 - (user_length > descr->max_tokens)) {
67997 + if (user_length > descr->max_tokens) {
67998 /* Allow userspace to GET more than max so
67999 * we can support any size GET requests.
68000 * There is still a limit : -ENOMEM.
68001 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
68002 }
68003 }
68004
68005 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
68006 - /*
68007 - * If this is a GET, but not NOMAX, it means that the extra
68008 - * data is not bounded by userspace, but by max_tokens. Thus
68009 - * set the length to max_tokens. This matches the extra data
68010 - * allocation.
68011 - * The driver should fill it with the number of tokens it
68012 - * provided, and it may check iwp->length rather than having
68013 - * knowledge of max_tokens. If the driver doesn't change the
68014 - * iwp->length, this ioctl just copies back max_token tokens
68015 - * filled with zeroes. Hopefully the driver isn't claiming
68016 - * them to be valid data.
68017 - */
68018 - iwp->length = descr->max_tokens;
68019 - }
68020 -
68021 err = handler(dev, info, (union iwreq_data *) iwp, extra);
68022
68023 iwp->length += essid_compat;
68024 diff -urNp linux-3.0.3/net/xfrm/xfrm_policy.c linux-3.0.3/net/xfrm/xfrm_policy.c
68025 --- linux-3.0.3/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
68026 +++ linux-3.0.3/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
68027 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
68028 {
68029 policy->walk.dead = 1;
68030
68031 - atomic_inc(&policy->genid);
68032 + atomic_inc_unchecked(&policy->genid);
68033
68034 if (del_timer(&policy->timer))
68035 xfrm_pol_put(policy);
68036 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
68037 hlist_add_head(&policy->bydst, chain);
68038 xfrm_pol_hold(policy);
68039 net->xfrm.policy_count[dir]++;
68040 - atomic_inc(&flow_cache_genid);
68041 + atomic_inc_unchecked(&flow_cache_genid);
68042 if (delpol)
68043 __xfrm_policy_unlink(delpol, dir);
68044 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
68045 @@ -1528,7 +1528,7 @@ free_dst:
68046 goto out;
68047 }
68048
68049 -static int inline
68050 +static inline int
68051 xfrm_dst_alloc_copy(void **target, const void *src, int size)
68052 {
68053 if (!*target) {
68054 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
68055 return 0;
68056 }
68057
68058 -static int inline
68059 +static inline int
68060 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68061 {
68062 #ifdef CONFIG_XFRM_SUB_POLICY
68063 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
68064 #endif
68065 }
68066
68067 -static int inline
68068 +static inline int
68069 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68070 {
68071 #ifdef CONFIG_XFRM_SUB_POLICY
68072 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
68073
68074 xdst->num_pols = num_pols;
68075 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68076 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68077 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68078
68079 return xdst;
68080 }
68081 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68082 if (xdst->xfrm_genid != dst->xfrm->genid)
68083 return 0;
68084 if (xdst->num_pols > 0 &&
68085 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68086 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68087 return 0;
68088
68089 mtu = dst_mtu(dst->child);
68090 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
68091 sizeof(pol->xfrm_vec[i].saddr));
68092 pol->xfrm_vec[i].encap_family = mp->new_family;
68093 /* flush bundles */
68094 - atomic_inc(&pol->genid);
68095 + atomic_inc_unchecked(&pol->genid);
68096 }
68097 }
68098
68099 diff -urNp linux-3.0.3/net/xfrm/xfrm_user.c linux-3.0.3/net/xfrm/xfrm_user.c
68100 --- linux-3.0.3/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
68101 +++ linux-3.0.3/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
68102 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68103 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68104 int i;
68105
68106 + pax_track_stack();
68107 +
68108 if (xp->xfrm_nr == 0)
68109 return 0;
68110
68111 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68112 int err;
68113 int n = 0;
68114
68115 + pax_track_stack();
68116 +
68117 if (attrs[XFRMA_MIGRATE] == NULL)
68118 return -EINVAL;
68119
68120 diff -urNp linux-3.0.3/scripts/basic/fixdep.c linux-3.0.3/scripts/basic/fixdep.c
68121 --- linux-3.0.3/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
68122 +++ linux-3.0.3/scripts/basic/fixdep.c 2011-08-23 21:47:56.000000000 -0400
68123 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68124
68125 static void parse_config_file(const char *map, size_t len)
68126 {
68127 - const int *end = (const int *) (map + len);
68128 + const unsigned int *end = (const unsigned int *) (map + len);
68129 /* start at +1, so that p can never be < map */
68130 - const int *m = (const int *) map + 1;
68131 + const unsigned int *m = (const unsigned int *) map + 1;
68132 const char *p, *q;
68133
68134 for (; m < end; m++) {
68135 @@ -405,7 +405,7 @@ static void print_deps(void)
68136 static void traps(void)
68137 {
68138 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68139 - int *p = (int *)test;
68140 + unsigned int *p = (unsigned int *)test;
68141
68142 if (*p != INT_CONF) {
68143 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68144 diff -urNp linux-3.0.3/scripts/gcc-plugin.sh linux-3.0.3/scripts/gcc-plugin.sh
68145 --- linux-3.0.3/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68146 +++ linux-3.0.3/scripts/gcc-plugin.sh 2011-08-23 21:47:56.000000000 -0400
68147 @@ -0,0 +1,2 @@
68148 +#!/bin/sh
68149 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
68150 diff -urNp linux-3.0.3/scripts/Makefile.build linux-3.0.3/scripts/Makefile.build
68151 --- linux-3.0.3/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
68152 +++ linux-3.0.3/scripts/Makefile.build 2011-08-23 21:47:56.000000000 -0400
68153 @@ -109,7 +109,7 @@ endif
68154 endif
68155
68156 # Do not include host rules unless needed
68157 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68158 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68159 include scripts/Makefile.host
68160 endif
68161
68162 diff -urNp linux-3.0.3/scripts/Makefile.clean linux-3.0.3/scripts/Makefile.clean
68163 --- linux-3.0.3/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
68164 +++ linux-3.0.3/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
68165 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68166 __clean-files := $(extra-y) $(always) \
68167 $(targets) $(clean-files) \
68168 $(host-progs) \
68169 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68170 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68171 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68172
68173 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68174
68175 diff -urNp linux-3.0.3/scripts/Makefile.host linux-3.0.3/scripts/Makefile.host
68176 --- linux-3.0.3/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
68177 +++ linux-3.0.3/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
68178 @@ -31,6 +31,7 @@
68179 # Note: Shared libraries consisting of C++ files are not supported
68180
68181 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68182 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68183
68184 # C code
68185 # Executables compiled from a single .c file
68186 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68187 # Shared libaries (only .c supported)
68188 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68189 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68190 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68191 # Remove .so files from "xxx-objs"
68192 host-cobjs := $(filter-out %.so,$(host-cobjs))
68193
68194 diff -urNp linux-3.0.3/scripts/mod/file2alias.c linux-3.0.3/scripts/mod/file2alias.c
68195 --- linux-3.0.3/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
68196 +++ linux-3.0.3/scripts/mod/file2alias.c 2011-08-23 21:47:56.000000000 -0400
68197 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68198 unsigned long size, unsigned long id_size,
68199 void *symval)
68200 {
68201 - int i;
68202 + unsigned int i;
68203
68204 if (size % id_size || size < id_size) {
68205 if (cross_build != 0)
68206 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68207 /* USB is special because the bcdDevice can be matched against a numeric range */
68208 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68209 static void do_usb_entry(struct usb_device_id *id,
68210 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68211 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68212 unsigned char range_lo, unsigned char range_hi,
68213 unsigned char max, struct module *mod)
68214 {
68215 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68216 for (i = 0; i < count; i++) {
68217 const char *id = (char *)devs[i].id;
68218 char acpi_id[sizeof(devs[0].id)];
68219 - int j;
68220 + unsigned int j;
68221
68222 buf_printf(&mod->dev_table_buf,
68223 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68224 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68225
68226 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68227 const char *id = (char *)card->devs[j].id;
68228 - int i2, j2;
68229 + unsigned int i2, j2;
68230 int dup = 0;
68231
68232 if (!id[0])
68233 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68234 /* add an individual alias for every device entry */
68235 if (!dup) {
68236 char acpi_id[sizeof(card->devs[0].id)];
68237 - int k;
68238 + unsigned int k;
68239
68240 buf_printf(&mod->dev_table_buf,
68241 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68242 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
68243 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68244 char *alias)
68245 {
68246 - int i, j;
68247 + unsigned int i, j;
68248
68249 sprintf(alias, "dmi*");
68250
68251 diff -urNp linux-3.0.3/scripts/mod/modpost.c linux-3.0.3/scripts/mod/modpost.c
68252 --- linux-3.0.3/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
68253 +++ linux-3.0.3/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
68254 @@ -892,6 +892,7 @@ enum mismatch {
68255 ANY_INIT_TO_ANY_EXIT,
68256 ANY_EXIT_TO_ANY_INIT,
68257 EXPORT_TO_INIT_EXIT,
68258 + DATA_TO_TEXT
68259 };
68260
68261 struct sectioncheck {
68262 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
68263 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68264 .mismatch = EXPORT_TO_INIT_EXIT,
68265 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68266 +},
68267 +/* Do not reference code from writable data */
68268 +{
68269 + .fromsec = { DATA_SECTIONS, NULL },
68270 + .tosec = { TEXT_SECTIONS, NULL },
68271 + .mismatch = DATA_TO_TEXT
68272 }
68273 };
68274
68275 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
68276 continue;
68277 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68278 continue;
68279 - if (sym->st_value == addr)
68280 - return sym;
68281 /* Find a symbol nearby - addr are maybe negative */
68282 d = sym->st_value - addr;
68283 + if (d == 0)
68284 + return sym;
68285 if (d < 0)
68286 d = addr - sym->st_value;
68287 if (d < distance) {
68288 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
68289 tosym, prl_to, prl_to, tosym);
68290 free(prl_to);
68291 break;
68292 + case DATA_TO_TEXT:
68293 +/*
68294 + fprintf(stderr,
68295 + "The variable %s references\n"
68296 + "the %s %s%s%s\n",
68297 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68298 +*/
68299 + break;
68300 }
68301 fprintf(stderr, "\n");
68302 }
68303 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
68304 static void check_sec_ref(struct module *mod, const char *modname,
68305 struct elf_info *elf)
68306 {
68307 - int i;
68308 + unsigned int i;
68309 Elf_Shdr *sechdrs = elf->sechdrs;
68310
68311 /* Walk through all sections */
68312 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
68313 va_end(ap);
68314 }
68315
68316 -void buf_write(struct buffer *buf, const char *s, int len)
68317 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68318 {
68319 if (buf->size - buf->pos < len) {
68320 buf->size += len + SZ;
68321 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
68322 if (fstat(fileno(file), &st) < 0)
68323 goto close_write;
68324
68325 - if (st.st_size != b->pos)
68326 + if (st.st_size != (off_t)b->pos)
68327 goto close_write;
68328
68329 tmp = NOFAIL(malloc(b->pos));
68330 diff -urNp linux-3.0.3/scripts/mod/modpost.h linux-3.0.3/scripts/mod/modpost.h
68331 --- linux-3.0.3/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
68332 +++ linux-3.0.3/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
68333 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68334
68335 struct buffer {
68336 char *p;
68337 - int pos;
68338 - int size;
68339 + unsigned int pos;
68340 + unsigned int size;
68341 };
68342
68343 void __attribute__((format(printf, 2, 3)))
68344 buf_printf(struct buffer *buf, const char *fmt, ...);
68345
68346 void
68347 -buf_write(struct buffer *buf, const char *s, int len);
68348 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68349
68350 struct module {
68351 struct module *next;
68352 diff -urNp linux-3.0.3/scripts/mod/sumversion.c linux-3.0.3/scripts/mod/sumversion.c
68353 --- linux-3.0.3/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
68354 +++ linux-3.0.3/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
68355 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68356 goto out;
68357 }
68358
68359 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68360 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68361 warn("writing sum in %s failed: %s\n",
68362 filename, strerror(errno));
68363 goto out;
68364 diff -urNp linux-3.0.3/scripts/pnmtologo.c linux-3.0.3/scripts/pnmtologo.c
68365 --- linux-3.0.3/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
68366 +++ linux-3.0.3/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
68367 @@ -237,14 +237,14 @@ static void write_header(void)
68368 fprintf(out, " * Linux logo %s\n", logoname);
68369 fputs(" */\n\n", out);
68370 fputs("#include <linux/linux_logo.h>\n\n", out);
68371 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68372 + fprintf(out, "static unsigned char %s_data[] = {\n",
68373 logoname);
68374 }
68375
68376 static void write_footer(void)
68377 {
68378 fputs("\n};\n\n", out);
68379 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68380 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68381 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68382 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68383 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68384 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68385 fputs("\n};\n\n", out);
68386
68387 /* write logo clut */
68388 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68389 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68390 logoname);
68391 write_hex_cnt = 0;
68392 for (i = 0; i < logo_clutsize; i++) {
68393 diff -urNp linux-3.0.3/security/apparmor/lsm.c linux-3.0.3/security/apparmor/lsm.c
68394 --- linux-3.0.3/security/apparmor/lsm.c 2011-08-23 21:44:40.000000000 -0400
68395 +++ linux-3.0.3/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
68396 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68397 return error;
68398 }
68399
68400 -static struct security_operations apparmor_ops = {
68401 +static struct security_operations apparmor_ops __read_only = {
68402 .name = "apparmor",
68403
68404 .ptrace_access_check = apparmor_ptrace_access_check,
68405 diff -urNp linux-3.0.3/security/commoncap.c linux-3.0.3/security/commoncap.c
68406 --- linux-3.0.3/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
68407 +++ linux-3.0.3/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
68408 @@ -28,6 +28,7 @@
68409 #include <linux/prctl.h>
68410 #include <linux/securebits.h>
68411 #include <linux/user_namespace.h>
68412 +#include <net/sock.h>
68413
68414 /*
68415 * If a non-root user executes a setuid-root binary in
68416 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68417
68418 int cap_netlink_recv(struct sk_buff *skb, int cap)
68419 {
68420 - if (!cap_raised(current_cap(), cap))
68421 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68422 return -EPERM;
68423 return 0;
68424 }
68425 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
68426 {
68427 const struct cred *cred = current_cred();
68428
68429 + if (gr_acl_enable_at_secure())
68430 + return 1;
68431 +
68432 if (cred->uid != 0) {
68433 if (bprm->cap_effective)
68434 return 1;
68435 diff -urNp linux-3.0.3/security/integrity/ima/ima_api.c linux-3.0.3/security/integrity/ima/ima_api.c
68436 --- linux-3.0.3/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
68437 +++ linux-3.0.3/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
68438 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68439 int result;
68440
68441 /* can overflow, only indicator */
68442 - atomic_long_inc(&ima_htable.violations);
68443 + atomic_long_inc_unchecked(&ima_htable.violations);
68444
68445 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68446 if (!entry) {
68447 diff -urNp linux-3.0.3/security/integrity/ima/ima_fs.c linux-3.0.3/security/integrity/ima/ima_fs.c
68448 --- linux-3.0.3/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
68449 +++ linux-3.0.3/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
68450 @@ -28,12 +28,12 @@
68451 static int valid_policy = 1;
68452 #define TMPBUFLEN 12
68453 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68454 - loff_t *ppos, atomic_long_t *val)
68455 + loff_t *ppos, atomic_long_unchecked_t *val)
68456 {
68457 char tmpbuf[TMPBUFLEN];
68458 ssize_t len;
68459
68460 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68461 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68462 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68463 }
68464
68465 diff -urNp linux-3.0.3/security/integrity/ima/ima.h linux-3.0.3/security/integrity/ima/ima.h
68466 --- linux-3.0.3/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
68467 +++ linux-3.0.3/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
68468 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68469 extern spinlock_t ima_queue_lock;
68470
68471 struct ima_h_table {
68472 - atomic_long_t len; /* number of stored measurements in the list */
68473 - atomic_long_t violations;
68474 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68475 + atomic_long_unchecked_t violations;
68476 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68477 };
68478 extern struct ima_h_table ima_htable;
68479 diff -urNp linux-3.0.3/security/integrity/ima/ima_queue.c linux-3.0.3/security/integrity/ima/ima_queue.c
68480 --- linux-3.0.3/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
68481 +++ linux-3.0.3/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
68482 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68483 INIT_LIST_HEAD(&qe->later);
68484 list_add_tail_rcu(&qe->later, &ima_measurements);
68485
68486 - atomic_long_inc(&ima_htable.len);
68487 + atomic_long_inc_unchecked(&ima_htable.len);
68488 key = ima_hash_key(entry->digest);
68489 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68490 return 0;
68491 diff -urNp linux-3.0.3/security/Kconfig linux-3.0.3/security/Kconfig
68492 --- linux-3.0.3/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
68493 +++ linux-3.0.3/security/Kconfig 2011-08-23 21:48:14.000000000 -0400
68494 @@ -4,6 +4,554 @@
68495
68496 menu "Security options"
68497
68498 +source grsecurity/Kconfig
68499 +
68500 +menu "PaX"
68501 +
68502 + config ARCH_TRACK_EXEC_LIMIT
68503 + bool
68504 +
68505 + config PAX_PER_CPU_PGD
68506 + bool
68507 +
68508 + config TASK_SIZE_MAX_SHIFT
68509 + int
68510 + depends on X86_64
68511 + default 47 if !PAX_PER_CPU_PGD
68512 + default 42 if PAX_PER_CPU_PGD
68513 +
68514 + config PAX_ENABLE_PAE
68515 + bool
68516 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68517 +
68518 +config PAX
68519 + bool "Enable various PaX features"
68520 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68521 + help
68522 + This allows you to enable various PaX features. PaX adds
68523 + intrusion prevention mechanisms to the kernel that reduce
68524 + the risks posed by exploitable memory corruption bugs.
68525 +
68526 +menu "PaX Control"
68527 + depends on PAX
68528 +
68529 +config PAX_SOFTMODE
68530 + bool 'Support soft mode'
68531 + select PAX_PT_PAX_FLAGS
68532 + help
68533 + Enabling this option will allow you to run PaX in soft mode, that
68534 + is, PaX features will not be enforced by default, only on executables
68535 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68536 + is the only way to mark executables for soft mode use.
68537 +
68538 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68539 + line option on boot. Furthermore you can control various PaX features
68540 + at runtime via the entries in /proc/sys/kernel/pax.
68541 +
68542 +config PAX_EI_PAX
68543 + bool 'Use legacy ELF header marking'
68544 + help
68545 + Enabling this option will allow you to control PaX features on
68546 + a per executable basis via the 'chpax' utility available at
68547 + http://pax.grsecurity.net/. The control flags will be read from
68548 + an otherwise reserved part of the ELF header. This marking has
68549 + numerous drawbacks (no support for soft-mode, toolchain does not
68550 + know about the non-standard use of the ELF header) therefore it
68551 + has been deprecated in favour of PT_PAX_FLAGS support.
68552 +
68553 + Note that if you enable PT_PAX_FLAGS marking support as well,
68554 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
68555 +
68556 +config PAX_PT_PAX_FLAGS
68557 + bool 'Use ELF program header marking'
68558 + help
68559 + Enabling this option will allow you to control PaX features on
68560 + a per executable basis via the 'paxctl' utility available at
68561 + http://pax.grsecurity.net/. The control flags will be read from
68562 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
68563 + has the benefits of supporting both soft mode and being fully
68564 + integrated into the toolchain (the binutils patch is available
68565 + from http://pax.grsecurity.net).
68566 +
68567 + If your toolchain does not support PT_PAX_FLAGS markings,
68568 + you can create one in most cases with 'paxctl -C'.
68569 +
68570 + Note that if you enable the legacy EI_PAX marking support as well,
68571 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
68572 +
68573 +choice
68574 + prompt 'MAC system integration'
68575 + default PAX_HAVE_ACL_FLAGS
68576 + help
68577 + Mandatory Access Control systems have the option of controlling
68578 + PaX flags on a per executable basis, choose the method supported
68579 + by your particular system.
68580 +
68581 + - "none": if your MAC system does not interact with PaX,
68582 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
68583 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
68584 +
68585 + NOTE: this option is for developers/integrators only.
68586 +
68587 + config PAX_NO_ACL_FLAGS
68588 + bool 'none'
68589 +
68590 + config PAX_HAVE_ACL_FLAGS
68591 + bool 'direct'
68592 +
68593 + config PAX_HOOK_ACL_FLAGS
68594 + bool 'hook'
68595 +endchoice
68596 +
68597 +endmenu
68598 +
68599 +menu "Non-executable pages"
68600 + depends on PAX
68601 +
68602 +config PAX_NOEXEC
68603 + bool "Enforce non-executable pages"
68604 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
68605 + help
68606 + By design some architectures do not allow for protecting memory
68607 + pages against execution or even if they do, Linux does not make
68608 + use of this feature. In practice this means that if a page is
68609 + readable (such as the stack or heap) it is also executable.
68610 +
68611 + There is a well known exploit technique that makes use of this
68612 + fact and a common programming mistake where an attacker can
68613 + introduce code of his choice somewhere in the attacked program's
68614 + memory (typically the stack or the heap) and then execute it.
68615 +
68616 + If the attacked program was running with different (typically
68617 + higher) privileges than that of the attacker, then he can elevate
68618 + his own privilege level (e.g. get a root shell, write to files for
68619 + which he does not have write access to, etc).
68620 +
68621 + Enabling this option will let you choose from various features
68622 + that prevent the injection and execution of 'foreign' code in
68623 + a program.
68624 +
68625 + This will also break programs that rely on the old behaviour and
68626 + expect that dynamically allocated memory via the malloc() family
68627 + of functions is executable (which it is not). Notable examples
68628 + are the XFree86 4.x server, the java runtime and wine.
68629 +
68630 +config PAX_PAGEEXEC
68631 + bool "Paging based non-executable pages"
68632 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
68633 + select S390_SWITCH_AMODE if S390
68634 + select S390_EXEC_PROTECT if S390
68635 + select ARCH_TRACK_EXEC_LIMIT if X86_32
68636 + help
68637 + This implementation is based on the paging feature of the CPU.
68638 + On i386 without hardware non-executable bit support there is a
68639 + variable but usually low performance impact, however on Intel's
68640 + P4 core based CPUs it is very high so you should not enable this
68641 + for kernels meant to be used on such CPUs.
68642 +
68643 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
68644 + with hardware non-executable bit support there is no performance
68645 + impact, on ppc the impact is negligible.
68646 +
68647 + Note that several architectures require various emulations due to
68648 + badly designed userland ABIs, this will cause a performance impact
68649 + but will disappear as soon as userland is fixed. For example, ppc
68650 + userland MUST have been built with secure-plt by a recent toolchain.
68651 +
68652 +config PAX_SEGMEXEC
68653 + bool "Segmentation based non-executable pages"
68654 + depends on PAX_NOEXEC && X86_32
68655 + help
68656 + This implementation is based on the segmentation feature of the
68657 + CPU and has a very small performance impact, however applications
68658 + will be limited to a 1.5 GB address space instead of the normal
68659 + 3 GB.
68660 +
68661 +config PAX_EMUTRAMP
68662 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
68663 + default y if PARISC
68664 + help
68665 + There are some programs and libraries that for one reason or
68666 + another attempt to execute special small code snippets from
68667 + non-executable memory pages. Most notable examples are the
68668 + signal handler return code generated by the kernel itself and
68669 + the GCC trampolines.
68670 +
68671 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
68672 + such programs will no longer work under your kernel.
68673 +
68674 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
68675 + utilities to enable trampoline emulation for the affected programs
68676 + yet still have the protection provided by the non-executable pages.
68677 +
68678 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
68679 + your system will not even boot.
68680 +
68681 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
68682 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
68683 + for the affected files.
68684 +
68685 + NOTE: enabling this feature *may* open up a loophole in the
68686 + protection provided by non-executable pages that an attacker
68687 + could abuse. Therefore the best solution is to not have any
68688 + files on your system that would require this option. This can
68689 + be achieved by not using libc5 (which relies on the kernel
68690 + signal handler return code) and not using or rewriting programs
68691 + that make use of the nested function implementation of GCC.
68692 + Skilled users can just fix GCC itself so that it implements
68693 + nested function calls in a way that does not interfere with PaX.
68694 +
68695 +config PAX_EMUSIGRT
68696 + bool "Automatically emulate sigreturn trampolines"
68697 + depends on PAX_EMUTRAMP && PARISC
68698 + default y
68699 + help
68700 + Enabling this option will have the kernel automatically detect
68701 + and emulate signal return trampolines executing on the stack
68702 + that would otherwise lead to task termination.
68703 +
68704 + This solution is intended as a temporary one for users with
68705 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
68706 + Modula-3 runtime, etc) or executables linked to such, basically
68707 + everything that does not specify its own SA_RESTORER function in
68708 + normal executable memory like glibc 2.1+ does.
68709 +
68710 + On parisc you MUST enable this option, otherwise your system will
68711 + not even boot.
68712 +
68713 + NOTE: this feature cannot be disabled on a per executable basis
68714 + and since it *does* open up a loophole in the protection provided
68715 + by non-executable pages, the best solution is to not have any
68716 + files on your system that would require this option.
68717 +
68718 +config PAX_MPROTECT
68719 + bool "Restrict mprotect()"
68720 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
68721 + help
68722 + Enabling this option will prevent programs from
68723 + - changing the executable status of memory pages that were
68724 + not originally created as executable,
68725 + - making read-only executable pages writable again,
68726 + - creating executable pages from anonymous memory,
68727 + - making read-only-after-relocations (RELRO) data pages writable again.
68728 +
68729 + You should say Y here to complete the protection provided by
68730 + the enforcement of non-executable pages.
68731 +
68732 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68733 + this feature on a per file basis.
68734 +
68735 +config PAX_MPROTECT_COMPAT
68736 + bool "Use legacy/compat protection demoting (read help)"
68737 + depends on PAX_MPROTECT
68738 + default n
68739 + help
68740 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
68741 + by sending the proper error code to the application. For some broken
68742 + userland, this can cause problems with Python or other applications. The
68743 + current implementation however allows for applications like clamav to
68744 + detect if JIT compilation/execution is allowed and to fall back gracefully
68745 + to an interpreter-based mode if it does not. While we encourage everyone
68746 + to use the current implementation as-is and push upstream to fix broken
68747 + userland (note that the RWX logging option can assist with this), in some
68748 + environments this may not be possible. Having to disable MPROTECT
68749 + completely on certain binaries reduces the security benefit of PaX,
68750 + so this option is provided for those environments to revert to the old
68751 + behavior.
68752 +
68753 +config PAX_ELFRELOCS
68754 + bool "Allow ELF text relocations (read help)"
68755 + depends on PAX_MPROTECT
68756 + default n
68757 + help
68758 + Non-executable pages and mprotect() restrictions are effective
68759 + in preventing the introduction of new executable code into an
68760 + attacked task's address space. There remain only two venues
68761 + for this kind of attack: if the attacker can execute already
68762 + existing code in the attacked task then he can either have it
68763 + create and mmap() a file containing his code or have it mmap()
68764 + an already existing ELF library that does not have position
68765 + independent code in it and use mprotect() on it to make it
68766 + writable and copy his code there. While protecting against
68767 + the former approach is beyond PaX, the latter can be prevented
68768 + by having only PIC ELF libraries on one's system (which do not
68769 + need to relocate their code). If you are sure this is your case,
68770 + as is the case with all modern Linux distributions, then leave
68771 + this option disabled. You should say 'n' here.
68772 +
68773 +config PAX_ETEXECRELOCS
68774 + bool "Allow ELF ET_EXEC text relocations"
68775 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
68776 + select PAX_ELFRELOCS
68777 + default y
68778 + help
68779 + On some architectures there are incorrectly created applications
68780 + that require text relocations and would not work without enabling
68781 + this option. If you are an alpha, ia64 or parisc user, you should
68782 + enable this option and disable it once you have made sure that
68783 + none of your applications need it.
68784 +
68785 +config PAX_EMUPLT
68786 + bool "Automatically emulate ELF PLT"
68787 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
68788 + default y
68789 + help
68790 + Enabling this option will have the kernel automatically detect
68791 + and emulate the Procedure Linkage Table entries in ELF files.
68792 + On some architectures such entries are in writable memory, and
68793 + become non-executable leading to task termination. Therefore
68794 + it is mandatory that you enable this option on alpha, parisc,
68795 + sparc and sparc64, otherwise your system would not even boot.
68796 +
68797 + NOTE: this feature *does* open up a loophole in the protection
68798 + provided by the non-executable pages, therefore the proper
68799 + solution is to modify the toolchain to produce a PLT that does
68800 + not need to be writable.
68801 +
68802 +config PAX_DLRESOLVE
68803 + bool 'Emulate old glibc resolver stub'
68804 + depends on PAX_EMUPLT && SPARC
68805 + default n
68806 + help
68807 + This option is needed if userland has an old glibc (before 2.4)
68808 + that puts a 'save' instruction into the runtime generated resolver
68809 + stub that needs special emulation.
68810 +
68811 +config PAX_KERNEXEC
68812 + bool "Enforce non-executable kernel pages"
68813 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
68814 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
68815 + help
68816 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
68817 + that is, enabling this option will make it harder to inject
68818 + and execute 'foreign' code in kernel memory itself.
68819 +
68820 + Note that on x86_64 kernels there is a known regression when
68821 + this feature and KVM/VMX are both enabled in the host kernel.
68822 +
68823 +config PAX_KERNEXEC_MODULE_TEXT
68824 + int "Minimum amount of memory reserved for module code"
68825 + default "4"
68826 + depends on PAX_KERNEXEC && X86_32 && MODULES
68827 + help
68828 + Due to implementation details the kernel must reserve a fixed
68829 + amount of memory for module code at compile time that cannot be
68830 + changed at runtime. Here you can specify the minimum amount
68831 + in MB that will be reserved. Due to the same implementation
68832 + details this size will always be rounded up to the next 2/4 MB
68833 + boundary (depends on PAE) so the actually available memory for
68834 + module code will usually be more than this minimum.
68835 +
68836 + The default 4 MB should be enough for most users but if you have
68837 + an excessive number of modules (e.g., most distribution configs
68838 + compile many drivers as modules) or use huge modules such as
68839 + nvidia's kernel driver, you will need to adjust this amount.
68840 + A good rule of thumb is to look at your currently loaded kernel
68841 + modules and add up their sizes.
68842 +
68843 +endmenu
68844 +
68845 +menu "Address Space Layout Randomization"
68846 + depends on PAX
68847 +
68848 +config PAX_ASLR
68849 + bool "Address Space Layout Randomization"
68850 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
68851 + help
68852 + Many if not most exploit techniques rely on the knowledge of
68853 + certain addresses in the attacked program. The following options
68854 + will allow the kernel to apply a certain amount of randomization
68855 + to specific parts of the program thereby forcing an attacker to
68856 + guess them in most cases. Any failed guess will most likely crash
68857 + the attacked program which allows the kernel to detect such attempts
68858 + and react on them. PaX itself provides no reaction mechanisms,
68859 + instead it is strongly encouraged that you make use of Nergal's
68860 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
68861 + (http://www.grsecurity.net/) built-in crash detection features or
68862 + develop one yourself.
68863 +
68864 + By saying Y here you can choose to randomize the following areas:
68865 + - top of the task's kernel stack
68866 + - top of the task's userland stack
68867 + - base address for mmap() requests that do not specify one
68868 + (this includes all libraries)
68869 + - base address of the main executable
68870 +
68871 + It is strongly recommended to say Y here as address space layout
68872 + randomization has negligible impact on performance yet it provides
68873 + a very effective protection.
68874 +
68875 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68876 + this feature on a per file basis.
68877 +
68878 +config PAX_RANDKSTACK
68879 + bool "Randomize kernel stack base"
68880 + depends on PAX_ASLR && X86_TSC && X86
68881 + help
68882 + By saying Y here the kernel will randomize every task's kernel
68883 + stack on every system call. This will not only force an attacker
68884 + to guess it but also prevent him from making use of possible
68885 + leaked information about it.
68886 +
68887 + Since the kernel stack is a rather scarce resource, randomization
68888 + may cause unexpected stack overflows, therefore you should very
68889 + carefully test your system. Note that once enabled in the kernel
68890 + configuration, this feature cannot be disabled on a per file basis.
68891 +
68892 +config PAX_RANDUSTACK
68893 + bool "Randomize user stack base"
68894 + depends on PAX_ASLR
68895 + help
68896 + By saying Y here the kernel will randomize every task's userland
68897 + stack. The randomization is done in two steps where the second
68898 + one may apply a big amount of shift to the top of the stack and
68899 + cause problems for programs that want to use lots of memory (more
68900 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
68901 + For this reason the second step can be controlled by 'chpax' or
68902 + 'paxctl' on a per file basis.
68903 +
68904 +config PAX_RANDMMAP
68905 + bool "Randomize mmap() base"
68906 + depends on PAX_ASLR
68907 + help
68908 + By saying Y here the kernel will use a randomized base address for
68909 + mmap() requests that do not specify one themselves. As a result
68910 + all dynamically loaded libraries will appear at random addresses
68911 + and therefore be harder to exploit by a technique where an attacker
68912 + attempts to execute library code for his purposes (e.g. spawn a
68913 + shell from an exploited program that is running at an elevated
68914 + privilege level).
68915 +
68916 + Furthermore, if a program is relinked as a dynamic ELF file, its
68917 + base address will be randomized as well, completing the full
68918 + randomization of the address space layout. Attacking such programs
68919 + becomes a guess game. You can find an example of doing this at
68920 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
68921 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
68922 +
68923 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
68924 + feature on a per file basis.
68925 +
68926 +endmenu
68927 +
68928 +menu "Miscellaneous hardening features"
68929 +
68930 +config PAX_MEMORY_SANITIZE
68931 + bool "Sanitize all freed memory"
68932 + help
68933 + By saying Y here the kernel will erase memory pages as soon as they
68934 + are freed. This in turn reduces the lifetime of data stored in the
68935 + pages, making it less likely that sensitive information such as
68936 + passwords, cryptographic secrets, etc stay in memory for too long.
68937 +
68938 + This is especially useful for programs whose runtime is short, long
68939 + lived processes and the kernel itself benefit from this as long as
68940 + they operate on whole memory pages and ensure timely freeing of pages
68941 + that may hold sensitive information.
68942 +
68943 + The tradeoff is performance impact, on a single CPU system kernel
68944 + compilation sees a 3% slowdown, other systems and workloads may vary
68945 + and you are advised to test this feature on your expected workload
68946 + before deploying it.
68947 +
68948 + Note that this feature does not protect data stored in live pages,
68949 + e.g., process memory swapped to disk may stay there for a long time.
68950 +
68951 +config PAX_MEMORY_STACKLEAK
68952 + bool "Sanitize kernel stack"
68953 + depends on X86
68954 + help
68955 + By saying Y here the kernel will erase the kernel stack before it
68956 + returns from a system call. This in turn reduces the information
68957 + that a kernel stack leak bug can reveal.
68958 +
68959 + Note that such a bug can still leak information that was put on
68960 + the stack by the current system call (the one eventually triggering
68961 + the bug) but traces of earlier system calls on the kernel stack
68962 + cannot leak anymore.
68963 +
68964 + The tradeoff is performance impact: on a single CPU system kernel
68965 + compilation sees a 1% slowdown, other systems and workloads may vary
68966 + and you are advised to test this feature on your expected workload
68967 + before deploying it.
68968 +
68969 + Note: full support for this feature requires gcc with plugin support
68970 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
68971 + is not supported). Using older gcc versions means that functions
68972 + with large enough stack frames may leave uninitialized memory behind
68973 + that may be exposed to a later syscall leaking the stack.
68974 +
68975 +config PAX_MEMORY_UDEREF
68976 + bool "Prevent invalid userland pointer dereference"
68977 + depends on X86 && !UML_X86 && !XEN
68978 + select PAX_PER_CPU_PGD if X86_64
68979 + help
68980 + By saying Y here the kernel will be prevented from dereferencing
68981 + userland pointers in contexts where the kernel expects only kernel
68982 + pointers. This is both a useful runtime debugging feature and a
68983 + security measure that prevents exploiting a class of kernel bugs.
68984 +
68985 + The tradeoff is that some virtualization solutions may experience
68986 + a huge slowdown and therefore you should not enable this feature
68987 + for kernels meant to run in such environments. Whether a given VM
68988 + solution is affected or not is best determined by simply trying it
68989 + out, the performance impact will be obvious right on boot as this
68990 + mechanism engages from very early on. A good rule of thumb is that
68991 + VMs running on CPUs without hardware virtualization support (i.e.,
68992 + the majority of IA-32 CPUs) will likely experience the slowdown.
68993 +
68994 +config PAX_REFCOUNT
68995 + bool "Prevent various kernel object reference counter overflows"
68996 + depends on GRKERNSEC && (X86 || SPARC64)
68997 + help
68998 + By saying Y here the kernel will detect and prevent overflowing
68999 + various (but not all) kinds of object reference counters. Such
69000 + overflows can normally occur due to bugs only and are often, if
69001 + not always, exploitable.
69002 +
69003 + The tradeoff is that data structures protected by an overflowed
69004 + refcount will never be freed and therefore will leak memory. Note
69005 + that this leak also happens even without this protection but in
69006 + that case the overflow can eventually trigger the freeing of the
69007 + data structure while it is still being used elsewhere, resulting
69008 + in the exploitable situation that this feature prevents.
69009 +
69010 + Since this has a negligible performance impact, you should enable
69011 + this feature.
69012 +
69013 +config PAX_USERCOPY
69014 + bool "Harden heap object copies between kernel and userland"
69015 + depends on X86 || PPC || SPARC || ARM
69016 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
69017 + help
69018 + By saying Y here the kernel will enforce the size of heap objects
69019 + when they are copied in either direction between the kernel and
69020 + userland, even if only a part of the heap object is copied.
69021 +
69022 + Specifically, this checking prevents information leaking from the
69023 + kernel heap during kernel to userland copies (if the kernel heap
69024 + object is otherwise fully initialized) and prevents kernel heap
69025 + overflows during userland to kernel copies.
69026 +
69027 + Note that the current implementation provides the strictest bounds
69028 + checks for the SLUB allocator.
69029 +
69030 + Enabling this option also enables per-slab cache protection against
69031 + data in a given cache being copied into/out of via userland
69032 + accessors. Though the whitelist of regions will be reduced over
69033 + time, it notably protects important data structures like task structs.
69034 +
69035 + If frame pointers are enabled on x86, this option will also restrict
69036 + copies into and out of the kernel stack to local variables within a
69037 + single frame.
69038 +
69039 + Since this has a negligible performance impact, you should enable
69040 + this feature.
69041 +
69042 +endmenu
69043 +
69044 +endmenu
69045 +
69046 config KEYS
69047 bool "Enable access key retention support"
69048 help
69049 @@ -167,7 +715,7 @@ config INTEL_TXT
69050 config LSM_MMAP_MIN_ADDR
69051 int "Low address space for LSM to protect from user allocation"
69052 depends on SECURITY && SECURITY_SELINUX
69053 - default 32768 if ARM
69054 + default 32768 if ALPHA || ARM || PARISC || SPARC32
69055 default 65536
69056 help
69057 This is the portion of low virtual memory which should be protected
69058 diff -urNp linux-3.0.3/security/keys/keyring.c linux-3.0.3/security/keys/keyring.c
69059 --- linux-3.0.3/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
69060 +++ linux-3.0.3/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
69061 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
69062 ret = -EFAULT;
69063
69064 for (loop = 0; loop < klist->nkeys; loop++) {
69065 + key_serial_t serial;
69066 key = klist->keys[loop];
69067 + serial = key->serial;
69068
69069 tmp = sizeof(key_serial_t);
69070 if (tmp > buflen)
69071 tmp = buflen;
69072
69073 - if (copy_to_user(buffer,
69074 - &key->serial,
69075 - tmp) != 0)
69076 + if (copy_to_user(buffer, &serial, tmp))
69077 goto error;
69078
69079 buflen -= tmp;
69080 diff -urNp linux-3.0.3/security/min_addr.c linux-3.0.3/security/min_addr.c
69081 --- linux-3.0.3/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
69082 +++ linux-3.0.3/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
69083 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69084 */
69085 static void update_mmap_min_addr(void)
69086 {
69087 +#ifndef SPARC
69088 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69089 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69090 mmap_min_addr = dac_mmap_min_addr;
69091 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69092 #else
69093 mmap_min_addr = dac_mmap_min_addr;
69094 #endif
69095 +#endif
69096 }
69097
69098 /*
69099 diff -urNp linux-3.0.3/security/security.c linux-3.0.3/security/security.c
69100 --- linux-3.0.3/security/security.c 2011-07-21 22:17:23.000000000 -0400
69101 +++ linux-3.0.3/security/security.c 2011-08-23 21:48:14.000000000 -0400
69102 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69103 /* things that live in capability.c */
69104 extern void __init security_fixup_ops(struct security_operations *ops);
69105
69106 -static struct security_operations *security_ops;
69107 -static struct security_operations default_security_ops = {
69108 +static struct security_operations *security_ops __read_only;
69109 +static struct security_operations default_security_ops __read_only = {
69110 .name = "default",
69111 };
69112
69113 @@ -67,7 +67,9 @@ int __init security_init(void)
69114
69115 void reset_security_ops(void)
69116 {
69117 + pax_open_kernel();
69118 security_ops = &default_security_ops;
69119 + pax_close_kernel();
69120 }
69121
69122 /* Save user chosen LSM */
69123 diff -urNp linux-3.0.3/security/selinux/hooks.c linux-3.0.3/security/selinux/hooks.c
69124 --- linux-3.0.3/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
69125 +++ linux-3.0.3/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
69126 @@ -93,7 +93,6 @@
69127 #define NUM_SEL_MNT_OPTS 5
69128
69129 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69130 -extern struct security_operations *security_ops;
69131
69132 /* SECMARK reference count */
69133 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69134 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
69135
69136 #endif
69137
69138 -static struct security_operations selinux_ops = {
69139 +static struct security_operations selinux_ops __read_only = {
69140 .name = "selinux",
69141
69142 .ptrace_access_check = selinux_ptrace_access_check,
69143 diff -urNp linux-3.0.3/security/selinux/include/xfrm.h linux-3.0.3/security/selinux/include/xfrm.h
69144 --- linux-3.0.3/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
69145 +++ linux-3.0.3/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
69146 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69147
69148 static inline void selinux_xfrm_notify_policyload(void)
69149 {
69150 - atomic_inc(&flow_cache_genid);
69151 + atomic_inc_unchecked(&flow_cache_genid);
69152 }
69153 #else
69154 static inline int selinux_xfrm_enabled(void)
69155 diff -urNp linux-3.0.3/security/selinux/ss/services.c linux-3.0.3/security/selinux/ss/services.c
69156 --- linux-3.0.3/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
69157 +++ linux-3.0.3/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
69158 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
69159 int rc = 0;
69160 struct policy_file file = { data, len }, *fp = &file;
69161
69162 + pax_track_stack();
69163 +
69164 if (!ss_initialized) {
69165 avtab_cache_init();
69166 rc = policydb_read(&policydb, fp);
69167 diff -urNp linux-3.0.3/security/smack/smack_lsm.c linux-3.0.3/security/smack/smack_lsm.c
69168 --- linux-3.0.3/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
69169 +++ linux-3.0.3/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
69170 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
69171 return 0;
69172 }
69173
69174 -struct security_operations smack_ops = {
69175 +struct security_operations smack_ops __read_only = {
69176 .name = "smack",
69177
69178 .ptrace_access_check = smack_ptrace_access_check,
69179 diff -urNp linux-3.0.3/security/tomoyo/tomoyo.c linux-3.0.3/security/tomoyo/tomoyo.c
69180 --- linux-3.0.3/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
69181 +++ linux-3.0.3/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
69182 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69183 * tomoyo_security_ops is a "struct security_operations" which is used for
69184 * registering TOMOYO.
69185 */
69186 -static struct security_operations tomoyo_security_ops = {
69187 +static struct security_operations tomoyo_security_ops __read_only = {
69188 .name = "tomoyo",
69189 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69190 .cred_prepare = tomoyo_cred_prepare,
69191 diff -urNp linux-3.0.3/sound/aoa/codecs/onyx.c linux-3.0.3/sound/aoa/codecs/onyx.c
69192 --- linux-3.0.3/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
69193 +++ linux-3.0.3/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
69194 @@ -54,7 +54,7 @@ struct onyx {
69195 spdif_locked:1,
69196 analog_locked:1,
69197 original_mute:2;
69198 - int open_count;
69199 + local_t open_count;
69200 struct codec_info *codec_info;
69201
69202 /* mutex serializes concurrent access to the device
69203 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69204 struct onyx *onyx = cii->codec_data;
69205
69206 mutex_lock(&onyx->mutex);
69207 - onyx->open_count++;
69208 + local_inc(&onyx->open_count);
69209 mutex_unlock(&onyx->mutex);
69210
69211 return 0;
69212 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69213 struct onyx *onyx = cii->codec_data;
69214
69215 mutex_lock(&onyx->mutex);
69216 - onyx->open_count--;
69217 - if (!onyx->open_count)
69218 + if (local_dec_and_test(&onyx->open_count))
69219 onyx->spdif_locked = onyx->analog_locked = 0;
69220 mutex_unlock(&onyx->mutex);
69221
69222 diff -urNp linux-3.0.3/sound/aoa/codecs/onyx.h linux-3.0.3/sound/aoa/codecs/onyx.h
69223 --- linux-3.0.3/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
69224 +++ linux-3.0.3/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
69225 @@ -11,6 +11,7 @@
69226 #include <linux/i2c.h>
69227 #include <asm/pmac_low_i2c.h>
69228 #include <asm/prom.h>
69229 +#include <asm/local.h>
69230
69231 /* PCM3052 register definitions */
69232
69233 diff -urNp linux-3.0.3/sound/core/seq/seq_device.c linux-3.0.3/sound/core/seq/seq_device.c
69234 --- linux-3.0.3/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
69235 +++ linux-3.0.3/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
69236 @@ -63,7 +63,7 @@ struct ops_list {
69237 int argsize; /* argument size */
69238
69239 /* operators */
69240 - struct snd_seq_dev_ops ops;
69241 + struct snd_seq_dev_ops *ops;
69242
69243 /* registred devices */
69244 struct list_head dev_list; /* list of devices */
69245 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69246
69247 mutex_lock(&ops->reg_mutex);
69248 /* copy driver operators */
69249 - ops->ops = *entry;
69250 + ops->ops = entry;
69251 ops->driver |= DRIVER_LOADED;
69252 ops->argsize = argsize;
69253
69254 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69255 dev->name, ops->id, ops->argsize, dev->argsize);
69256 return -EINVAL;
69257 }
69258 - if (ops->ops.init_device(dev) >= 0) {
69259 + if (ops->ops->init_device(dev) >= 0) {
69260 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69261 ops->num_init_devices++;
69262 } else {
69263 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69264 dev->name, ops->id, ops->argsize, dev->argsize);
69265 return -EINVAL;
69266 }
69267 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69268 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69269 dev->status = SNDRV_SEQ_DEVICE_FREE;
69270 dev->driver_data = NULL;
69271 ops->num_init_devices--;
69272 diff -urNp linux-3.0.3/sound/drivers/mts64.c linux-3.0.3/sound/drivers/mts64.c
69273 --- linux-3.0.3/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
69274 +++ linux-3.0.3/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
69275 @@ -28,6 +28,7 @@
69276 #include <sound/initval.h>
69277 #include <sound/rawmidi.h>
69278 #include <sound/control.h>
69279 +#include <asm/local.h>
69280
69281 #define CARD_NAME "Miditerminal 4140"
69282 #define DRIVER_NAME "MTS64"
69283 @@ -66,7 +67,7 @@ struct mts64 {
69284 struct pardevice *pardev;
69285 int pardev_claimed;
69286
69287 - int open_count;
69288 + local_t open_count;
69289 int current_midi_output_port;
69290 int current_midi_input_port;
69291 u8 mode[MTS64_NUM_INPUT_PORTS];
69292 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69293 {
69294 struct mts64 *mts = substream->rmidi->private_data;
69295
69296 - if (mts->open_count == 0) {
69297 + if (local_read(&mts->open_count) == 0) {
69298 /* We don't need a spinlock here, because this is just called
69299 if the device has not been opened before.
69300 So there aren't any IRQs from the device */
69301 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69302
69303 msleep(50);
69304 }
69305 - ++(mts->open_count);
69306 + local_inc(&mts->open_count);
69307
69308 return 0;
69309 }
69310 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69311 struct mts64 *mts = substream->rmidi->private_data;
69312 unsigned long flags;
69313
69314 - --(mts->open_count);
69315 - if (mts->open_count == 0) {
69316 + if (local_dec_return(&mts->open_count) == 0) {
69317 /* We need the spinlock_irqsave here because we can still
69318 have IRQs at this point */
69319 spin_lock_irqsave(&mts->lock, flags);
69320 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69321
69322 msleep(500);
69323
69324 - } else if (mts->open_count < 0)
69325 - mts->open_count = 0;
69326 + } else if (local_read(&mts->open_count) < 0)
69327 + local_set(&mts->open_count, 0);
69328
69329 return 0;
69330 }
69331 diff -urNp linux-3.0.3/sound/drivers/opl4/opl4_lib.c linux-3.0.3/sound/drivers/opl4/opl4_lib.c
69332 --- linux-3.0.3/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
69333 +++ linux-3.0.3/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
69334 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69335 MODULE_DESCRIPTION("OPL4 driver");
69336 MODULE_LICENSE("GPL");
69337
69338 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69339 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69340 {
69341 int timeout = 10;
69342 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69343 diff -urNp linux-3.0.3/sound/drivers/portman2x4.c linux-3.0.3/sound/drivers/portman2x4.c
69344 --- linux-3.0.3/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
69345 +++ linux-3.0.3/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
69346 @@ -47,6 +47,7 @@
69347 #include <sound/initval.h>
69348 #include <sound/rawmidi.h>
69349 #include <sound/control.h>
69350 +#include <asm/local.h>
69351
69352 #define CARD_NAME "Portman 2x4"
69353 #define DRIVER_NAME "portman"
69354 @@ -84,7 +85,7 @@ struct portman {
69355 struct pardevice *pardev;
69356 int pardev_claimed;
69357
69358 - int open_count;
69359 + local_t open_count;
69360 int mode[PORTMAN_NUM_INPUT_PORTS];
69361 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69362 };
69363 diff -urNp linux-3.0.3/sound/firewire/amdtp.c linux-3.0.3/sound/firewire/amdtp.c
69364 --- linux-3.0.3/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
69365 +++ linux-3.0.3/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
69366 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69367 ptr = s->pcm_buffer_pointer + data_blocks;
69368 if (ptr >= pcm->runtime->buffer_size)
69369 ptr -= pcm->runtime->buffer_size;
69370 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69371 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69372
69373 s->pcm_period_pointer += data_blocks;
69374 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69375 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69376 */
69377 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69378 {
69379 - ACCESS_ONCE(s->source_node_id_field) =
69380 + ACCESS_ONCE_RW(s->source_node_id_field) =
69381 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69382 }
69383 EXPORT_SYMBOL(amdtp_out_stream_update);
69384 diff -urNp linux-3.0.3/sound/firewire/amdtp.h linux-3.0.3/sound/firewire/amdtp.h
69385 --- linux-3.0.3/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
69386 +++ linux-3.0.3/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
69387 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69388 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69389 struct snd_pcm_substream *pcm)
69390 {
69391 - ACCESS_ONCE(s->pcm) = pcm;
69392 + ACCESS_ONCE_RW(s->pcm) = pcm;
69393 }
69394
69395 /**
69396 diff -urNp linux-3.0.3/sound/firewire/isight.c linux-3.0.3/sound/firewire/isight.c
69397 --- linux-3.0.3/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
69398 +++ linux-3.0.3/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
69399 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
69400 ptr += count;
69401 if (ptr >= runtime->buffer_size)
69402 ptr -= runtime->buffer_size;
69403 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
69404 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
69405
69406 isight->period_counter += count;
69407 if (isight->period_counter >= runtime->period_size) {
69408 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
69409 if (err < 0)
69410 return err;
69411
69412 - ACCESS_ONCE(isight->pcm_active) = true;
69413 + ACCESS_ONCE_RW(isight->pcm_active) = true;
69414
69415 return 0;
69416 }
69417 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
69418 {
69419 struct isight *isight = substream->private_data;
69420
69421 - ACCESS_ONCE(isight->pcm_active) = false;
69422 + ACCESS_ONCE_RW(isight->pcm_active) = false;
69423
69424 mutex_lock(&isight->mutex);
69425 isight_stop_streaming(isight);
69426 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
69427
69428 switch (cmd) {
69429 case SNDRV_PCM_TRIGGER_START:
69430 - ACCESS_ONCE(isight->pcm_running) = true;
69431 + ACCESS_ONCE_RW(isight->pcm_running) = true;
69432 break;
69433 case SNDRV_PCM_TRIGGER_STOP:
69434 - ACCESS_ONCE(isight->pcm_running) = false;
69435 + ACCESS_ONCE_RW(isight->pcm_running) = false;
69436 break;
69437 default:
69438 return -EINVAL;
69439 diff -urNp linux-3.0.3/sound/isa/cmi8330.c linux-3.0.3/sound/isa/cmi8330.c
69440 --- linux-3.0.3/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
69441 +++ linux-3.0.3/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
69442 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69443
69444 struct snd_pcm *pcm;
69445 struct snd_cmi8330_stream {
69446 - struct snd_pcm_ops ops;
69447 + snd_pcm_ops_no_const ops;
69448 snd_pcm_open_callback_t open;
69449 void *private_data; /* sb or wss */
69450 } streams[2];
69451 diff -urNp linux-3.0.3/sound/oss/sb_audio.c linux-3.0.3/sound/oss/sb_audio.c
69452 --- linux-3.0.3/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
69453 +++ linux-3.0.3/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
69454 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69455 buf16 = (signed short *)(localbuf + localoffs);
69456 while (c)
69457 {
69458 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69459 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69460 if (copy_from_user(lbuf8,
69461 userbuf+useroffs + p,
69462 locallen))
69463 diff -urNp linux-3.0.3/sound/oss/swarm_cs4297a.c linux-3.0.3/sound/oss/swarm_cs4297a.c
69464 --- linux-3.0.3/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
69465 +++ linux-3.0.3/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
69466 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69467 {
69468 struct cs4297a_state *s;
69469 u32 pwr, id;
69470 - mm_segment_t fs;
69471 int rval;
69472 #ifndef CONFIG_BCM_CS4297A_CSWARM
69473 u64 cfg;
69474 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69475 if (!rval) {
69476 char *sb1250_duart_present;
69477
69478 +#if 0
69479 + mm_segment_t fs;
69480 fs = get_fs();
69481 set_fs(KERNEL_DS);
69482 -#if 0
69483 val = SOUND_MASK_LINE;
69484 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69485 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69486 val = initvol[i].vol;
69487 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69488 }
69489 + set_fs(fs);
69490 // cs4297a_write_ac97(s, 0x18, 0x0808);
69491 #else
69492 // cs4297a_write_ac97(s, 0x5e, 0x180);
69493 cs4297a_write_ac97(s, 0x02, 0x0808);
69494 cs4297a_write_ac97(s, 0x18, 0x0808);
69495 #endif
69496 - set_fs(fs);
69497
69498 list_add(&s->list, &cs4297a_devs);
69499
69500 diff -urNp linux-3.0.3/sound/pci/hda/hda_codec.h linux-3.0.3/sound/pci/hda/hda_codec.h
69501 --- linux-3.0.3/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
69502 +++ linux-3.0.3/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
69503 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69504 /* notify power-up/down from codec to controller */
69505 void (*pm_notify)(struct hda_bus *bus);
69506 #endif
69507 -};
69508 +} __no_const;
69509
69510 /* template to pass to the bus constructor */
69511 struct hda_bus_template {
69512 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69513 #endif
69514 void (*reboot_notify)(struct hda_codec *codec);
69515 };
69516 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69517
69518 /* record for amp information cache */
69519 struct hda_cache_head {
69520 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69521 struct snd_pcm_substream *substream);
69522 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69523 struct snd_pcm_substream *substream);
69524 -};
69525 +} __no_const;
69526
69527 /* PCM information for each substream */
69528 struct hda_pcm_stream {
69529 @@ -801,7 +802,7 @@ struct hda_codec {
69530 const char *modelname; /* model name for preset */
69531
69532 /* set by patch */
69533 - struct hda_codec_ops patch_ops;
69534 + hda_codec_ops_no_const patch_ops;
69535
69536 /* PCM to create, set by patch_ops.build_pcms callback */
69537 unsigned int num_pcms;
69538 diff -urNp linux-3.0.3/sound/pci/ice1712/ice1712.h linux-3.0.3/sound/pci/ice1712/ice1712.h
69539 --- linux-3.0.3/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
69540 +++ linux-3.0.3/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
69541 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69542 unsigned int mask_flags; /* total mask bits */
69543 struct snd_akm4xxx_ops {
69544 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69545 - } ops;
69546 + } __no_const ops;
69547 };
69548
69549 struct snd_ice1712_spdif {
69550 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69551 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69552 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69553 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69554 - } ops;
69555 + } __no_const ops;
69556 };
69557
69558
69559 diff -urNp linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c
69560 --- linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
69561 +++ linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
69562 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69563 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69564 break;
69565 }
69566 - if (atomic_read(&chip->interrupt_sleep_count)) {
69567 - atomic_set(&chip->interrupt_sleep_count, 0);
69568 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69569 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69570 wake_up(&chip->interrupt_sleep);
69571 }
69572 __end:
69573 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69574 continue;
69575 init_waitqueue_entry(&wait, current);
69576 add_wait_queue(&chip->interrupt_sleep, &wait);
69577 - atomic_inc(&chip->interrupt_sleep_count);
69578 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69579 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69580 remove_wait_queue(&chip->interrupt_sleep, &wait);
69581 }
69582 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
69583 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
69584 spin_unlock(&chip->reg_lock);
69585
69586 - if (atomic_read(&chip->interrupt_sleep_count)) {
69587 - atomic_set(&chip->interrupt_sleep_count, 0);
69588 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69589 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69590 wake_up(&chip->interrupt_sleep);
69591 }
69592 }
69593 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
69594 spin_lock_init(&chip->reg_lock);
69595 spin_lock_init(&chip->voice_lock);
69596 init_waitqueue_head(&chip->interrupt_sleep);
69597 - atomic_set(&chip->interrupt_sleep_count, 0);
69598 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69599 chip->card = card;
69600 chip->pci = pci;
69601 chip->irq = -1;
69602 diff -urNp linux-3.0.3/sound/soc/soc-core.c linux-3.0.3/sound/soc/soc-core.c
69603 --- linux-3.0.3/sound/soc/soc-core.c 2011-08-23 21:44:40.000000000 -0400
69604 +++ linux-3.0.3/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
69605 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
69606 }
69607
69608 /* ASoC PCM operations */
69609 -static struct snd_pcm_ops soc_pcm_ops = {
69610 +static snd_pcm_ops_no_const soc_pcm_ops = {
69611 .open = soc_pcm_open,
69612 .close = soc_codec_close,
69613 .hw_params = soc_pcm_hw_params,
69614 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
69615 rtd->pcm = pcm;
69616 pcm->private_data = rtd;
69617 if (platform->driver->ops) {
69618 + /* this whole logic is broken... */
69619 soc_pcm_ops.mmap = platform->driver->ops->mmap;
69620 soc_pcm_ops.pointer = platform->driver->ops->pointer;
69621 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
69622 diff -urNp linux-3.0.3/sound/usb/card.h linux-3.0.3/sound/usb/card.h
69623 --- linux-3.0.3/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
69624 +++ linux-3.0.3/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
69625 @@ -44,6 +44,7 @@ struct snd_urb_ops {
69626 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69627 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69628 };
69629 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
69630
69631 struct snd_usb_substream {
69632 struct snd_usb_stream *stream;
69633 @@ -93,7 +94,7 @@ struct snd_usb_substream {
69634 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
69635 spinlock_t lock;
69636
69637 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
69638 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
69639 };
69640
69641 struct snd_usb_stream {
69642 diff -urNp linux-3.0.3/tools/gcc/constify_plugin.c linux-3.0.3/tools/gcc/constify_plugin.c
69643 --- linux-3.0.3/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
69644 +++ linux-3.0.3/tools/gcc/constify_plugin.c 2011-08-29 22:01:36.000000000 -0400
69645 @@ -0,0 +1,289 @@
69646 +/*
69647 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
69648 + * Licensed under the GPL v2, or (at your option) v3
69649 + *
69650 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
69651 + *
69652 + * Usage:
69653 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
69654 + * $ gcc -fplugin=constify_plugin.so test.c -O2
69655 + */
69656 +
69657 +#include "gcc-plugin.h"
69658 +#include "config.h"
69659 +#include "system.h"
69660 +#include "coretypes.h"
69661 +#include "tree.h"
69662 +#include "tree-pass.h"
69663 +#include "intl.h"
69664 +#include "plugin-version.h"
69665 +#include "tm.h"
69666 +#include "toplev.h"
69667 +#include "function.h"
69668 +#include "tree-flow.h"
69669 +#include "plugin.h"
69670 +#include "diagnostic.h"
69671 +//#include "c-tree.h"
69672 +
69673 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
69674 +
69675 +int plugin_is_GPL_compatible;
69676 +
69677 +static struct plugin_info const_plugin_info = {
69678 + .version = "20110826",
69679 + .help = "no-constify\tturn off constification\n",
69680 +};
69681 +
69682 +static void constify_type(tree type);
69683 +static bool walk_struct(tree node);
69684 +
69685 +static tree deconstify_type(tree old_type)
69686 +{
69687 + tree new_type, field;
69688 +
69689 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
69690 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
69691 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
69692 + DECL_FIELD_CONTEXT(field) = new_type;
69693 + TYPE_READONLY(new_type) = 0;
69694 + C_TYPE_FIELDS_READONLY(new_type) = 0;
69695 + return new_type;
69696 +}
69697 +
69698 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
69699 +{
69700 + tree type;
69701 +
69702 + *no_add_attrs = true;
69703 + if (TREE_CODE(*node) == FUNCTION_DECL) {
69704 + error("%qE attribute does not apply to functions", name);
69705 + return NULL_TREE;
69706 + }
69707 +
69708 + if (TREE_CODE(*node) == VAR_DECL) {
69709 + error("%qE attribute does not apply to variables", name);
69710 + return NULL_TREE;
69711 + }
69712 +
69713 + if (TYPE_P(*node)) {
69714 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
69715 + *no_add_attrs = false;
69716 + else
69717 + error("%qE attribute applies to struct and union types only", name);
69718 + return NULL_TREE;
69719 + }
69720 +
69721 + type = TREE_TYPE(*node);
69722 +
69723 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
69724 + error("%qE attribute applies to struct and union types only", name);
69725 + return NULL_TREE;
69726 + }
69727 +
69728 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
69729 + error("%qE attribute is already applied to the type", name);
69730 + return NULL_TREE;
69731 + }
69732 +
69733 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
69734 + error("%qE attribute used on type that is not constified", name);
69735 + return NULL_TREE;
69736 + }
69737 +
69738 + if (TREE_CODE(*node) == TYPE_DECL) {
69739 + TREE_TYPE(*node) = deconstify_type(type);
69740 + TREE_READONLY(*node) = 0;
69741 + return NULL_TREE;
69742 + }
69743 +
69744 + return NULL_TREE;
69745 +}
69746 +
69747 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
69748 +{
69749 + *no_add_attrs = true;
69750 + if (!TYPE_P(*node)) {
69751 + error("%qE attribute applies to types only", name);
69752 + return NULL_TREE;
69753 + }
69754 +
69755 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
69756 + error("%qE attribute applies to struct and union types only", name);
69757 + return NULL_TREE;
69758 + }
69759 +
69760 + *no_add_attrs = false;
69761 + constify_type(*node);
69762 + return NULL_TREE;
69763 +}
69764 +
69765 +static struct attribute_spec no_const_attr = {
69766 + .name = "no_const",
69767 + .min_length = 0,
69768 + .max_length = 0,
69769 + .decl_required = false,
69770 + .type_required = false,
69771 + .function_type_required = false,
69772 + .handler = handle_no_const_attribute
69773 +};
69774 +
69775 +static struct attribute_spec do_const_attr = {
69776 + .name = "do_const",
69777 + .min_length = 0,
69778 + .max_length = 0,
69779 + .decl_required = false,
69780 + .type_required = false,
69781 + .function_type_required = false,
69782 + .handler = handle_do_const_attribute
69783 +};
69784 +
69785 +static void register_attributes(void *event_data, void *data)
69786 +{
69787 + register_attribute(&no_const_attr);
69788 + register_attribute(&do_const_attr);
69789 +}
69790 +
69791 +static void constify_type(tree type)
69792 +{
69793 + TYPE_READONLY(type) = 1;
69794 + C_TYPE_FIELDS_READONLY(type) = 1;
69795 +}
69796 +
69797 +static bool is_fptr(tree field)
69798 +{
69799 + tree ptr = TREE_TYPE(field);
69800 +
69801 + if (TREE_CODE(ptr) != POINTER_TYPE)
69802 + return false;
69803 +
69804 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
69805 +}
69806 +
69807 +static bool walk_struct(tree node)
69808 +{
69809 + tree field;
69810 +
69811 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
69812 + return false;
69813 +
69814 + if (TYPE_FIELDS(node) == NULL_TREE)
69815 + return false;
69816 +
69817 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
69818 + tree type = TREE_TYPE(field);
69819 + enum tree_code code = TREE_CODE(type);
69820 + if (code == RECORD_TYPE || code == UNION_TYPE) {
69821 + if (!(walk_struct(type)))
69822 + return false;
69823 + } else if (!is_fptr(field) && !TREE_READONLY(field))
69824 + return false;
69825 + }
69826 + return true;
69827 +}
69828 +
69829 +static void finish_type(void *event_data, void *data)
69830 +{
69831 + tree type = (tree)event_data;
69832 +
69833 + if (type == NULL_TREE)
69834 + return;
69835 +
69836 + if (TYPE_READONLY(type))
69837 + return;
69838 +
69839 + if (walk_struct(type))
69840 + constify_type(type);
69841 +}
69842 +
69843 +static unsigned int check_local_variables(void);
69844 +
69845 +struct gimple_opt_pass pass_local_variable = {
69846 + {
69847 + .type = GIMPLE_PASS,
69848 + .name = "check_local_variables",
69849 + .gate = NULL,
69850 + .execute = check_local_variables,
69851 + .sub = NULL,
69852 + .next = NULL,
69853 + .static_pass_number = 0,
69854 + .tv_id = TV_NONE,
69855 + .properties_required = 0,
69856 + .properties_provided = 0,
69857 + .properties_destroyed = 0,
69858 + .todo_flags_start = 0,
69859 + .todo_flags_finish = 0
69860 + }
69861 +};
69862 +
69863 +static unsigned int check_local_variables(void)
69864 +{
69865 + tree var;
69866 + referenced_var_iterator rvi;
69867 +
69868 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
69869 + FOR_EACH_REFERENCED_VAR(var, rvi) {
69870 +#else
69871 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
69872 +#endif
69873 + tree type = TREE_TYPE(var);
69874 +
69875 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
69876 + continue;
69877 +
69878 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
69879 + continue;
69880 +
69881 + if (!TYPE_READONLY(type))
69882 + continue;
69883 +
69884 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
69885 +// continue;
69886 +
69887 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
69888 +// continue;
69889 +
69890 + if (walk_struct(type)) {
69891 + error("constified variable %qE cannot be local", var);
69892 + return 1;
69893 + }
69894 + }
69895 + return 0;
69896 +}
69897 +
69898 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
69899 +{
69900 + const char * const plugin_name = plugin_info->base_name;
69901 + const int argc = plugin_info->argc;
69902 + const struct plugin_argument * const argv = plugin_info->argv;
69903 + int i;
69904 + bool constify = true;
69905 +
69906 + struct register_pass_info local_variable_pass_info = {
69907 + .pass = &pass_local_variable.pass,
69908 + .reference_pass_name = "*referenced_vars",
69909 + .ref_pass_instance_number = 0,
69910 + .pos_op = PASS_POS_INSERT_AFTER
69911 + };
69912 +
69913 + if (!plugin_default_version_check(version, &gcc_version)) {
69914 + error(G_("incompatible gcc/plugin versions"));
69915 + return 1;
69916 + }
69917 +
69918 + for (i = 0; i < argc; ++i) {
69919 + if (!(strcmp(argv[i].key, "no-constify"))) {
69920 + constify = false;
69921 + continue;
69922 + }
69923 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
69924 + }
69925 +
69926 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
69927 + if (constify) {
69928 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
69929 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
69930 + }
69931 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
69932 +
69933 + return 0;
69934 +}
69935 diff -urNp linux-3.0.3/tools/gcc/Makefile linux-3.0.3/tools/gcc/Makefile
69936 --- linux-3.0.3/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
69937 +++ linux-3.0.3/tools/gcc/Makefile 2011-08-23 21:47:56.000000000 -0400
69938 @@ -0,0 +1,12 @@
69939 +#CC := gcc
69940 +#PLUGIN_SOURCE_FILES := pax_plugin.c
69941 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
69942 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
69943 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
69944 +
69945 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
69946 +
69947 +hostlibs-y := stackleak_plugin.so constify_plugin.so
69948 +always := $(hostlibs-y)
69949 +stackleak_plugin-objs := stackleak_plugin.o
69950 +constify_plugin-objs := constify_plugin.o
69951 diff -urNp linux-3.0.3/tools/gcc/stackleak_plugin.c linux-3.0.3/tools/gcc/stackleak_plugin.c
69952 --- linux-3.0.3/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
69953 +++ linux-3.0.3/tools/gcc/stackleak_plugin.c 2011-08-23 21:47:56.000000000 -0400
69954 @@ -0,0 +1,243 @@
69955 +/*
69956 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
69957 + * Licensed under the GPL v2
69958 + *
69959 + * Note: the choice of the license means that the compilation process is
69960 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
69961 + * but for the kernel it doesn't matter since it doesn't link against
69962 + * any of the gcc libraries
69963 + *
69964 + * gcc plugin to help implement various PaX features
69965 + *
69966 + * - track lowest stack pointer
69967 + *
69968 + * TODO:
69969 + * - initialize all local variables
69970 + *
69971 + * BUGS:
69972 + * - cloned functions are instrumented twice
69973 + */
69974 +#include "gcc-plugin.h"
69975 +#include "config.h"
69976 +#include "system.h"
69977 +#include "coretypes.h"
69978 +#include "tree.h"
69979 +#include "tree-pass.h"
69980 +#include "intl.h"
69981 +#include "plugin-version.h"
69982 +#include "tm.h"
69983 +#include "toplev.h"
69984 +#include "basic-block.h"
69985 +#include "gimple.h"
69986 +//#include "expr.h" where are you...
69987 +#include "diagnostic.h"
69988 +#include "rtl.h"
69989 +#include "emit-rtl.h"
69990 +#include "function.h"
69991 +
69992 +int plugin_is_GPL_compatible;
69993 +
69994 +static int track_frame_size = -1;
69995 +static const char track_function[] = "pax_track_stack";
69996 +static bool init_locals;
69997 +
69998 +static struct plugin_info stackleak_plugin_info = {
69999 + .version = "201106030000",
70000 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
70001 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
70002 +};
70003 +
70004 +static bool gate_stackleak_track_stack(void);
70005 +static unsigned int execute_stackleak_tree_instrument(void);
70006 +static unsigned int execute_stackleak_final(void);
70007 +
70008 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
70009 + .pass = {
70010 + .type = GIMPLE_PASS,
70011 + .name = "stackleak_tree_instrument",
70012 + .gate = gate_stackleak_track_stack,
70013 + .execute = execute_stackleak_tree_instrument,
70014 + .sub = NULL,
70015 + .next = NULL,
70016 + .static_pass_number = 0,
70017 + .tv_id = TV_NONE,
70018 + .properties_required = PROP_gimple_leh | PROP_cfg,
70019 + .properties_provided = 0,
70020 + .properties_destroyed = 0,
70021 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
70022 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
70023 + }
70024 +};
70025 +
70026 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
70027 + .pass = {
70028 + .type = RTL_PASS,
70029 + .name = "stackleak_final",
70030 + .gate = gate_stackleak_track_stack,
70031 + .execute = execute_stackleak_final,
70032 + .sub = NULL,
70033 + .next = NULL,
70034 + .static_pass_number = 0,
70035 + .tv_id = TV_NONE,
70036 + .properties_required = 0,
70037 + .properties_provided = 0,
70038 + .properties_destroyed = 0,
70039 + .todo_flags_start = 0,
70040 + .todo_flags_finish = 0
70041 + }
70042 +};
70043 +
70044 +static bool gate_stackleak_track_stack(void)
70045 +{
70046 + return track_frame_size >= 0;
70047 +}
70048 +
70049 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
70050 +{
70051 + gimple call;
70052 + tree decl, type;
70053 +
70054 + // insert call to void pax_track_stack(void)
70055 + type = build_function_type_list(void_type_node, NULL_TREE);
70056 + decl = build_fn_decl(track_function, type);
70057 + DECL_ASSEMBLER_NAME(decl); // for LTO
70058 + call = gimple_build_call(decl, 0);
70059 + if (before)
70060 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
70061 + else
70062 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
70063 +}
70064 +
70065 +static unsigned int execute_stackleak_tree_instrument(void)
70066 +{
70067 + basic_block bb;
70068 + gimple_stmt_iterator gsi;
70069 +
70070 + // 1. loop through BBs and GIMPLE statements
70071 + FOR_EACH_BB(bb) {
70072 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
70073 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
70074 + tree decl;
70075 + gimple stmt = gsi_stmt(gsi);
70076 +
70077 + if (!is_gimple_call(stmt))
70078 + continue;
70079 + decl = gimple_call_fndecl(stmt);
70080 + if (!decl)
70081 + continue;
70082 + if (TREE_CODE(decl) != FUNCTION_DECL)
70083 + continue;
70084 + if (!DECL_BUILT_IN(decl))
70085 + continue;
70086 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
70087 + continue;
70088 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70089 + continue;
70090 +
70091 + // 2. insert track call after each __builtin_alloca call
70092 + stackleak_add_instrumentation(&gsi, false);
70093 +// print_node(stderr, "pax", decl, 4);
70094 + }
70095 + }
70096 +
70097 + // 3. insert track call at the beginning
70098 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70099 + gsi = gsi_start_bb(bb);
70100 + stackleak_add_instrumentation(&gsi, true);
70101 +
70102 + return 0;
70103 +}
70104 +
70105 +static unsigned int execute_stackleak_final(void)
70106 +{
70107 + rtx insn;
70108 +
70109 + if (cfun->calls_alloca)
70110 + return 0;
70111 +
70112 + // 1. find pax_track_stack calls
70113 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70114 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70115 + rtx body;
70116 +
70117 + if (!CALL_P(insn))
70118 + continue;
70119 + body = PATTERN(insn);
70120 + if (GET_CODE(body) != CALL)
70121 + continue;
70122 + body = XEXP(body, 0);
70123 + if (GET_CODE(body) != MEM)
70124 + continue;
70125 + body = XEXP(body, 0);
70126 + if (GET_CODE(body) != SYMBOL_REF)
70127 + continue;
70128 + if (strcmp(XSTR(body, 0), track_function))
70129 + continue;
70130 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70131 + // 2. delete call if function frame is not big enough
70132 + if (get_frame_size() >= track_frame_size)
70133 + continue;
70134 + delete_insn_and_edges(insn);
70135 + }
70136 +
70137 +// print_simple_rtl(stderr, get_insns());
70138 +// print_rtl(stderr, get_insns());
70139 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70140 +
70141 + return 0;
70142 +}
70143 +
70144 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70145 +{
70146 + const char * const plugin_name = plugin_info->base_name;
70147 + const int argc = plugin_info->argc;
70148 + const struct plugin_argument * const argv = plugin_info->argv;
70149 + int i;
70150 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70151 + .pass = &stackleak_tree_instrument_pass.pass,
70152 +// .reference_pass_name = "tree_profile",
70153 + .reference_pass_name = "optimized",
70154 + .ref_pass_instance_number = 0,
70155 + .pos_op = PASS_POS_INSERT_AFTER
70156 + };
70157 + struct register_pass_info stackleak_final_pass_info = {
70158 + .pass = &stackleak_final_rtl_opt_pass.pass,
70159 + .reference_pass_name = "final",
70160 + .ref_pass_instance_number = 0,
70161 + .pos_op = PASS_POS_INSERT_BEFORE
70162 + };
70163 +
70164 + if (!plugin_default_version_check(version, &gcc_version)) {
70165 + error(G_("incompatible gcc/plugin versions"));
70166 + return 1;
70167 + }
70168 +
70169 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70170 +
70171 + for (i = 0; i < argc; ++i) {
70172 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70173 + if (!argv[i].value) {
70174 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70175 + continue;
70176 + }
70177 + track_frame_size = atoi(argv[i].value);
70178 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70179 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70180 + continue;
70181 + }
70182 + if (!strcmp(argv[i].key, "initialize-locals")) {
70183 + if (argv[i].value) {
70184 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70185 + continue;
70186 + }
70187 + init_locals = true;
70188 + continue;
70189 + }
70190 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70191 + }
70192 +
70193 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70194 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70195 +
70196 + return 0;
70197 +}
70198 diff -urNp linux-3.0.3/usr/gen_init_cpio.c linux-3.0.3/usr/gen_init_cpio.c
70199 --- linux-3.0.3/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
70200 +++ linux-3.0.3/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
70201 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
70202 int retval;
70203 int rc = -1;
70204 int namesize;
70205 - int i;
70206 + unsigned int i;
70207
70208 mode |= S_IFREG;
70209
70210 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
70211 *env_var = *expanded = '\0';
70212 strncat(env_var, start + 2, end - start - 2);
70213 strncat(expanded, new_location, start - new_location);
70214 - strncat(expanded, getenv(env_var), PATH_MAX);
70215 - strncat(expanded, end + 1, PATH_MAX);
70216 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70217 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70218 strncpy(new_location, expanded, PATH_MAX);
70219 + new_location[PATH_MAX] = 0;
70220 } else
70221 break;
70222 }
70223 diff -urNp linux-3.0.3/virt/kvm/kvm_main.c linux-3.0.3/virt/kvm/kvm_main.c
70224 --- linux-3.0.3/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
70225 +++ linux-3.0.3/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
70226 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70227
70228 static cpumask_var_t cpus_hardware_enabled;
70229 static int kvm_usage_count = 0;
70230 -static atomic_t hardware_enable_failed;
70231 +static atomic_unchecked_t hardware_enable_failed;
70232
70233 struct kmem_cache *kvm_vcpu_cache;
70234 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70235 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
70236
70237 if (r) {
70238 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70239 - atomic_inc(&hardware_enable_failed);
70240 + atomic_inc_unchecked(&hardware_enable_failed);
70241 printk(KERN_INFO "kvm: enabling virtualization on "
70242 "CPU%d failed\n", cpu);
70243 }
70244 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
70245
70246 kvm_usage_count++;
70247 if (kvm_usage_count == 1) {
70248 - atomic_set(&hardware_enable_failed, 0);
70249 + atomic_set_unchecked(&hardware_enable_failed, 0);
70250 on_each_cpu(hardware_enable_nolock, NULL, 1);
70251
70252 - if (atomic_read(&hardware_enable_failed)) {
70253 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70254 hardware_disable_all_nolock();
70255 r = -EBUSY;
70256 }
70257 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
70258 kvm_arch_vcpu_put(vcpu);
70259 }
70260
70261 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70262 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70263 struct module *module)
70264 {
70265 int r;
70266 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
70267 if (!vcpu_align)
70268 vcpu_align = __alignof__(struct kvm_vcpu);
70269 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70270 - 0, NULL);
70271 + SLAB_USERCOPY, NULL);
70272 if (!kvm_vcpu_cache) {
70273 r = -ENOMEM;
70274 goto out_free_3;
70275 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
70276 if (r)
70277 goto out_free;
70278
70279 - kvm_chardev_ops.owner = module;
70280 - kvm_vm_fops.owner = module;
70281 - kvm_vcpu_fops.owner = module;
70282 + pax_open_kernel();
70283 + *(void **)&kvm_chardev_ops.owner = module;
70284 + *(void **)&kvm_vm_fops.owner = module;
70285 + *(void **)&kvm_vcpu_fops.owner = module;
70286 + pax_close_kernel();
70287
70288 r = misc_register(&kvm_dev);
70289 if (r) {