]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.0.3-201108241901.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.3-201108241901.patch
1 diff -urNp linux-3.0.3/arch/alpha/include/asm/elf.h linux-3.0.3/arch/alpha/include/asm/elf.h
2 --- linux-3.0.3/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.3/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-3.0.3/arch/alpha/include/asm/pgtable.h linux-3.0.3/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.3/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.3/arch/alpha/kernel/module.c linux-3.0.3/arch/alpha/kernel/module.c
40 --- linux-3.0.3/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.3/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.3/arch/alpha/kernel/osf_sys.c linux-3.0.3/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.3/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-3.0.3/arch/alpha/mm/fault.c linux-3.0.3/arch/alpha/mm/fault.c
86 --- linux-3.0.3/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.3/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.3/arch/arm/include/asm/elf.h linux-3.0.3/arch/arm/include/asm/elf.h
245 --- linux-3.0.3/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.3/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.3/arch/arm/include/asm/kmap_types.h linux-3.0.3/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.3/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.3/arch/arm/include/asm/uaccess.h linux-3.0.3/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.3/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-3.0.3/arch/arm/kernel/armksyms.c linux-3.0.3/arch/arm/kernel/armksyms.c
344 --- linux-3.0.3/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.3/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.3/arch/arm/kernel/process.c linux-3.0.3/arch/arm/kernel/process.c
358 --- linux-3.0.3/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.3/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.3/arch/arm/kernel/traps.c linux-3.0.3/arch/arm/kernel/traps.c
382 --- linux-3.0.3/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.3/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.3/arch/arm/lib/copy_from_user.S linux-3.0.3/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.3/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.3/arch/arm/lib/copy_to_user.S linux-3.0.3/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.3/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.3/arch/arm/lib/uaccess.S linux-3.0.3/arch/arm/lib/uaccess.S
456 --- linux-3.0.3/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.3/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.3/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.3/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-3.0.3/arch/arm/mm/fault.c linux-3.0.3/arch/arm/mm/fault.c
536 --- linux-3.0.3/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.3/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.3/arch/arm/mm/mmap.c linux-3.0.3/arch/arm/mm/mmap.c
587 --- linux-3.0.3/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.3/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.3/arch/avr32/include/asm/elf.h linux-3.0.3/arch/avr32/include/asm/elf.h
639 --- linux-3.0.3/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.3/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.3/arch/avr32/include/asm/kmap_types.h linux-3.0.3/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.3/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.3/arch/avr32/mm/fault.c linux-3.0.3/arch/avr32/mm/fault.c
671 --- linux-3.0.3/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.3/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.3/arch/frv/include/asm/kmap_types.h linux-3.0.3/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.3/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.3/arch/frv/mm/elf-fdpic.c linux-3.0.3/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.3/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.3/arch/ia64/include/asm/elf.h linux-3.0.3/arch/ia64/include/asm/elf.h
757 --- linux-3.0.3/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.3/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.3/arch/ia64/include/asm/pgtable.h linux-3.0.3/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.3/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.3/arch/ia64/include/asm/spinlock.h linux-3.0.3/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.3/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.3/arch/ia64/include/asm/uaccess.h linux-3.0.3/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.3/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.3/arch/ia64/kernel/module.c linux-3.0.3/arch/ia64/kernel/module.c
837 --- linux-3.0.3/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.3/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-3.0.3/arch/ia64/kernel/sys_ia64.c linux-3.0.3/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.3/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.3/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.3/arch/ia64/mm/fault.c linux-3.0.3/arch/ia64/mm/fault.c
975 --- linux-3.0.3/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.3/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.3/arch/ia64/mm/hugetlbpage.c linux-3.0.3/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.3/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.3/arch/ia64/mm/init.c linux-3.0.3/arch/ia64/mm/init.c
1039 --- linux-3.0.3/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.3/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.3/arch/m32r/lib/usercopy.c linux-3.0.3/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.3/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.3/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.3/arch/mips/include/asm/elf.h linux-3.0.3/arch/mips/include/asm/elf.h
1085 --- linux-3.0.3/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.3/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.3/arch/mips/include/asm/page.h linux-3.0.3/arch/mips/include/asm/page.h
1109 --- linux-3.0.3/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.3/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.3/arch/mips/include/asm/system.h linux-3.0.3/arch/mips/include/asm/system.h
1121 --- linux-3.0.3/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.3/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.3/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.3/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.3/arch/mips/kernel/process.c linux-3.0.3/arch/mips/kernel/process.c
1166 --- linux-3.0.3/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.3/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.3/arch/mips/mm/fault.c linux-3.0.3/arch/mips/mm/fault.c
1185 --- linux-3.0.3/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.3/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.3/arch/mips/mm/mmap.c linux-3.0.3/arch/mips/mm/mmap.c
1212 --- linux-3.0.3/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.3/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.3/arch/parisc/include/asm/elf.h linux-3.0.3/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.3/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.3/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.3/arch/parisc/include/asm/pgtable.h linux-3.0.3/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.3/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.3/arch/parisc/kernel/module.c linux-3.0.3/arch/parisc/kernel/module.c
1314 --- linux-3.0.3/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.3/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.3/arch/parisc/kernel/sys_parisc.c linux-3.0.3/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.3/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.3/arch/parisc/kernel/traps.c linux-3.0.3/arch/parisc/kernel/traps.c
1447 --- linux-3.0.3/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.3/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.3/arch/parisc/mm/fault.c linux-3.0.3/arch/parisc/mm/fault.c
1461 --- linux-3.0.3/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.3/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.3/arch/powerpc/include/asm/elf.h linux-3.0.3/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.3/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667 diff -urNp linux-3.0.3/arch/powerpc/include/asm/kmap_types.h linux-3.0.3/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.3/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.3/arch/powerpc/include/asm/mman.h linux-3.0.3/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.3/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.3/arch/powerpc/include/asm/page_64.h linux-3.0.3/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.3/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.3/arch/powerpc/include/asm/page.h linux-3.0.3/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.3/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.3/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.3/arch/powerpc/include/asm/pgtable.h linux-3.0.3/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.3/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.3/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.3/arch/powerpc/include/asm/reg.h linux-3.0.3/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.3/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.3/arch/powerpc/include/asm/system.h linux-3.0.3/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.3/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.3/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.3/arch/powerpc/include/asm/uaccess.h linux-3.0.3/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.3/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964 diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.3/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.3/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.3/arch/powerpc/kernel/module_32.c linux-3.0.3/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.3/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.3/arch/powerpc/kernel/module.c linux-3.0.3/arch/powerpc/kernel/module.c
2033 --- linux-3.0.3/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.3/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074 diff -urNp linux-3.0.3/arch/powerpc/kernel/process.c linux-3.0.3/arch/powerpc/kernel/process.c
2075 --- linux-3.0.3/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.3/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_32.c linux-3.0.3/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.3/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.3/arch/powerpc/kernel/signal_64.c linux-3.0.3/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.3/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.3/arch/powerpc/kernel/traps.c linux-3.0.3/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.3/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.3/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.3/arch/powerpc/kernel/vdso.c linux-3.0.3/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.3/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.3/arch/powerpc/lib/usercopy_64.c linux-3.0.3/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.3/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.3/arch/powerpc/mm/fault.c linux-3.0.3/arch/powerpc/mm/fault.c
2278 --- linux-3.0.3/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.3/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384 diff -urNp linux-3.0.3/arch/powerpc/mm/mmap_64.c linux-3.0.3/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.3/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410 diff -urNp linux-3.0.3/arch/powerpc/mm/slice.c linux-3.0.3/arch/powerpc/mm/slice.c
2411 --- linux-3.0.3/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.3/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.3/arch/s390/include/asm/elf.h linux-3.0.3/arch/s390/include/asm/elf.h
2480 --- linux-3.0.3/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.3/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.3/arch/s390/include/asm/system.h linux-3.0.3/arch/s390/include/asm/system.h
2508 --- linux-3.0.3/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.3/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.3/arch/s390/include/asm/uaccess.h linux-3.0.3/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.3/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.3/arch/s390/kernel/module.c linux-3.0.3/arch/s390/kernel/module.c
2555 --- linux-3.0.3/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.3/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.3/arch/s390/kernel/process.c linux-3.0.3/arch/s390/kernel/process.c
2629 --- linux-3.0.3/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.3/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.3/arch/s390/kernel/setup.c linux-3.0.3/arch/s390/kernel/setup.c
2672 --- linux-3.0.3/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.3/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.3/arch/s390/mm/mmap.c linux-3.0.3/arch/s390/mm/mmap.c
2684 --- linux-3.0.3/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.3/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.3/arch/score/include/asm/system.h linux-3.0.3/arch/score/include/asm/system.h
2733 --- linux-3.0.3/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.3/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.3/arch/score/kernel/process.c linux-3.0.3/arch/score/kernel/process.c
2745 --- linux-3.0.3/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.3/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.3/arch/sh/mm/mmap.c linux-3.0.3/arch/sh/mm/mmap.c
2757 --- linux-3.0.3/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.3/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833 diff -urNp linux-3.0.3/arch/sparc/include/asm/atomic_64.h linux-3.0.3/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.3/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028 diff -urNp linux-3.0.3/arch/sparc/include/asm/cache.h linux-3.0.3/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.3/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.3/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_32.h linux-3.0.3/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.3/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.3/arch/sparc/include/asm/elf_64.h linux-3.0.3/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:44:40.000000000 -0400
3059 +++ linux-3.0.3/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074 diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtable_32.h linux-3.0.3/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.3/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.3/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.3/arch/sparc/include/asm/spinlock_64.h linux-3.0.3/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127 +++ linux-3.0.3/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_32.h linux-3.0.3/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.3/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.3/arch/sparc/include/asm/thread_info_64.h linux-3.0.3/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.3/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_32.h linux-3.0.3/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.3/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess_64.h linux-3.0.3/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.3/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.3/arch/sparc/include/asm/uaccess.h linux-3.0.3/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.3/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365 diff -urNp linux-3.0.3/arch/sparc/kernel/Makefile linux-3.0.3/arch/sparc/kernel/Makefile
3366 --- linux-3.0.3/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367 +++ linux-3.0.3/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.3/arch/sparc/kernel/process_32.c linux-3.0.3/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.3/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.3/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.3/arch/sparc/kernel/process_64.c linux-3.0.3/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.3/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.3/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.3/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.3/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624 diff -urNp linux-3.0.3/arch/sparc/kernel/traps_32.c linux-3.0.3/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.3/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.3/arch/sparc/kernel/traps_64.c linux-3.0.3/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.3/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.3/arch/sparc/kernel/unaligned_64.c linux-3.0.3/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:44:40.000000000 -0400
3798 +++ linux-3.0.3/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.3/arch/sparc/lib/atomic_64.S linux-3.0.3/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.3/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.3/arch/sparc/lib/ksyms.c linux-3.0.3/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.3/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.3/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.3/arch/sparc/lib/Makefile linux-3.0.3/arch/sparc/lib/Makefile
4068 --- linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:44:40.000000000 -0400
4069 +++ linux-3.0.3/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.3/arch/sparc/Makefile linux-3.0.3/arch/sparc/Makefile
4080 --- linux-3.0.3/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.3/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.3/arch/sparc/mm/fault_32.c linux-3.0.3/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.3/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.3/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398 diff -urNp linux-3.0.3/arch/sparc/mm/fault_64.c linux-3.0.3/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.3/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.3/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.3/arch/sparc/mm/hugetlbpage.c linux-3.0.3/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.3/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.3/arch/sparc/mm/init_32.c linux-3.0.3/arch/sparc/mm/init_32.c
4971 --- linux-3.0.3/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.3/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.3/arch/sparc/mm/Makefile linux-3.0.3/arch/sparc/mm/Makefile
5008 --- linux-3.0.3/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.3/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.3/arch/sparc/mm/srmmu.c linux-3.0.3/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.3/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.3/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.3/arch/um/include/asm/kmap_types.h linux-3.0.3/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.3/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.3/arch/um/include/asm/page.h linux-3.0.3/arch/um/include/asm/page.h
5048 --- linux-3.0.3/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.3/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.3/arch/um/kernel/process.c linux-3.0.3/arch/um/kernel/process.c
5061 --- linux-3.0.3/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.3/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.3/arch/um/sys-i386/syscalls.c linux-3.0.3/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.3/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 + unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 + pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 + if (len > pax_task_size || addr > pax_task_size - len)
5103 + return -EINVAL;
5104 +
5105 + return 0;
5106 +}
5107 +
5108 /*
5109 * The prototype on i386 is:
5110 *
5111 diff -urNp linux-3.0.3/arch/x86/boot/bitops.h linux-3.0.3/arch/x86/boot/bitops.h
5112 --- linux-3.0.3/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.3/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132 diff -urNp linux-3.0.3/arch/x86/boot/boot.h linux-3.0.3/arch/x86/boot/boot.h
5133 --- linux-3.0.3/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.3/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139 - asm("movw %%ds,%0" : "=rm" (seg));
5140 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148 - asm("repe; cmpsb; setnz %0"
5149 + asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153 diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_32.S linux-3.0.3/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.3/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160 - movl $LOAD_PHYSICAL_ADDR, %ebx
5161 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169 - subl $LOAD_PHYSICAL_ADDR, %ebx
5170 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178 - testl %ecx, %ecx
5179 - jz 2f
5180 + jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184 diff -urNp linux-3.0.3/arch/x86/boot/compressed/head_64.S linux-3.0.3/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.3/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191 - movl $LOAD_PHYSICAL_ADDR, %ebx
5192 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200 - movq $LOAD_PHYSICAL_ADDR, %rbp
5201 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.3/arch/x86/boot/compressed/Makefile linux-3.0.3/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.3/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218 diff -urNp linux-3.0.3/arch/x86/boot/compressed/misc.c linux-3.0.3/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.3/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239 diff -urNp linux-3.0.3/arch/x86/boot/compressed/relocs.c linux-3.0.3/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.3/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 + unsigned int i;
5261 +
5262 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 + if (!phdr) {
5264 + die("Unable to allocate %d program headers\n",
5265 + ehdr.e_phnum);
5266 + }
5267 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 + die("Seek to %d failed: %s\n",
5269 + ehdr.e_phoff, strerror(errno));
5270 + }
5271 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 + die("Cannot read ELF program headers: %s\n",
5273 + strerror(errno));
5274 + }
5275 + for(i = 0; i < ehdr.e_phnum; i++) {
5276 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284 + }
5285 +
5286 +}
5287 +
5288 static void read_shdrs(FILE *fp)
5289 {
5290 - int i;
5291 + unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299 - int i;
5300 + unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308 - int i,j;
5309 + unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317 - int i,j;
5318 + unsigned int i,j;
5319 + uint32_t base;
5320 +
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328 + base = 0;
5329 + for (j = 0; j < ehdr.e_phnum; j++) {
5330 + if (phdr[j].p_type != PT_LOAD )
5331 + continue;
5332 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 + continue;
5334 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 + break;
5336 + }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348 - int i;
5349 + unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356 - int j;
5357 + unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365 - int i, printed = 0;
5366 + unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373 - int j;
5374 + unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382 - int i;
5383 + unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389 - int j;
5390 + unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 + continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 + continue;
5406 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 + continue;
5408 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 + continue;
5410 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 + continue;
5412 +#endif
5413 +
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421 - int i;
5422 + unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430 + read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434 diff -urNp linux-3.0.3/arch/x86/boot/cpucheck.c linux-3.0.3/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.3/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.3/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441 - asm("movl %%cr0,%0" : "=r" (cr0));
5442 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450 - asm("pushfl ; "
5451 + asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459 - asm("cpuid"
5460 + asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468 - asm("cpuid"
5469 + asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477 - asm("cpuid"
5478 + asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486 - asm("cpuid"
5487 + asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 - asm("cpuid"
5522 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 + asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532 diff -urNp linux-3.0.3/arch/x86/boot/header.S linux-3.0.3/arch/x86/boot/header.S
5533 --- linux-3.0.3/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.3/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544 diff -urNp linux-3.0.3/arch/x86/boot/Makefile linux-3.0.3/arch/x86/boot/Makefile
5545 --- linux-3.0.3/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.3/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557 diff -urNp linux-3.0.3/arch/x86/boot/memory.c linux-3.0.3/arch/x86/boot/memory.c
5558 --- linux-3.0.3/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.3/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564 - int count = 0;
5565 + unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.3/arch/x86/boot/video.c linux-3.0.3/arch/x86/boot/video.c
5570 --- linux-3.0.3/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.3/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576 - int i, len = 0;
5577 + unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581 diff -urNp linux-3.0.3/arch/x86/boot/video-vesa.c linux-3.0.3/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.3/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.3/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588 + boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592 diff -urNp linux-3.0.3/arch/x86/ia32/ia32_aout.c linux-3.0.3/arch/x86/ia32/ia32_aout.c
5593 --- linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.3/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5595 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5596 unsigned long dump_start, dump_size;
5597 struct user32 dump;
5598
5599 + memset(&dump, 0, sizeof(dump));
5600 +
5601 fs = get_fs();
5602 set_fs(KERNEL_DS);
5603 has_dumped = 1;
5604 diff -urNp linux-3.0.3/arch/x86/ia32/ia32entry.S linux-3.0.3/arch/x86/ia32/ia32entry.S
5605 --- linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5606 +++ linux-3.0.3/arch/x86/ia32/ia32entry.S 2011-08-23 21:48:14.000000000 -0400
5607 @@ -13,6 +13,7 @@
5608 #include <asm/thread_info.h>
5609 #include <asm/segment.h>
5610 #include <asm/irqflags.h>
5611 +#include <asm/pgtable.h>
5612 #include <linux/linkage.h>
5613
5614 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5615 @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5616 ENDPROC(native_irq_enable_sysexit)
5617 #endif
5618
5619 + .macro pax_enter_kernel_user
5620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5621 + call pax_enter_kernel_user
5622 +#endif
5623 + .endm
5624 +
5625 + .macro pax_exit_kernel_user
5626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5627 + call pax_exit_kernel_user
5628 +#endif
5629 +#ifdef CONFIG_PAX_RANDKSTACK
5630 + pushq %rax
5631 + call pax_randomize_kstack
5632 + popq %rax
5633 +#endif
5634 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5635 + call pax_erase_kstack
5636 +#endif
5637 + .endm
5638 +
5639 + .macro pax_erase_kstack
5640 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5641 + call pax_erase_kstack
5642 +#endif
5643 + .endm
5644 +
5645 /*
5646 * 32bit SYSENTER instruction entry.
5647 *
5648 @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5649 CFI_REGISTER rsp,rbp
5650 SWAPGS_UNSAFE_STACK
5651 movq PER_CPU_VAR(kernel_stack), %rsp
5652 - addq $(KERNEL_STACK_OFFSET),%rsp
5653 + pax_enter_kernel_user
5654 /*
5655 * No need to follow this irqs on/off section: the syscall
5656 * disabled irqs, here we enable it straight after entry:
5657 @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5658 CFI_REL_OFFSET rsp,0
5659 pushfq_cfi
5660 /*CFI_REL_OFFSET rflags,0*/
5661 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5662 + GET_THREAD_INFO(%r10)
5663 + movl TI_sysenter_return(%r10), %r10d
5664 CFI_REGISTER rip,r10
5665 pushq_cfi $__USER32_CS
5666 /*CFI_REL_OFFSET cs,0*/
5667 @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5668 SAVE_ARGS 0,0,1
5669 /* no need to do an access_ok check here because rbp has been
5670 32bit zero extended */
5671 +
5672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5673 + mov $PAX_USER_SHADOW_BASE,%r10
5674 + add %r10,%rbp
5675 +#endif
5676 +
5677 1: movl (%rbp),%ebp
5678 .section __ex_table,"a"
5679 .quad 1b,ia32_badarg
5680 @@ -168,6 +202,7 @@ sysenter_dispatch:
5681 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5682 jnz sysexit_audit
5683 sysexit_from_sys_call:
5684 + pax_exit_kernel_user
5685 andl $~TS_COMPAT,TI_status(%r10)
5686 /* clear IF, that popfq doesn't enable interrupts early */
5687 andl $~0x200,EFLAGS-R11(%rsp)
5688 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
5689 movl %eax,%esi /* 2nd arg: syscall number */
5690 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5691 call audit_syscall_entry
5692 +
5693 + pax_erase_kstack
5694 +
5695 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5696 cmpq $(IA32_NR_syscalls-1),%rax
5697 ja ia32_badsys
5698 @@ -246,6 +284,9 @@ sysenter_tracesys:
5699 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5700 movq %rsp,%rdi /* &pt_regs -> arg1 */
5701 call syscall_trace_enter
5702 +
5703 + pax_erase_kstack
5704 +
5705 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5706 RESTORE_REST
5707 cmpq $(IA32_NR_syscalls-1),%rax
5708 @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5709 ENTRY(ia32_cstar_target)
5710 CFI_STARTPROC32 simple
5711 CFI_SIGNAL_FRAME
5712 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5713 + CFI_DEF_CFA rsp,0
5714 CFI_REGISTER rip,rcx
5715 /*CFI_REGISTER rflags,r11*/
5716 SWAPGS_UNSAFE_STACK
5717 movl %esp,%r8d
5718 CFI_REGISTER rsp,r8
5719 movq PER_CPU_VAR(kernel_stack),%rsp
5720 +
5721 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5722 + pax_enter_kernel_user
5723 +#endif
5724 +
5725 /*
5726 * No need to follow this irqs on/off section: the syscall
5727 * disabled irqs and here we enable it straight after entry:
5728 */
5729 ENABLE_INTERRUPTS(CLBR_NONE)
5730 - SAVE_ARGS 8,1,1
5731 + SAVE_ARGS 8*6,1,1
5732 movl %eax,%eax /* zero extension */
5733 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5734 movq %rcx,RIP-ARGOFFSET(%rsp)
5735 @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5736 /* no need to do an access_ok check here because r8 has been
5737 32bit zero extended */
5738 /* hardware stack frame is complete now */
5739 +
5740 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5741 + mov $PAX_USER_SHADOW_BASE,%r10
5742 + add %r10,%r8
5743 +#endif
5744 +
5745 1: movl (%r8),%r9d
5746 .section __ex_table,"a"
5747 .quad 1b,ia32_badarg
5748 @@ -327,6 +379,7 @@ cstar_dispatch:
5749 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5750 jnz sysretl_audit
5751 sysretl_from_sys_call:
5752 + pax_exit_kernel_user
5753 andl $~TS_COMPAT,TI_status(%r10)
5754 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5755 movl RIP-ARGOFFSET(%rsp),%ecx
5756 @@ -364,6 +417,9 @@ cstar_tracesys:
5757 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5758 movq %rsp,%rdi /* &pt_regs -> arg1 */
5759 call syscall_trace_enter
5760 +
5761 + pax_erase_kstack
5762 +
5763 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5764 RESTORE_REST
5765 xchgl %ebp,%r9d
5766 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5767 CFI_REL_OFFSET rip,RIP-RIP
5768 PARAVIRT_ADJUST_EXCEPTION_FRAME
5769 SWAPGS
5770 + pax_enter_kernel_user
5771 /*
5772 * No need to follow this irqs on/off section: the syscall
5773 * disabled irqs and here we enable it straight after entry:
5774 @@ -441,6 +498,9 @@ ia32_tracesys:
5775 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5776 movq %rsp,%rdi /* &pt_regs -> arg1 */
5777 call syscall_trace_enter
5778 +
5779 + pax_erase_kstack
5780 +
5781 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5782 RESTORE_REST
5783 cmpq $(IA32_NR_syscalls-1),%rax
5784 diff -urNp linux-3.0.3/arch/x86/ia32/ia32_signal.c linux-3.0.3/arch/x86/ia32/ia32_signal.c
5785 --- linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5786 +++ linux-3.0.3/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5787 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5788 sp -= frame_size;
5789 /* Align the stack pointer according to the i386 ABI,
5790 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5791 - sp = ((sp + 4) & -16ul) - 4;
5792 + sp = ((sp - 12) & -16ul) - 4;
5793 return (void __user *) sp;
5794 }
5795
5796 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5797 * These are actually not used anymore, but left because some
5798 * gdb versions depend on them as a marker.
5799 */
5800 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5801 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5802 } put_user_catch(err);
5803
5804 if (err)
5805 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5806 0xb8,
5807 __NR_ia32_rt_sigreturn,
5808 0x80cd,
5809 - 0,
5810 + 0
5811 };
5812
5813 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5814 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5815
5816 if (ka->sa.sa_flags & SA_RESTORER)
5817 restorer = ka->sa.sa_restorer;
5818 + else if (current->mm->context.vdso)
5819 + /* Return stub is in 32bit vsyscall page */
5820 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5821 else
5822 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5823 - rt_sigreturn);
5824 + restorer = &frame->retcode;
5825 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5826
5827 /*
5828 * Not actually used anymore, but left because some gdb
5829 * versions need it.
5830 */
5831 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5832 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5833 } put_user_catch(err);
5834
5835 if (err)
5836 diff -urNp linux-3.0.3/arch/x86/include/asm/alternative.h linux-3.0.3/arch/x86/include/asm/alternative.h
5837 --- linux-3.0.3/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5838 +++ linux-3.0.3/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5839 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5840 ".section .discard,\"aw\",@progbits\n" \
5841 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5842 ".previous\n" \
5843 - ".section .altinstr_replacement, \"ax\"\n" \
5844 + ".section .altinstr_replacement, \"a\"\n" \
5845 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5846 ".previous"
5847
5848 diff -urNp linux-3.0.3/arch/x86/include/asm/apic.h linux-3.0.3/arch/x86/include/asm/apic.h
5849 --- linux-3.0.3/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5850 +++ linux-3.0.3/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5851 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5852
5853 #ifdef CONFIG_X86_LOCAL_APIC
5854
5855 -extern unsigned int apic_verbosity;
5856 +extern int apic_verbosity;
5857 extern int local_apic_timer_c2_ok;
5858
5859 extern int disable_apic;
5860 diff -urNp linux-3.0.3/arch/x86/include/asm/apm.h linux-3.0.3/arch/x86/include/asm/apm.h
5861 --- linux-3.0.3/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5862 +++ linux-3.0.3/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5863 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5864 __asm__ __volatile__(APM_DO_ZERO_SEGS
5865 "pushl %%edi\n\t"
5866 "pushl %%ebp\n\t"
5867 - "lcall *%%cs:apm_bios_entry\n\t"
5868 + "lcall *%%ss:apm_bios_entry\n\t"
5869 "setc %%al\n\t"
5870 "popl %%ebp\n\t"
5871 "popl %%edi\n\t"
5872 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5873 __asm__ __volatile__(APM_DO_ZERO_SEGS
5874 "pushl %%edi\n\t"
5875 "pushl %%ebp\n\t"
5876 - "lcall *%%cs:apm_bios_entry\n\t"
5877 + "lcall *%%ss:apm_bios_entry\n\t"
5878 "setc %%bl\n\t"
5879 "popl %%ebp\n\t"
5880 "popl %%edi\n\t"
5881 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_32.h linux-3.0.3/arch/x86/include/asm/atomic64_32.h
5882 --- linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5883 +++ linux-3.0.3/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5884 @@ -12,6 +12,14 @@ typedef struct {
5885 u64 __aligned(8) counter;
5886 } atomic64_t;
5887
5888 +#ifdef CONFIG_PAX_REFCOUNT
5889 +typedef struct {
5890 + u64 __aligned(8) counter;
5891 +} atomic64_unchecked_t;
5892 +#else
5893 +typedef atomic64_t atomic64_unchecked_t;
5894 +#endif
5895 +
5896 #define ATOMIC64_INIT(val) { (val) }
5897
5898 #ifdef CONFIG_X86_CMPXCHG64
5899 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5900 }
5901
5902 /**
5903 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5904 + * @p: pointer to type atomic64_unchecked_t
5905 + * @o: expected value
5906 + * @n: new value
5907 + *
5908 + * Atomically sets @v to @n if it was equal to @o and returns
5909 + * the old value.
5910 + */
5911 +
5912 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5913 +{
5914 + return cmpxchg64(&v->counter, o, n);
5915 +}
5916 +
5917 +/**
5918 * atomic64_xchg - xchg atomic64 variable
5919 * @v: pointer to type atomic64_t
5920 * @n: value to assign
5921 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5922 }
5923
5924 /**
5925 + * atomic64_set_unchecked - set atomic64 variable
5926 + * @v: pointer to type atomic64_unchecked_t
5927 + * @n: value to assign
5928 + *
5929 + * Atomically sets the value of @v to @n.
5930 + */
5931 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5932 +{
5933 + unsigned high = (unsigned)(i >> 32);
5934 + unsigned low = (unsigned)i;
5935 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5936 + : "+b" (low), "+c" (high)
5937 + : "S" (v)
5938 + : "eax", "edx", "memory"
5939 + );
5940 +}
5941 +
5942 +/**
5943 * atomic64_read - read atomic64 variable
5944 * @v: pointer to type atomic64_t
5945 *
5946 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5947 }
5948
5949 /**
5950 + * atomic64_read_unchecked - read atomic64 variable
5951 + * @v: pointer to type atomic64_unchecked_t
5952 + *
5953 + * Atomically reads the value of @v and returns it.
5954 + */
5955 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5956 +{
5957 + long long r;
5958 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5959 + : "=A" (r), "+c" (v)
5960 + : : "memory"
5961 + );
5962 + return r;
5963 + }
5964 +
5965 +/**
5966 * atomic64_add_return - add and return
5967 * @i: integer value to add
5968 * @v: pointer to type atomic64_t
5969 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5970 return i;
5971 }
5972
5973 +/**
5974 + * atomic64_add_return_unchecked - add and return
5975 + * @i: integer value to add
5976 + * @v: pointer to type atomic64_unchecked_t
5977 + *
5978 + * Atomically adds @i to @v and returns @i + *@v
5979 + */
5980 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5981 +{
5982 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
5983 + : "+A" (i), "+c" (v)
5984 + : : "memory"
5985 + );
5986 + return i;
5987 +}
5988 +
5989 /*
5990 * Other variants with different arithmetic operators:
5991 */
5992 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
5993 return a;
5994 }
5995
5996 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5997 +{
5998 + long long a;
5999 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6000 + : "=A" (a)
6001 + : "S" (v)
6002 + : "memory", "ecx"
6003 + );
6004 + return a;
6005 +}
6006 +
6007 static inline long long atomic64_dec_return(atomic64_t *v)
6008 {
6009 long long a;
6010 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6011 }
6012
6013 /**
6014 + * atomic64_add_unchecked - add integer to atomic64 variable
6015 + * @i: integer value to add
6016 + * @v: pointer to type atomic64_unchecked_t
6017 + *
6018 + * Atomically adds @i to @v.
6019 + */
6020 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6021 +{
6022 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6023 + : "+A" (i), "+c" (v)
6024 + : : "memory"
6025 + );
6026 + return i;
6027 +}
6028 +
6029 +/**
6030 * atomic64_sub - subtract the atomic64 variable
6031 * @i: integer value to subtract
6032 * @v: pointer to type atomic64_t
6033 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic64_64.h linux-3.0.3/arch/x86/include/asm/atomic64_64.h
6034 --- linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6035 +++ linux-3.0.3/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6036 @@ -18,7 +18,19 @@
6037 */
6038 static inline long atomic64_read(const atomic64_t *v)
6039 {
6040 - return (*(volatile long *)&(v)->counter);
6041 + return (*(volatile const long *)&(v)->counter);
6042 +}
6043 +
6044 +/**
6045 + * atomic64_read_unchecked - read atomic64 variable
6046 + * @v: pointer of type atomic64_unchecked_t
6047 + *
6048 + * Atomically reads the value of @v.
6049 + * Doesn't imply a read memory barrier.
6050 + */
6051 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6052 +{
6053 + return (*(volatile const long *)&(v)->counter);
6054 }
6055
6056 /**
6057 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6058 }
6059
6060 /**
6061 + * atomic64_set_unchecked - set atomic64 variable
6062 + * @v: pointer to type atomic64_unchecked_t
6063 + * @i: required value
6064 + *
6065 + * Atomically sets the value of @v to @i.
6066 + */
6067 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6068 +{
6069 + v->counter = i;
6070 +}
6071 +
6072 +/**
6073 * atomic64_add - add integer to atomic64 variable
6074 * @i: integer value to add
6075 * @v: pointer to type atomic64_t
6076 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6077 */
6078 static inline void atomic64_add(long i, atomic64_t *v)
6079 {
6080 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6081 +
6082 +#ifdef CONFIG_PAX_REFCOUNT
6083 + "jno 0f\n"
6084 + LOCK_PREFIX "subq %1,%0\n"
6085 + "int $4\n0:\n"
6086 + _ASM_EXTABLE(0b, 0b)
6087 +#endif
6088 +
6089 + : "=m" (v->counter)
6090 + : "er" (i), "m" (v->counter));
6091 +}
6092 +
6093 +/**
6094 + * atomic64_add_unchecked - add integer to atomic64 variable
6095 + * @i: integer value to add
6096 + * @v: pointer to type atomic64_unchecked_t
6097 + *
6098 + * Atomically adds @i to @v.
6099 + */
6100 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6101 +{
6102 asm volatile(LOCK_PREFIX "addq %1,%0"
6103 : "=m" (v->counter)
6104 : "er" (i), "m" (v->counter));
6105 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6106 */
6107 static inline void atomic64_sub(long i, atomic64_t *v)
6108 {
6109 - asm volatile(LOCK_PREFIX "subq %1,%0"
6110 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6111 +
6112 +#ifdef CONFIG_PAX_REFCOUNT
6113 + "jno 0f\n"
6114 + LOCK_PREFIX "addq %1,%0\n"
6115 + "int $4\n0:\n"
6116 + _ASM_EXTABLE(0b, 0b)
6117 +#endif
6118 +
6119 + : "=m" (v->counter)
6120 + : "er" (i), "m" (v->counter));
6121 +}
6122 +
6123 +/**
6124 + * atomic64_sub_unchecked - subtract the atomic64 variable
6125 + * @i: integer value to subtract
6126 + * @v: pointer to type atomic64_unchecked_t
6127 + *
6128 + * Atomically subtracts @i from @v.
6129 + */
6130 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6131 +{
6132 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6133 : "=m" (v->counter)
6134 : "er" (i), "m" (v->counter));
6135 }
6136 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6137 {
6138 unsigned char c;
6139
6140 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6141 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6142 +
6143 +#ifdef CONFIG_PAX_REFCOUNT
6144 + "jno 0f\n"
6145 + LOCK_PREFIX "addq %2,%0\n"
6146 + "int $4\n0:\n"
6147 + _ASM_EXTABLE(0b, 0b)
6148 +#endif
6149 +
6150 + "sete %1\n"
6151 : "=m" (v->counter), "=qm" (c)
6152 : "er" (i), "m" (v->counter) : "memory");
6153 return c;
6154 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6155 */
6156 static inline void atomic64_inc(atomic64_t *v)
6157 {
6158 + asm volatile(LOCK_PREFIX "incq %0\n"
6159 +
6160 +#ifdef CONFIG_PAX_REFCOUNT
6161 + "jno 0f\n"
6162 + LOCK_PREFIX "decq %0\n"
6163 + "int $4\n0:\n"
6164 + _ASM_EXTABLE(0b, 0b)
6165 +#endif
6166 +
6167 + : "=m" (v->counter)
6168 + : "m" (v->counter));
6169 +}
6170 +
6171 +/**
6172 + * atomic64_inc_unchecked - increment atomic64 variable
6173 + * @v: pointer to type atomic64_unchecked_t
6174 + *
6175 + * Atomically increments @v by 1.
6176 + */
6177 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6178 +{
6179 asm volatile(LOCK_PREFIX "incq %0"
6180 : "=m" (v->counter)
6181 : "m" (v->counter));
6182 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6183 */
6184 static inline void atomic64_dec(atomic64_t *v)
6185 {
6186 - asm volatile(LOCK_PREFIX "decq %0"
6187 + asm volatile(LOCK_PREFIX "decq %0\n"
6188 +
6189 +#ifdef CONFIG_PAX_REFCOUNT
6190 + "jno 0f\n"
6191 + LOCK_PREFIX "incq %0\n"
6192 + "int $4\n0:\n"
6193 + _ASM_EXTABLE(0b, 0b)
6194 +#endif
6195 +
6196 + : "=m" (v->counter)
6197 + : "m" (v->counter));
6198 +}
6199 +
6200 +/**
6201 + * atomic64_dec_unchecked - decrement atomic64 variable
6202 + * @v: pointer to type atomic64_t
6203 + *
6204 + * Atomically decrements @v by 1.
6205 + */
6206 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6207 +{
6208 + asm volatile(LOCK_PREFIX "decq %0\n"
6209 : "=m" (v->counter)
6210 : "m" (v->counter));
6211 }
6212 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6213 {
6214 unsigned char c;
6215
6216 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6217 + asm volatile(LOCK_PREFIX "decq %0\n"
6218 +
6219 +#ifdef CONFIG_PAX_REFCOUNT
6220 + "jno 0f\n"
6221 + LOCK_PREFIX "incq %0\n"
6222 + "int $4\n0:\n"
6223 + _ASM_EXTABLE(0b, 0b)
6224 +#endif
6225 +
6226 + "sete %1\n"
6227 : "=m" (v->counter), "=qm" (c)
6228 : "m" (v->counter) : "memory");
6229 return c != 0;
6230 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6231 {
6232 unsigned char c;
6233
6234 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6235 + asm volatile(LOCK_PREFIX "incq %0\n"
6236 +
6237 +#ifdef CONFIG_PAX_REFCOUNT
6238 + "jno 0f\n"
6239 + LOCK_PREFIX "decq %0\n"
6240 + "int $4\n0:\n"
6241 + _ASM_EXTABLE(0b, 0b)
6242 +#endif
6243 +
6244 + "sete %1\n"
6245 : "=m" (v->counter), "=qm" (c)
6246 : "m" (v->counter) : "memory");
6247 return c != 0;
6248 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6249 {
6250 unsigned char c;
6251
6252 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6253 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6254 +
6255 +#ifdef CONFIG_PAX_REFCOUNT
6256 + "jno 0f\n"
6257 + LOCK_PREFIX "subq %2,%0\n"
6258 + "int $4\n0:\n"
6259 + _ASM_EXTABLE(0b, 0b)
6260 +#endif
6261 +
6262 + "sets %1\n"
6263 : "=m" (v->counter), "=qm" (c)
6264 : "er" (i), "m" (v->counter) : "memory");
6265 return c;
6266 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6267 static inline long atomic64_add_return(long i, atomic64_t *v)
6268 {
6269 long __i = i;
6270 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6271 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6272 +
6273 +#ifdef CONFIG_PAX_REFCOUNT
6274 + "jno 0f\n"
6275 + "movq %0, %1\n"
6276 + "int $4\n0:\n"
6277 + _ASM_EXTABLE(0b, 0b)
6278 +#endif
6279 +
6280 + : "+r" (i), "+m" (v->counter)
6281 + : : "memory");
6282 + return i + __i;
6283 +}
6284 +
6285 +/**
6286 + * atomic64_add_return_unchecked - add and return
6287 + * @i: integer value to add
6288 + * @v: pointer to type atomic64_unchecked_t
6289 + *
6290 + * Atomically adds @i to @v and returns @i + @v
6291 + */
6292 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6293 +{
6294 + long __i = i;
6295 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6296 : "+r" (i), "+m" (v->counter)
6297 : : "memory");
6298 return i + __i;
6299 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6300 }
6301
6302 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6303 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6304 +{
6305 + return atomic64_add_return_unchecked(1, v);
6306 +}
6307 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6308
6309 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6310 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6311 return cmpxchg(&v->counter, old, new);
6312 }
6313
6314 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6315 +{
6316 + return cmpxchg(&v->counter, old, new);
6317 +}
6318 +
6319 static inline long atomic64_xchg(atomic64_t *v, long new)
6320 {
6321 return xchg(&v->counter, new);
6322 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6323 */
6324 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6325 {
6326 - long c, old;
6327 + long c, old, new;
6328 c = atomic64_read(v);
6329 for (;;) {
6330 - if (unlikely(c == (u)))
6331 + if (unlikely(c == u))
6332 break;
6333 - old = atomic64_cmpxchg((v), c, c + (a));
6334 +
6335 + asm volatile("add %2,%0\n"
6336 +
6337 +#ifdef CONFIG_PAX_REFCOUNT
6338 + "jno 0f\n"
6339 + "sub %2,%0\n"
6340 + "int $4\n0:\n"
6341 + _ASM_EXTABLE(0b, 0b)
6342 +#endif
6343 +
6344 + : "=r" (new)
6345 + : "0" (c), "ir" (a));
6346 +
6347 + old = atomic64_cmpxchg(v, c, new);
6348 if (likely(old == c))
6349 break;
6350 c = old;
6351 }
6352 - return c != (u);
6353 + return c != u;
6354 }
6355
6356 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6357 diff -urNp linux-3.0.3/arch/x86/include/asm/atomic.h linux-3.0.3/arch/x86/include/asm/atomic.h
6358 --- linux-3.0.3/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6359 +++ linux-3.0.3/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6360 @@ -22,7 +22,18 @@
6361 */
6362 static inline int atomic_read(const atomic_t *v)
6363 {
6364 - return (*(volatile int *)&(v)->counter);
6365 + return (*(volatile const int *)&(v)->counter);
6366 +}
6367 +
6368 +/**
6369 + * atomic_read_unchecked - read atomic variable
6370 + * @v: pointer of type atomic_unchecked_t
6371 + *
6372 + * Atomically reads the value of @v.
6373 + */
6374 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6375 +{
6376 + return (*(volatile const int *)&(v)->counter);
6377 }
6378
6379 /**
6380 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6381 }
6382
6383 /**
6384 + * atomic_set_unchecked - set atomic variable
6385 + * @v: pointer of type atomic_unchecked_t
6386 + * @i: required value
6387 + *
6388 + * Atomically sets the value of @v to @i.
6389 + */
6390 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6391 +{
6392 + v->counter = i;
6393 +}
6394 +
6395 +/**
6396 * atomic_add - add integer to atomic variable
6397 * @i: integer value to add
6398 * @v: pointer of type atomic_t
6399 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6400 */
6401 static inline void atomic_add(int i, atomic_t *v)
6402 {
6403 - asm volatile(LOCK_PREFIX "addl %1,%0"
6404 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6405 +
6406 +#ifdef CONFIG_PAX_REFCOUNT
6407 + "jno 0f\n"
6408 + LOCK_PREFIX "subl %1,%0\n"
6409 + "int $4\n0:\n"
6410 + _ASM_EXTABLE(0b, 0b)
6411 +#endif
6412 +
6413 + : "+m" (v->counter)
6414 + : "ir" (i));
6415 +}
6416 +
6417 +/**
6418 + * atomic_add_unchecked - add integer to atomic variable
6419 + * @i: integer value to add
6420 + * @v: pointer of type atomic_unchecked_t
6421 + *
6422 + * Atomically adds @i to @v.
6423 + */
6424 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6425 +{
6426 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6427 : "+m" (v->counter)
6428 : "ir" (i));
6429 }
6430 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6431 */
6432 static inline void atomic_sub(int i, atomic_t *v)
6433 {
6434 - asm volatile(LOCK_PREFIX "subl %1,%0"
6435 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6436 +
6437 +#ifdef CONFIG_PAX_REFCOUNT
6438 + "jno 0f\n"
6439 + LOCK_PREFIX "addl %1,%0\n"
6440 + "int $4\n0:\n"
6441 + _ASM_EXTABLE(0b, 0b)
6442 +#endif
6443 +
6444 + : "+m" (v->counter)
6445 + : "ir" (i));
6446 +}
6447 +
6448 +/**
6449 + * atomic_sub_unchecked - subtract integer from atomic variable
6450 + * @i: integer value to subtract
6451 + * @v: pointer of type atomic_unchecked_t
6452 + *
6453 + * Atomically subtracts @i from @v.
6454 + */
6455 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6456 +{
6457 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6458 : "+m" (v->counter)
6459 : "ir" (i));
6460 }
6461 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6462 {
6463 unsigned char c;
6464
6465 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6466 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6467 +
6468 +#ifdef CONFIG_PAX_REFCOUNT
6469 + "jno 0f\n"
6470 + LOCK_PREFIX "addl %2,%0\n"
6471 + "int $4\n0:\n"
6472 + _ASM_EXTABLE(0b, 0b)
6473 +#endif
6474 +
6475 + "sete %1\n"
6476 : "+m" (v->counter), "=qm" (c)
6477 : "ir" (i) : "memory");
6478 return c;
6479 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6480 */
6481 static inline void atomic_inc(atomic_t *v)
6482 {
6483 - asm volatile(LOCK_PREFIX "incl %0"
6484 + asm volatile(LOCK_PREFIX "incl %0\n"
6485 +
6486 +#ifdef CONFIG_PAX_REFCOUNT
6487 + "jno 0f\n"
6488 + LOCK_PREFIX "decl %0\n"
6489 + "int $4\n0:\n"
6490 + _ASM_EXTABLE(0b, 0b)
6491 +#endif
6492 +
6493 + : "+m" (v->counter));
6494 +}
6495 +
6496 +/**
6497 + * atomic_inc_unchecked - increment atomic variable
6498 + * @v: pointer of type atomic_unchecked_t
6499 + *
6500 + * Atomically increments @v by 1.
6501 + */
6502 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6503 +{
6504 + asm volatile(LOCK_PREFIX "incl %0\n"
6505 : "+m" (v->counter));
6506 }
6507
6508 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6509 */
6510 static inline void atomic_dec(atomic_t *v)
6511 {
6512 - asm volatile(LOCK_PREFIX "decl %0"
6513 + asm volatile(LOCK_PREFIX "decl %0\n"
6514 +
6515 +#ifdef CONFIG_PAX_REFCOUNT
6516 + "jno 0f\n"
6517 + LOCK_PREFIX "incl %0\n"
6518 + "int $4\n0:\n"
6519 + _ASM_EXTABLE(0b, 0b)
6520 +#endif
6521 +
6522 + : "+m" (v->counter));
6523 +}
6524 +
6525 +/**
6526 + * atomic_dec_unchecked - decrement atomic variable
6527 + * @v: pointer of type atomic_unchecked_t
6528 + *
6529 + * Atomically decrements @v by 1.
6530 + */
6531 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6532 +{
6533 + asm volatile(LOCK_PREFIX "decl %0\n"
6534 : "+m" (v->counter));
6535 }
6536
6537 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6538 {
6539 unsigned char c;
6540
6541 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6542 + asm volatile(LOCK_PREFIX "decl %0\n"
6543 +
6544 +#ifdef CONFIG_PAX_REFCOUNT
6545 + "jno 0f\n"
6546 + LOCK_PREFIX "incl %0\n"
6547 + "int $4\n0:\n"
6548 + _ASM_EXTABLE(0b, 0b)
6549 +#endif
6550 +
6551 + "sete %1\n"
6552 : "+m" (v->counter), "=qm" (c)
6553 : : "memory");
6554 return c != 0;
6555 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6556 {
6557 unsigned char c;
6558
6559 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6560 + asm volatile(LOCK_PREFIX "incl %0\n"
6561 +
6562 +#ifdef CONFIG_PAX_REFCOUNT
6563 + "jno 0f\n"
6564 + LOCK_PREFIX "decl %0\n"
6565 + "int $4\n0:\n"
6566 + _ASM_EXTABLE(0b, 0b)
6567 +#endif
6568 +
6569 + "sete %1\n"
6570 + : "+m" (v->counter), "=qm" (c)
6571 + : : "memory");
6572 + return c != 0;
6573 +}
6574 +
6575 +/**
6576 + * atomic_inc_and_test_unchecked - increment and test
6577 + * @v: pointer of type atomic_unchecked_t
6578 + *
6579 + * Atomically increments @v by 1
6580 + * and returns true if the result is zero, or false for all
6581 + * other cases.
6582 + */
6583 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6584 +{
6585 + unsigned char c;
6586 +
6587 + asm volatile(LOCK_PREFIX "incl %0\n"
6588 + "sete %1\n"
6589 : "+m" (v->counter), "=qm" (c)
6590 : : "memory");
6591 return c != 0;
6592 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6593 {
6594 unsigned char c;
6595
6596 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6597 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6598 +
6599 +#ifdef CONFIG_PAX_REFCOUNT
6600 + "jno 0f\n"
6601 + LOCK_PREFIX "subl %2,%0\n"
6602 + "int $4\n0:\n"
6603 + _ASM_EXTABLE(0b, 0b)
6604 +#endif
6605 +
6606 + "sets %1\n"
6607 : "+m" (v->counter), "=qm" (c)
6608 : "ir" (i) : "memory");
6609 return c;
6610 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6611 #endif
6612 /* Modern 486+ processor */
6613 __i = i;
6614 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6615 +
6616 +#ifdef CONFIG_PAX_REFCOUNT
6617 + "jno 0f\n"
6618 + "movl %0, %1\n"
6619 + "int $4\n0:\n"
6620 + _ASM_EXTABLE(0b, 0b)
6621 +#endif
6622 +
6623 + : "+r" (i), "+m" (v->counter)
6624 + : : "memory");
6625 + return i + __i;
6626 +
6627 +#ifdef CONFIG_M386
6628 +no_xadd: /* Legacy 386 processor */
6629 + local_irq_save(flags);
6630 + __i = atomic_read(v);
6631 + atomic_set(v, i + __i);
6632 + local_irq_restore(flags);
6633 + return i + __i;
6634 +#endif
6635 +}
6636 +
6637 +/**
6638 + * atomic_add_return_unchecked - add integer and return
6639 + * @v: pointer of type atomic_unchecked_t
6640 + * @i: integer value to add
6641 + *
6642 + * Atomically adds @i to @v and returns @i + @v
6643 + */
6644 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6645 +{
6646 + int __i;
6647 +#ifdef CONFIG_M386
6648 + unsigned long flags;
6649 + if (unlikely(boot_cpu_data.x86 <= 3))
6650 + goto no_xadd;
6651 +#endif
6652 + /* Modern 486+ processor */
6653 + __i = i;
6654 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6655 : "+r" (i), "+m" (v->counter)
6656 : : "memory");
6657 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6658 }
6659
6660 #define atomic_inc_return(v) (atomic_add_return(1, v))
6661 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6662 +{
6663 + return atomic_add_return_unchecked(1, v);
6664 +}
6665 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6666
6667 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6668 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6669 return cmpxchg(&v->counter, old, new);
6670 }
6671
6672 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6673 +{
6674 + return cmpxchg(&v->counter, old, new);
6675 +}
6676 +
6677 static inline int atomic_xchg(atomic_t *v, int new)
6678 {
6679 return xchg(&v->counter, new);
6680 }
6681
6682 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6683 +{
6684 + return xchg(&v->counter, new);
6685 +}
6686 +
6687 /**
6688 * atomic_add_unless - add unless the number is already a given value
6689 * @v: pointer of type atomic_t
6690 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6691 */
6692 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6693 {
6694 - int c, old;
6695 + int c, old, new;
6696 c = atomic_read(v);
6697 for (;;) {
6698 - if (unlikely(c == (u)))
6699 + if (unlikely(c == u))
6700 break;
6701 - old = atomic_cmpxchg((v), c, c + (a));
6702 +
6703 + asm volatile("addl %2,%0\n"
6704 +
6705 +#ifdef CONFIG_PAX_REFCOUNT
6706 + "jno 0f\n"
6707 + "subl %2,%0\n"
6708 + "int $4\n0:\n"
6709 + _ASM_EXTABLE(0b, 0b)
6710 +#endif
6711 +
6712 + : "=r" (new)
6713 + : "0" (c), "ir" (a));
6714 +
6715 + old = atomic_cmpxchg(v, c, new);
6716 if (likely(old == c))
6717 break;
6718 c = old;
6719 }
6720 - return c != (u);
6721 + return c != u;
6722 }
6723
6724 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6725
6726 +/**
6727 + * atomic_inc_not_zero_hint - increment if not null
6728 + * @v: pointer of type atomic_t
6729 + * @hint: probable value of the atomic before the increment
6730 + *
6731 + * This version of atomic_inc_not_zero() gives a hint of probable
6732 + * value of the atomic. This helps processor to not read the memory
6733 + * before doing the atomic read/modify/write cycle, lowering
6734 + * number of bus transactions on some arches.
6735 + *
6736 + * Returns: 0 if increment was not done, 1 otherwise.
6737 + */
6738 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6739 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6740 +{
6741 + int val, c = hint, new;
6742 +
6743 + /* sanity test, should be removed by compiler if hint is a constant */
6744 + if (!hint)
6745 + return atomic_inc_not_zero(v);
6746 +
6747 + do {
6748 + asm volatile("incl %0\n"
6749 +
6750 +#ifdef CONFIG_PAX_REFCOUNT
6751 + "jno 0f\n"
6752 + "decl %0\n"
6753 + "int $4\n0:\n"
6754 + _ASM_EXTABLE(0b, 0b)
6755 +#endif
6756 +
6757 + : "=r" (new)
6758 + : "0" (c));
6759 +
6760 + val = atomic_cmpxchg(v, c, new);
6761 + if (val == c)
6762 + return 1;
6763 + c = val;
6764 + } while (c);
6765 +
6766 + return 0;
6767 +}
6768 +
6769 /*
6770 * atomic_dec_if_positive - decrement by 1 if old value positive
6771 * @v: pointer of type atomic_t
6772 diff -urNp linux-3.0.3/arch/x86/include/asm/bitops.h linux-3.0.3/arch/x86/include/asm/bitops.h
6773 --- linux-3.0.3/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6774 +++ linux-3.0.3/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6775 @@ -38,7 +38,7 @@
6776 * a mask operation on a byte.
6777 */
6778 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6779 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6780 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6781 #define CONST_MASK(nr) (1 << ((nr) & 7))
6782
6783 /**
6784 diff -urNp linux-3.0.3/arch/x86/include/asm/boot.h linux-3.0.3/arch/x86/include/asm/boot.h
6785 --- linux-3.0.3/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6786 +++ linux-3.0.3/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6787 @@ -11,10 +11,15 @@
6788 #include <asm/pgtable_types.h>
6789
6790 /* Physical address where kernel should be loaded. */
6791 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6792 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6793 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6794 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6795
6796 +#ifndef __ASSEMBLY__
6797 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6798 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6799 +#endif
6800 +
6801 /* Minimum kernel alignment, as a power of two */
6802 #ifdef CONFIG_X86_64
6803 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6804 diff -urNp linux-3.0.3/arch/x86/include/asm/cacheflush.h linux-3.0.3/arch/x86/include/asm/cacheflush.h
6805 --- linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6806 +++ linux-3.0.3/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6807 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6808 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6809
6810 if (pg_flags == _PGMT_DEFAULT)
6811 - return -1;
6812 + return ~0UL;
6813 else if (pg_flags == _PGMT_WC)
6814 return _PAGE_CACHE_WC;
6815 else if (pg_flags == _PGMT_UC_MINUS)
6816 diff -urNp linux-3.0.3/arch/x86/include/asm/cache.h linux-3.0.3/arch/x86/include/asm/cache.h
6817 --- linux-3.0.3/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6818 +++ linux-3.0.3/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6819 @@ -5,12 +5,13 @@
6820
6821 /* L1 cache line size */
6822 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6823 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6824 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6825
6826 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6827 +#define __read_only __attribute__((__section__(".data..read_only")))
6828
6829 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6830 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6831 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6832
6833 #ifdef CONFIG_X86_VSMP
6834 #ifdef CONFIG_SMP
6835 diff -urNp linux-3.0.3/arch/x86/include/asm/checksum_32.h linux-3.0.3/arch/x86/include/asm/checksum_32.h
6836 --- linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6837 +++ linux-3.0.3/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6838 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6839 int len, __wsum sum,
6840 int *src_err_ptr, int *dst_err_ptr);
6841
6842 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6843 + int len, __wsum sum,
6844 + int *src_err_ptr, int *dst_err_ptr);
6845 +
6846 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6847 + int len, __wsum sum,
6848 + int *src_err_ptr, int *dst_err_ptr);
6849 +
6850 /*
6851 * Note: when you get a NULL pointer exception here this means someone
6852 * passed in an incorrect kernel address to one of these functions.
6853 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6854 int *err_ptr)
6855 {
6856 might_sleep();
6857 - return csum_partial_copy_generic((__force void *)src, dst,
6858 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6859 len, sum, err_ptr, NULL);
6860 }
6861
6862 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6863 {
6864 might_sleep();
6865 if (access_ok(VERIFY_WRITE, dst, len))
6866 - return csum_partial_copy_generic(src, (__force void *)dst,
6867 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6868 len, sum, NULL, err_ptr);
6869
6870 if (len)
6871 diff -urNp linux-3.0.3/arch/x86/include/asm/cpufeature.h linux-3.0.3/arch/x86/include/asm/cpufeature.h
6872 --- linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6873 +++ linux-3.0.3/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6874 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6875 ".section .discard,\"aw\",@progbits\n"
6876 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6877 ".previous\n"
6878 - ".section .altinstr_replacement,\"ax\"\n"
6879 + ".section .altinstr_replacement,\"a\"\n"
6880 "3: movb $1,%0\n"
6881 "4:\n"
6882 ".previous\n"
6883 diff -urNp linux-3.0.3/arch/x86/include/asm/desc_defs.h linux-3.0.3/arch/x86/include/asm/desc_defs.h
6884 --- linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6885 +++ linux-3.0.3/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6886 @@ -31,6 +31,12 @@ struct desc_struct {
6887 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6888 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6889 };
6890 + struct {
6891 + u16 offset_low;
6892 + u16 seg;
6893 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6894 + unsigned offset_high: 16;
6895 + } gate;
6896 };
6897 } __attribute__((packed));
6898
6899 diff -urNp linux-3.0.3/arch/x86/include/asm/desc.h linux-3.0.3/arch/x86/include/asm/desc.h
6900 --- linux-3.0.3/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6901 +++ linux-3.0.3/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6902 @@ -4,6 +4,7 @@
6903 #include <asm/desc_defs.h>
6904 #include <asm/ldt.h>
6905 #include <asm/mmu.h>
6906 +#include <asm/pgtable.h>
6907
6908 #include <linux/smp.h>
6909
6910 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6911
6912 desc->type = (info->read_exec_only ^ 1) << 1;
6913 desc->type |= info->contents << 2;
6914 + desc->type |= info->seg_not_present ^ 1;
6915
6916 desc->s = 1;
6917 desc->dpl = 0x3;
6918 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6919 }
6920
6921 extern struct desc_ptr idt_descr;
6922 -extern gate_desc idt_table[];
6923 -
6924 -struct gdt_page {
6925 - struct desc_struct gdt[GDT_ENTRIES];
6926 -} __attribute__((aligned(PAGE_SIZE)));
6927 -
6928 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6929 +extern gate_desc idt_table[256];
6930
6931 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6932 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6933 {
6934 - return per_cpu(gdt_page, cpu).gdt;
6935 + return cpu_gdt_table[cpu];
6936 }
6937
6938 #ifdef CONFIG_X86_64
6939 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
6940 unsigned long base, unsigned dpl, unsigned flags,
6941 unsigned short seg)
6942 {
6943 - gate->a = (seg << 16) | (base & 0xffff);
6944 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6945 + gate->gate.offset_low = base;
6946 + gate->gate.seg = seg;
6947 + gate->gate.reserved = 0;
6948 + gate->gate.type = type;
6949 + gate->gate.s = 0;
6950 + gate->gate.dpl = dpl;
6951 + gate->gate.p = 1;
6952 + gate->gate.offset_high = base >> 16;
6953 }
6954
6955 #endif
6956 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
6957
6958 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
6959 {
6960 + pax_open_kernel();
6961 memcpy(&idt[entry], gate, sizeof(*gate));
6962 + pax_close_kernel();
6963 }
6964
6965 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
6966 {
6967 + pax_open_kernel();
6968 memcpy(&ldt[entry], desc, 8);
6969 + pax_close_kernel();
6970 }
6971
6972 static inline void
6973 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
6974 default: size = sizeof(*gdt); break;
6975 }
6976
6977 + pax_open_kernel();
6978 memcpy(&gdt[entry], desc, size);
6979 + pax_close_kernel();
6980 }
6981
6982 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
6983 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
6984
6985 static inline void native_load_tr_desc(void)
6986 {
6987 + pax_open_kernel();
6988 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
6989 + pax_close_kernel();
6990 }
6991
6992 static inline void native_load_gdt(const struct desc_ptr *dtr)
6993 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
6994 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
6995 unsigned int i;
6996
6997 + pax_open_kernel();
6998 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
6999 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7000 + pax_close_kernel();
7001 }
7002
7003 #define _LDT_empty(info) \
7004 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7005 desc->limit = (limit >> 16) & 0xf;
7006 }
7007
7008 -static inline void _set_gate(int gate, unsigned type, void *addr,
7009 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7010 unsigned dpl, unsigned ist, unsigned seg)
7011 {
7012 gate_desc s;
7013 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7014 * Pentium F0 0F bugfix can have resulted in the mapped
7015 * IDT being write-protected.
7016 */
7017 -static inline void set_intr_gate(unsigned int n, void *addr)
7018 +static inline void set_intr_gate(unsigned int n, const void *addr)
7019 {
7020 BUG_ON((unsigned)n > 0xFF);
7021 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7022 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7023 /*
7024 * This routine sets up an interrupt gate at directory privilege level 3.
7025 */
7026 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7027 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7028 {
7029 BUG_ON((unsigned)n > 0xFF);
7030 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7031 }
7032
7033 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7034 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7035 {
7036 BUG_ON((unsigned)n > 0xFF);
7037 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7038 }
7039
7040 -static inline void set_trap_gate(unsigned int n, void *addr)
7041 +static inline void set_trap_gate(unsigned int n, const void *addr)
7042 {
7043 BUG_ON((unsigned)n > 0xFF);
7044 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7045 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7046 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7047 {
7048 BUG_ON((unsigned)n > 0xFF);
7049 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7050 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7051 }
7052
7053 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7054 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7055 {
7056 BUG_ON((unsigned)n > 0xFF);
7057 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7058 }
7059
7060 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7061 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7062 {
7063 BUG_ON((unsigned)n > 0xFF);
7064 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7065 }
7066
7067 +#ifdef CONFIG_X86_32
7068 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7069 +{
7070 + struct desc_struct d;
7071 +
7072 + if (likely(limit))
7073 + limit = (limit - 1UL) >> PAGE_SHIFT;
7074 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7075 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7076 +}
7077 +#endif
7078 +
7079 #endif /* _ASM_X86_DESC_H */
7080 diff -urNp linux-3.0.3/arch/x86/include/asm/e820.h linux-3.0.3/arch/x86/include/asm/e820.h
7081 --- linux-3.0.3/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7082 +++ linux-3.0.3/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7083 @@ -69,7 +69,7 @@ struct e820map {
7084 #define ISA_START_ADDRESS 0xa0000
7085 #define ISA_END_ADDRESS 0x100000
7086
7087 -#define BIOS_BEGIN 0x000a0000
7088 +#define BIOS_BEGIN 0x000c0000
7089 #define BIOS_END 0x00100000
7090
7091 #define BIOS_ROM_BASE 0xffe00000
7092 diff -urNp linux-3.0.3/arch/x86/include/asm/elf.h linux-3.0.3/arch/x86/include/asm/elf.h
7093 --- linux-3.0.3/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7094 +++ linux-3.0.3/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7095 @@ -237,7 +237,25 @@ extern int force_personality32;
7096 the loader. We need to make sure that it is out of the way of the program
7097 that it will "exec", and that there is sufficient room for the brk. */
7098
7099 +#ifdef CONFIG_PAX_SEGMEXEC
7100 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7101 +#else
7102 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7103 +#endif
7104 +
7105 +#ifdef CONFIG_PAX_ASLR
7106 +#ifdef CONFIG_X86_32
7107 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7108 +
7109 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7110 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7111 +#else
7112 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7113 +
7114 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7115 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7116 +#endif
7117 +#endif
7118
7119 /* This yields a mask that user programs can use to figure out what
7120 instruction set this CPU supports. This could be done in user space,
7121 @@ -290,9 +308,7 @@ do { \
7122
7123 #define ARCH_DLINFO \
7124 do { \
7125 - if (vdso_enabled) \
7126 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7127 - (unsigned long)current->mm->context.vdso); \
7128 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7129 } while (0)
7130
7131 #define AT_SYSINFO 32
7132 @@ -303,7 +319,7 @@ do { \
7133
7134 #endif /* !CONFIG_X86_32 */
7135
7136 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7137 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7138
7139 #define VDSO_ENTRY \
7140 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7141 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7142 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7143 #define compat_arch_setup_additional_pages syscall32_setup_pages
7144
7145 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7146 -#define arch_randomize_brk arch_randomize_brk
7147 -
7148 #endif /* _ASM_X86_ELF_H */
7149 diff -urNp linux-3.0.3/arch/x86/include/asm/emergency-restart.h linux-3.0.3/arch/x86/include/asm/emergency-restart.h
7150 --- linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7151 +++ linux-3.0.3/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7152 @@ -15,6 +15,6 @@ enum reboot_type {
7153
7154 extern enum reboot_type reboot_type;
7155
7156 -extern void machine_emergency_restart(void);
7157 +extern void machine_emergency_restart(void) __noreturn;
7158
7159 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7160 diff -urNp linux-3.0.3/arch/x86/include/asm/futex.h linux-3.0.3/arch/x86/include/asm/futex.h
7161 --- linux-3.0.3/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7162 +++ linux-3.0.3/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7163 @@ -12,16 +12,18 @@
7164 #include <asm/system.h>
7165
7166 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7167 + typecheck(u32 *, uaddr); \
7168 asm volatile("1:\t" insn "\n" \
7169 "2:\t.section .fixup,\"ax\"\n" \
7170 "3:\tmov\t%3, %1\n" \
7171 "\tjmp\t2b\n" \
7172 "\t.previous\n" \
7173 _ASM_EXTABLE(1b, 3b) \
7174 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7175 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7176 : "i" (-EFAULT), "0" (oparg), "1" (0))
7177
7178 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7179 + typecheck(u32 *, uaddr); \
7180 asm volatile("1:\tmovl %2, %0\n" \
7181 "\tmovl\t%0, %3\n" \
7182 "\t" insn "\n" \
7183 @@ -34,7 +36,7 @@
7184 _ASM_EXTABLE(1b, 4b) \
7185 _ASM_EXTABLE(2b, 4b) \
7186 : "=&a" (oldval), "=&r" (ret), \
7187 - "+m" (*uaddr), "=&r" (tem) \
7188 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7189 : "r" (oparg), "i" (-EFAULT), "1" (0))
7190
7191 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7192 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7193
7194 switch (op) {
7195 case FUTEX_OP_SET:
7196 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7197 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7198 break;
7199 case FUTEX_OP_ADD:
7200 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7201 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7202 uaddr, oparg);
7203 break;
7204 case FUTEX_OP_OR:
7205 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7206 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7207 return -EFAULT;
7208
7209 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7210 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7211 "2:\t.section .fixup, \"ax\"\n"
7212 "3:\tmov %3, %0\n"
7213 "\tjmp 2b\n"
7214 "\t.previous\n"
7215 _ASM_EXTABLE(1b, 3b)
7216 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7217 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7218 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7219 : "memory"
7220 );
7221 diff -urNp linux-3.0.3/arch/x86/include/asm/hw_irq.h linux-3.0.3/arch/x86/include/asm/hw_irq.h
7222 --- linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7223 +++ linux-3.0.3/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7224 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7225 extern void enable_IO_APIC(void);
7226
7227 /* Statistics */
7228 -extern atomic_t irq_err_count;
7229 -extern atomic_t irq_mis_count;
7230 +extern atomic_unchecked_t irq_err_count;
7231 +extern atomic_unchecked_t irq_mis_count;
7232
7233 /* EISA */
7234 extern void eisa_set_level_irq(unsigned int irq);
7235 diff -urNp linux-3.0.3/arch/x86/include/asm/i387.h linux-3.0.3/arch/x86/include/asm/i387.h
7236 --- linux-3.0.3/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7237 +++ linux-3.0.3/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7238 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7239 {
7240 int err;
7241
7242 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7243 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7244 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7245 +#endif
7246 +
7247 /* See comment in fxsave() below. */
7248 #ifdef CONFIG_AS_FXSAVEQ
7249 asm volatile("1: fxrstorq %[fx]\n\t"
7250 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7251 {
7252 int err;
7253
7254 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7255 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7256 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7257 +#endif
7258 +
7259 /*
7260 * Clear the bytes not touched by the fxsave and reserved
7261 * for the SW usage.
7262 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7263 #endif /* CONFIG_X86_64 */
7264
7265 /* We need a safe address that is cheap to find and that is already
7266 - in L1 during context switch. The best choices are unfortunately
7267 - different for UP and SMP */
7268 -#ifdef CONFIG_SMP
7269 -#define safe_address (__per_cpu_offset[0])
7270 -#else
7271 -#define safe_address (kstat_cpu(0).cpustat.user)
7272 -#endif
7273 + in L1 during context switch. */
7274 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7275
7276 /*
7277 * These must be called with preempt disabled
7278 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7279 struct thread_info *me = current_thread_info();
7280 preempt_disable();
7281 if (me->status & TS_USEDFPU)
7282 - __save_init_fpu(me->task);
7283 + __save_init_fpu(current);
7284 else
7285 clts();
7286 }
7287 diff -urNp linux-3.0.3/arch/x86/include/asm/io.h linux-3.0.3/arch/x86/include/asm/io.h
7288 --- linux-3.0.3/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7289 +++ linux-3.0.3/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7290 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7291
7292 #include <linux/vmalloc.h>
7293
7294 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7295 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7296 +{
7297 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7298 +}
7299 +
7300 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7301 +{
7302 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7303 +}
7304 +
7305 /*
7306 * Convert a virtual cached pointer to an uncached pointer
7307 */
7308 diff -urNp linux-3.0.3/arch/x86/include/asm/irqflags.h linux-3.0.3/arch/x86/include/asm/irqflags.h
7309 --- linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7310 +++ linux-3.0.3/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7311 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7312 sti; \
7313 sysexit
7314
7315 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7316 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7317 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7318 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7319 +
7320 #else
7321 #define INTERRUPT_RETURN iret
7322 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7323 diff -urNp linux-3.0.3/arch/x86/include/asm/kprobes.h linux-3.0.3/arch/x86/include/asm/kprobes.h
7324 --- linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7325 +++ linux-3.0.3/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7326 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7327 #define RELATIVEJUMP_SIZE 5
7328 #define RELATIVECALL_OPCODE 0xe8
7329 #define RELATIVE_ADDR_SIZE 4
7330 -#define MAX_STACK_SIZE 64
7331 -#define MIN_STACK_SIZE(ADDR) \
7332 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7333 - THREAD_SIZE - (unsigned long)(ADDR))) \
7334 - ? (MAX_STACK_SIZE) \
7335 - : (((unsigned long)current_thread_info()) + \
7336 - THREAD_SIZE - (unsigned long)(ADDR)))
7337 +#define MAX_STACK_SIZE 64UL
7338 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7339
7340 #define flush_insn_slot(p) do { } while (0)
7341
7342 diff -urNp linux-3.0.3/arch/x86/include/asm/kvm_host.h linux-3.0.3/arch/x86/include/asm/kvm_host.h
7343 --- linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7344 +++ linux-3.0.3/arch/x86/include/asm/kvm_host.h 2011-08-23 21:47:55.000000000 -0400
7345 @@ -441,7 +441,7 @@ struct kvm_arch {
7346 unsigned int n_used_mmu_pages;
7347 unsigned int n_requested_mmu_pages;
7348 unsigned int n_max_mmu_pages;
7349 - atomic_t invlpg_counter;
7350 + atomic_unchecked_t invlpg_counter;
7351 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7352 /*
7353 * Hash table of struct kvm_mmu_page.
7354 @@ -618,7 +618,7 @@ struct kvm_x86_ops {
7355 struct x86_instruction_info *info,
7356 enum x86_intercept_stage stage);
7357
7358 - const struct trace_print_flags *exit_reasons_str;
7359 + const struct trace_print_flags * const exit_reasons_str;
7360 };
7361
7362 struct kvm_arch_async_pf {
7363 diff -urNp linux-3.0.3/arch/x86/include/asm/local.h linux-3.0.3/arch/x86/include/asm/local.h
7364 --- linux-3.0.3/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7365 +++ linux-3.0.3/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7366 @@ -18,26 +18,58 @@ typedef struct {
7367
7368 static inline void local_inc(local_t *l)
7369 {
7370 - asm volatile(_ASM_INC "%0"
7371 + asm volatile(_ASM_INC "%0\n"
7372 +
7373 +#ifdef CONFIG_PAX_REFCOUNT
7374 + "jno 0f\n"
7375 + _ASM_DEC "%0\n"
7376 + "int $4\n0:\n"
7377 + _ASM_EXTABLE(0b, 0b)
7378 +#endif
7379 +
7380 : "+m" (l->a.counter));
7381 }
7382
7383 static inline void local_dec(local_t *l)
7384 {
7385 - asm volatile(_ASM_DEC "%0"
7386 + asm volatile(_ASM_DEC "%0\n"
7387 +
7388 +#ifdef CONFIG_PAX_REFCOUNT
7389 + "jno 0f\n"
7390 + _ASM_INC "%0\n"
7391 + "int $4\n0:\n"
7392 + _ASM_EXTABLE(0b, 0b)
7393 +#endif
7394 +
7395 : "+m" (l->a.counter));
7396 }
7397
7398 static inline void local_add(long i, local_t *l)
7399 {
7400 - asm volatile(_ASM_ADD "%1,%0"
7401 + asm volatile(_ASM_ADD "%1,%0\n"
7402 +
7403 +#ifdef CONFIG_PAX_REFCOUNT
7404 + "jno 0f\n"
7405 + _ASM_SUB "%1,%0\n"
7406 + "int $4\n0:\n"
7407 + _ASM_EXTABLE(0b, 0b)
7408 +#endif
7409 +
7410 : "+m" (l->a.counter)
7411 : "ir" (i));
7412 }
7413
7414 static inline void local_sub(long i, local_t *l)
7415 {
7416 - asm volatile(_ASM_SUB "%1,%0"
7417 + asm volatile(_ASM_SUB "%1,%0\n"
7418 +
7419 +#ifdef CONFIG_PAX_REFCOUNT
7420 + "jno 0f\n"
7421 + _ASM_ADD "%1,%0\n"
7422 + "int $4\n0:\n"
7423 + _ASM_EXTABLE(0b, 0b)
7424 +#endif
7425 +
7426 : "+m" (l->a.counter)
7427 : "ir" (i));
7428 }
7429 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7430 {
7431 unsigned char c;
7432
7433 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7434 + asm volatile(_ASM_SUB "%2,%0\n"
7435 +
7436 +#ifdef CONFIG_PAX_REFCOUNT
7437 + "jno 0f\n"
7438 + _ASM_ADD "%2,%0\n"
7439 + "int $4\n0:\n"
7440 + _ASM_EXTABLE(0b, 0b)
7441 +#endif
7442 +
7443 + "sete %1\n"
7444 : "+m" (l->a.counter), "=qm" (c)
7445 : "ir" (i) : "memory");
7446 return c;
7447 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7448 {
7449 unsigned char c;
7450
7451 - asm volatile(_ASM_DEC "%0; sete %1"
7452 + asm volatile(_ASM_DEC "%0\n"
7453 +
7454 +#ifdef CONFIG_PAX_REFCOUNT
7455 + "jno 0f\n"
7456 + _ASM_INC "%0\n"
7457 + "int $4\n0:\n"
7458 + _ASM_EXTABLE(0b, 0b)
7459 +#endif
7460 +
7461 + "sete %1\n"
7462 : "+m" (l->a.counter), "=qm" (c)
7463 : : "memory");
7464 return c != 0;
7465 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7466 {
7467 unsigned char c;
7468
7469 - asm volatile(_ASM_INC "%0; sete %1"
7470 + asm volatile(_ASM_INC "%0\n"
7471 +
7472 +#ifdef CONFIG_PAX_REFCOUNT
7473 + "jno 0f\n"
7474 + _ASM_DEC "%0\n"
7475 + "int $4\n0:\n"
7476 + _ASM_EXTABLE(0b, 0b)
7477 +#endif
7478 +
7479 + "sete %1\n"
7480 : "+m" (l->a.counter), "=qm" (c)
7481 : : "memory");
7482 return c != 0;
7483 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7484 {
7485 unsigned char c;
7486
7487 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7488 + asm volatile(_ASM_ADD "%2,%0\n"
7489 +
7490 +#ifdef CONFIG_PAX_REFCOUNT
7491 + "jno 0f\n"
7492 + _ASM_SUB "%2,%0\n"
7493 + "int $4\n0:\n"
7494 + _ASM_EXTABLE(0b, 0b)
7495 +#endif
7496 +
7497 + "sets %1\n"
7498 : "+m" (l->a.counter), "=qm" (c)
7499 : "ir" (i) : "memory");
7500 return c;
7501 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7502 #endif
7503 /* Modern 486+ processor */
7504 __i = i;
7505 - asm volatile(_ASM_XADD "%0, %1;"
7506 + asm volatile(_ASM_XADD "%0, %1\n"
7507 +
7508 +#ifdef CONFIG_PAX_REFCOUNT
7509 + "jno 0f\n"
7510 + _ASM_MOV "%0,%1\n"
7511 + "int $4\n0:\n"
7512 + _ASM_EXTABLE(0b, 0b)
7513 +#endif
7514 +
7515 : "+r" (i), "+m" (l->a.counter)
7516 : : "memory");
7517 return i + __i;
7518 diff -urNp linux-3.0.3/arch/x86/include/asm/mman.h linux-3.0.3/arch/x86/include/asm/mman.h
7519 --- linux-3.0.3/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7520 +++ linux-3.0.3/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7521 @@ -5,4 +5,14 @@
7522
7523 #include <asm-generic/mman.h>
7524
7525 +#ifdef __KERNEL__
7526 +#ifndef __ASSEMBLY__
7527 +#ifdef CONFIG_X86_32
7528 +#define arch_mmap_check i386_mmap_check
7529 +int i386_mmap_check(unsigned long addr, unsigned long len,
7530 + unsigned long flags);
7531 +#endif
7532 +#endif
7533 +#endif
7534 +
7535 #endif /* _ASM_X86_MMAN_H */
7536 diff -urNp linux-3.0.3/arch/x86/include/asm/mmu_context.h linux-3.0.3/arch/x86/include/asm/mmu_context.h
7537 --- linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7538 +++ linux-3.0.3/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7539 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7540
7541 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7542 {
7543 +
7544 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7545 + unsigned int i;
7546 + pgd_t *pgd;
7547 +
7548 + pax_open_kernel();
7549 + pgd = get_cpu_pgd(smp_processor_id());
7550 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7551 + set_pgd_batched(pgd+i, native_make_pgd(0));
7552 + pax_close_kernel();
7553 +#endif
7554 +
7555 #ifdef CONFIG_SMP
7556 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7557 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7558 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7559 struct task_struct *tsk)
7560 {
7561 unsigned cpu = smp_processor_id();
7562 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7563 + int tlbstate = TLBSTATE_OK;
7564 +#endif
7565
7566 if (likely(prev != next)) {
7567 #ifdef CONFIG_SMP
7568 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7569 + tlbstate = percpu_read(cpu_tlbstate.state);
7570 +#endif
7571 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7572 percpu_write(cpu_tlbstate.active_mm, next);
7573 #endif
7574 cpumask_set_cpu(cpu, mm_cpumask(next));
7575
7576 /* Re-load page tables */
7577 +#ifdef CONFIG_PAX_PER_CPU_PGD
7578 + pax_open_kernel();
7579 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7580 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7581 + pax_close_kernel();
7582 + load_cr3(get_cpu_pgd(cpu));
7583 +#else
7584 load_cr3(next->pgd);
7585 +#endif
7586
7587 /* stop flush ipis for the previous mm */
7588 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7589 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7590 */
7591 if (unlikely(prev->context.ldt != next->context.ldt))
7592 load_LDT_nolock(&next->context);
7593 - }
7594 +
7595 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7596 + if (!(__supported_pte_mask & _PAGE_NX)) {
7597 + smp_mb__before_clear_bit();
7598 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7599 + smp_mb__after_clear_bit();
7600 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7601 + }
7602 +#endif
7603 +
7604 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7605 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7606 + prev->context.user_cs_limit != next->context.user_cs_limit))
7607 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7608 #ifdef CONFIG_SMP
7609 + else if (unlikely(tlbstate != TLBSTATE_OK))
7610 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7611 +#endif
7612 +#endif
7613 +
7614 + }
7615 else {
7616 +
7617 +#ifdef CONFIG_PAX_PER_CPU_PGD
7618 + pax_open_kernel();
7619 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7620 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7621 + pax_close_kernel();
7622 + load_cr3(get_cpu_pgd(cpu));
7623 +#endif
7624 +
7625 +#ifdef CONFIG_SMP
7626 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7627 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7628
7629 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7630 * tlb flush IPI delivery. We must reload CR3
7631 * to make sure to use no freed page tables.
7632 */
7633 +
7634 +#ifndef CONFIG_PAX_PER_CPU_PGD
7635 load_cr3(next->pgd);
7636 +#endif
7637 +
7638 load_LDT_nolock(&next->context);
7639 +
7640 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7641 + if (!(__supported_pte_mask & _PAGE_NX))
7642 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7643 +#endif
7644 +
7645 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7646 +#ifdef CONFIG_PAX_PAGEEXEC
7647 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7648 +#endif
7649 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7650 +#endif
7651 +
7652 }
7653 - }
7654 #endif
7655 + }
7656 }
7657
7658 #define activate_mm(prev, next) \
7659 diff -urNp linux-3.0.3/arch/x86/include/asm/mmu.h linux-3.0.3/arch/x86/include/asm/mmu.h
7660 --- linux-3.0.3/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7661 +++ linux-3.0.3/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7662 @@ -9,7 +9,7 @@
7663 * we put the segment information here.
7664 */
7665 typedef struct {
7666 - void *ldt;
7667 + struct desc_struct *ldt;
7668 int size;
7669
7670 #ifdef CONFIG_X86_64
7671 @@ -18,7 +18,19 @@ typedef struct {
7672 #endif
7673
7674 struct mutex lock;
7675 - void *vdso;
7676 + unsigned long vdso;
7677 +
7678 +#ifdef CONFIG_X86_32
7679 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7680 + unsigned long user_cs_base;
7681 + unsigned long user_cs_limit;
7682 +
7683 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7684 + cpumask_t cpu_user_cs_mask;
7685 +#endif
7686 +
7687 +#endif
7688 +#endif
7689 } mm_context_t;
7690
7691 #ifdef CONFIG_SMP
7692 diff -urNp linux-3.0.3/arch/x86/include/asm/module.h linux-3.0.3/arch/x86/include/asm/module.h
7693 --- linux-3.0.3/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7694 +++ linux-3.0.3/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7695 @@ -5,6 +5,7 @@
7696
7697 #ifdef CONFIG_X86_64
7698 /* X86_64 does not define MODULE_PROC_FAMILY */
7699 +#define MODULE_PROC_FAMILY ""
7700 #elif defined CONFIG_M386
7701 #define MODULE_PROC_FAMILY "386 "
7702 #elif defined CONFIG_M486
7703 @@ -59,8 +60,30 @@
7704 #error unknown processor family
7705 #endif
7706
7707 -#ifdef CONFIG_X86_32
7708 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7709 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7710 +#define MODULE_PAX_UDEREF "UDEREF "
7711 +#else
7712 +#define MODULE_PAX_UDEREF ""
7713 +#endif
7714 +
7715 +#ifdef CONFIG_PAX_KERNEXEC
7716 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7717 +#else
7718 +#define MODULE_PAX_KERNEXEC ""
7719 #endif
7720
7721 +#ifdef CONFIG_PAX_REFCOUNT
7722 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7723 +#else
7724 +#define MODULE_PAX_REFCOUNT ""
7725 +#endif
7726 +
7727 +#ifdef CONFIG_GRKERNSEC
7728 +#define MODULE_GRSEC "GRSECURITY "
7729 +#else
7730 +#define MODULE_GRSEC ""
7731 +#endif
7732 +
7733 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7734 +
7735 #endif /* _ASM_X86_MODULE_H */
7736 diff -urNp linux-3.0.3/arch/x86/include/asm/page_64_types.h linux-3.0.3/arch/x86/include/asm/page_64_types.h
7737 --- linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7738 +++ linux-3.0.3/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7739 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7740
7741 /* duplicated to the one in bootmem.h */
7742 extern unsigned long max_pfn;
7743 -extern unsigned long phys_base;
7744 +extern const unsigned long phys_base;
7745
7746 extern unsigned long __phys_addr(unsigned long);
7747 #define __phys_reloc_hide(x) (x)
7748 diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt.h linux-3.0.3/arch/x86/include/asm/paravirt.h
7749 --- linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7750 +++ linux-3.0.3/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7751 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7752 val);
7753 }
7754
7755 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7756 +{
7757 + pgdval_t val = native_pgd_val(pgd);
7758 +
7759 + if (sizeof(pgdval_t) > sizeof(long))
7760 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7761 + val, (u64)val >> 32);
7762 + else
7763 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7764 + val);
7765 +}
7766 +
7767 static inline void pgd_clear(pgd_t *pgdp)
7768 {
7769 set_pgd(pgdp, __pgd(0));
7770 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7771 pv_mmu_ops.set_fixmap(idx, phys, flags);
7772 }
7773
7774 +#ifdef CONFIG_PAX_KERNEXEC
7775 +static inline unsigned long pax_open_kernel(void)
7776 +{
7777 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7778 +}
7779 +
7780 +static inline unsigned long pax_close_kernel(void)
7781 +{
7782 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7783 +}
7784 +#else
7785 +static inline unsigned long pax_open_kernel(void) { return 0; }
7786 +static inline unsigned long pax_close_kernel(void) { return 0; }
7787 +#endif
7788 +
7789 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7790
7791 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7792 @@ -955,7 +982,7 @@ extern void default_banner(void);
7793
7794 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7795 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7796 -#define PARA_INDIRECT(addr) *%cs:addr
7797 +#define PARA_INDIRECT(addr) *%ss:addr
7798 #endif
7799
7800 #define INTERRUPT_RETURN \
7801 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7802 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7803 CLBR_NONE, \
7804 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7805 +
7806 +#define GET_CR0_INTO_RDI \
7807 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7808 + mov %rax,%rdi
7809 +
7810 +#define SET_RDI_INTO_CR0 \
7811 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7812 +
7813 +#define GET_CR3_INTO_RDI \
7814 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7815 + mov %rax,%rdi
7816 +
7817 +#define SET_RDI_INTO_CR3 \
7818 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7819 +
7820 #endif /* CONFIG_X86_32 */
7821
7822 #endif /* __ASSEMBLY__ */
7823 diff -urNp linux-3.0.3/arch/x86/include/asm/paravirt_types.h linux-3.0.3/arch/x86/include/asm/paravirt_types.h
7824 --- linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7825 +++ linux-3.0.3/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7826 @@ -78,19 +78,19 @@ struct pv_init_ops {
7827 */
7828 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7829 unsigned long addr, unsigned len);
7830 -};
7831 +} __no_const;
7832
7833
7834 struct pv_lazy_ops {
7835 /* Set deferred update mode, used for batching operations. */
7836 void (*enter)(void);
7837 void (*leave)(void);
7838 -};
7839 +} __no_const;
7840
7841 struct pv_time_ops {
7842 unsigned long long (*sched_clock)(void);
7843 unsigned long (*get_tsc_khz)(void);
7844 -};
7845 +} __no_const;
7846
7847 struct pv_cpu_ops {
7848 /* hooks for various privileged instructions */
7849 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7850
7851 void (*start_context_switch)(struct task_struct *prev);
7852 void (*end_context_switch)(struct task_struct *next);
7853 -};
7854 +} __no_const;
7855
7856 struct pv_irq_ops {
7857 /*
7858 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7859 unsigned long start_eip,
7860 unsigned long start_esp);
7861 #endif
7862 -};
7863 +} __no_const;
7864
7865 struct pv_mmu_ops {
7866 unsigned long (*read_cr2)(void);
7867 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
7868 struct paravirt_callee_save make_pud;
7869
7870 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7871 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7872 #endif /* PAGETABLE_LEVELS == 4 */
7873 #endif /* PAGETABLE_LEVELS >= 3 */
7874
7875 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
7876 an mfn. We can tell which is which from the index. */
7877 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7878 phys_addr_t phys, pgprot_t flags);
7879 +
7880 +#ifdef CONFIG_PAX_KERNEXEC
7881 + unsigned long (*pax_open_kernel)(void);
7882 + unsigned long (*pax_close_kernel)(void);
7883 +#endif
7884 +
7885 };
7886
7887 struct arch_spinlock;
7888 @@ -327,7 +334,7 @@ struct pv_lock_ops {
7889 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7890 int (*spin_trylock)(struct arch_spinlock *lock);
7891 void (*spin_unlock)(struct arch_spinlock *lock);
7892 -};
7893 +} __no_const;
7894
7895 /* This contains all the paravirt structures: we get a convenient
7896 * number for each function using the offset which we use to indicate
7897 diff -urNp linux-3.0.3/arch/x86/include/asm/pgalloc.h linux-3.0.3/arch/x86/include/asm/pgalloc.h
7898 --- linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7899 +++ linux-3.0.3/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7900 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7901 pmd_t *pmd, pte_t *pte)
7902 {
7903 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7904 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7905 +}
7906 +
7907 +static inline void pmd_populate_user(struct mm_struct *mm,
7908 + pmd_t *pmd, pte_t *pte)
7909 +{
7910 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7911 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7912 }
7913
7914 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-2level.h linux-3.0.3/arch/x86/include/asm/pgtable-2level.h
7915 --- linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7916 +++ linux-3.0.3/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7917 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7918
7919 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7920 {
7921 + pax_open_kernel();
7922 *pmdp = pmd;
7923 + pax_close_kernel();
7924 }
7925
7926 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7927 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32.h linux-3.0.3/arch/x86/include/asm/pgtable_32.h
7928 --- linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7929 +++ linux-3.0.3/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7930 @@ -25,9 +25,6 @@
7931 struct mm_struct;
7932 struct vm_area_struct;
7933
7934 -extern pgd_t swapper_pg_dir[1024];
7935 -extern pgd_t initial_page_table[1024];
7936 -
7937 static inline void pgtable_cache_init(void) { }
7938 static inline void check_pgt_cache(void) { }
7939 void paging_init(void);
7940 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7941 # include <asm/pgtable-2level.h>
7942 #endif
7943
7944 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7945 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7946 +#ifdef CONFIG_X86_PAE
7947 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7948 +#endif
7949 +
7950 #if defined(CONFIG_HIGHPTE)
7951 #define pte_offset_map(dir, address) \
7952 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7953 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7954 /* Clear a kernel PTE and flush it from the TLB */
7955 #define kpte_clear_flush(ptep, vaddr) \
7956 do { \
7957 + pax_open_kernel(); \
7958 pte_clear(&init_mm, (vaddr), (ptep)); \
7959 + pax_close_kernel(); \
7960 __flush_tlb_one((vaddr)); \
7961 } while (0)
7962
7963 @@ -74,6 +79,9 @@ do { \
7964
7965 #endif /* !__ASSEMBLY__ */
7966
7967 +#define HAVE_ARCH_UNMAPPED_AREA
7968 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7969 +
7970 /*
7971 * kern_addr_valid() is (1) for FLATMEM and (0) for
7972 * SPARSEMEM and DISCONTIGMEM
7973 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h
7974 --- linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
7975 +++ linux-3.0.3/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
7976 @@ -8,7 +8,7 @@
7977 */
7978 #ifdef CONFIG_X86_PAE
7979 # include <asm/pgtable-3level_types.h>
7980 -# define PMD_SIZE (1UL << PMD_SHIFT)
7981 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7982 # define PMD_MASK (~(PMD_SIZE - 1))
7983 #else
7984 # include <asm/pgtable-2level_types.h>
7985 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7986 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7987 #endif
7988
7989 +#ifdef CONFIG_PAX_KERNEXEC
7990 +#ifndef __ASSEMBLY__
7991 +extern unsigned char MODULES_EXEC_VADDR[];
7992 +extern unsigned char MODULES_EXEC_END[];
7993 +#endif
7994 +#include <asm/boot.h>
7995 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7996 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7997 +#else
7998 +#define ktla_ktva(addr) (addr)
7999 +#define ktva_ktla(addr) (addr)
8000 +#endif
8001 +
8002 #define MODULES_VADDR VMALLOC_START
8003 #define MODULES_END VMALLOC_END
8004 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8005 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable-3level.h linux-3.0.3/arch/x86/include/asm/pgtable-3level.h
8006 --- linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8007 +++ linux-3.0.3/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8008 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8009
8010 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8011 {
8012 + pax_open_kernel();
8013 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8014 + pax_close_kernel();
8015 }
8016
8017 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8018 {
8019 + pax_open_kernel();
8020 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8021 + pax_close_kernel();
8022 }
8023
8024 /*
8025 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64.h linux-3.0.3/arch/x86/include/asm/pgtable_64.h
8026 --- linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8027 +++ linux-3.0.3/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8028 @@ -16,10 +16,13 @@
8029
8030 extern pud_t level3_kernel_pgt[512];
8031 extern pud_t level3_ident_pgt[512];
8032 +extern pud_t level3_vmalloc_pgt[512];
8033 +extern pud_t level3_vmemmap_pgt[512];
8034 +extern pud_t level2_vmemmap_pgt[512];
8035 extern pmd_t level2_kernel_pgt[512];
8036 extern pmd_t level2_fixmap_pgt[512];
8037 -extern pmd_t level2_ident_pgt[512];
8038 -extern pgd_t init_level4_pgt[];
8039 +extern pmd_t level2_ident_pgt[512*2];
8040 +extern pgd_t init_level4_pgt[512];
8041
8042 #define swapper_pg_dir init_level4_pgt
8043
8044 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8045
8046 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8047 {
8048 + pax_open_kernel();
8049 *pmdp = pmd;
8050 + pax_close_kernel();
8051 }
8052
8053 static inline void native_pmd_clear(pmd_t *pmd)
8054 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8055
8056 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8057 {
8058 + pax_open_kernel();
8059 + *pgdp = pgd;
8060 + pax_close_kernel();
8061 +}
8062 +
8063 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8064 +{
8065 *pgdp = pgd;
8066 }
8067
8068 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h
8069 --- linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8070 +++ linux-3.0.3/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8071 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8072 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8073 #define MODULES_END _AC(0xffffffffff000000, UL)
8074 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8075 +#define MODULES_EXEC_VADDR MODULES_VADDR
8076 +#define MODULES_EXEC_END MODULES_END
8077 +
8078 +#define ktla_ktva(addr) (addr)
8079 +#define ktva_ktla(addr) (addr)
8080
8081 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8082 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable.h linux-3.0.3/arch/x86/include/asm/pgtable.h
8083 --- linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8084 +++ linux-3.0.3/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8085 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8086
8087 #ifndef __PAGETABLE_PUD_FOLDED
8088 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8089 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8090 #define pgd_clear(pgd) native_pgd_clear(pgd)
8091 #endif
8092
8093 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8094
8095 #define arch_end_context_switch(prev) do {} while(0)
8096
8097 +#define pax_open_kernel() native_pax_open_kernel()
8098 +#define pax_close_kernel() native_pax_close_kernel()
8099 #endif /* CONFIG_PARAVIRT */
8100
8101 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8102 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8103 +
8104 +#ifdef CONFIG_PAX_KERNEXEC
8105 +static inline unsigned long native_pax_open_kernel(void)
8106 +{
8107 + unsigned long cr0;
8108 +
8109 + preempt_disable();
8110 + barrier();
8111 + cr0 = read_cr0() ^ X86_CR0_WP;
8112 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8113 + write_cr0(cr0);
8114 + return cr0 ^ X86_CR0_WP;
8115 +}
8116 +
8117 +static inline unsigned long native_pax_close_kernel(void)
8118 +{
8119 + unsigned long cr0;
8120 +
8121 + cr0 = read_cr0() ^ X86_CR0_WP;
8122 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8123 + write_cr0(cr0);
8124 + barrier();
8125 + preempt_enable_no_resched();
8126 + return cr0 ^ X86_CR0_WP;
8127 +}
8128 +#else
8129 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8130 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8131 +#endif
8132 +
8133 /*
8134 * The following only work if pte_present() is true.
8135 * Undefined behaviour if not..
8136 */
8137 +static inline int pte_user(pte_t pte)
8138 +{
8139 + return pte_val(pte) & _PAGE_USER;
8140 +}
8141 +
8142 static inline int pte_dirty(pte_t pte)
8143 {
8144 return pte_flags(pte) & _PAGE_DIRTY;
8145 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8146 return pte_clear_flags(pte, _PAGE_RW);
8147 }
8148
8149 +static inline pte_t pte_mkread(pte_t pte)
8150 +{
8151 + return __pte(pte_val(pte) | _PAGE_USER);
8152 +}
8153 +
8154 static inline pte_t pte_mkexec(pte_t pte)
8155 {
8156 - return pte_clear_flags(pte, _PAGE_NX);
8157 +#ifdef CONFIG_X86_PAE
8158 + if (__supported_pte_mask & _PAGE_NX)
8159 + return pte_clear_flags(pte, _PAGE_NX);
8160 + else
8161 +#endif
8162 + return pte_set_flags(pte, _PAGE_USER);
8163 +}
8164 +
8165 +static inline pte_t pte_exprotect(pte_t pte)
8166 +{
8167 +#ifdef CONFIG_X86_PAE
8168 + if (__supported_pte_mask & _PAGE_NX)
8169 + return pte_set_flags(pte, _PAGE_NX);
8170 + else
8171 +#endif
8172 + return pte_clear_flags(pte, _PAGE_USER);
8173 }
8174
8175 static inline pte_t pte_mkdirty(pte_t pte)
8176 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8177 #endif
8178
8179 #ifndef __ASSEMBLY__
8180 +
8181 +#ifdef CONFIG_PAX_PER_CPU_PGD
8182 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8183 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8184 +{
8185 + return cpu_pgd[cpu];
8186 +}
8187 +#endif
8188 +
8189 #include <linux/mm_types.h>
8190
8191 static inline int pte_none(pte_t pte)
8192 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8193
8194 static inline int pgd_bad(pgd_t pgd)
8195 {
8196 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8197 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8198 }
8199
8200 static inline int pgd_none(pgd_t pgd)
8201 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8202 * pgd_offset() returns a (pgd_t *)
8203 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8204 */
8205 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8206 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8207 +
8208 +#ifdef CONFIG_PAX_PER_CPU_PGD
8209 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8210 +#endif
8211 +
8212 /*
8213 * a shortcut which implies the use of the kernel's pgd, instead
8214 * of a process's
8215 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8216 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8217 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8218
8219 +#ifdef CONFIG_X86_32
8220 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8221 +#else
8222 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8223 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8224 +
8225 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8226 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8227 +#else
8228 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8229 +#endif
8230 +
8231 +#endif
8232 +
8233 #ifndef __ASSEMBLY__
8234
8235 extern int direct_gbpages;
8236 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8237 * dst and src can be on the same page, but the range must not overlap,
8238 * and must not cross a page boundary.
8239 */
8240 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8241 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8242 {
8243 - memcpy(dst, src, count * sizeof(pgd_t));
8244 + pax_open_kernel();
8245 + while (count--)
8246 + *dst++ = *src++;
8247 + pax_close_kernel();
8248 }
8249
8250 +#ifdef CONFIG_PAX_PER_CPU_PGD
8251 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8252 +#endif
8253 +
8254 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8255 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8256 +#else
8257 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8258 +#endif
8259
8260 #include <asm-generic/pgtable.h>
8261 #endif /* __ASSEMBLY__ */
8262 diff -urNp linux-3.0.3/arch/x86/include/asm/pgtable_types.h linux-3.0.3/arch/x86/include/asm/pgtable_types.h
8263 --- linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8264 +++ linux-3.0.3/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8265 @@ -16,13 +16,12 @@
8266 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8267 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8268 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8269 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8270 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8271 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8272 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8273 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8274 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8275 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8276 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8277 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8278 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8279 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8280
8281 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8282 @@ -40,7 +39,6 @@
8283 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8284 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8285 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8286 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8287 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8288 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8289 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8290 @@ -57,8 +55,10 @@
8291
8292 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8293 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8294 -#else
8295 +#elif defined(CONFIG_KMEMCHECK)
8296 #define _PAGE_NX (_AT(pteval_t, 0))
8297 +#else
8298 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8299 #endif
8300
8301 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8302 @@ -96,6 +96,9 @@
8303 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8304 _PAGE_ACCESSED)
8305
8306 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8307 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8308 +
8309 #define __PAGE_KERNEL_EXEC \
8310 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8311 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8312 @@ -106,8 +109,8 @@
8313 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8314 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8315 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8316 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8317 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8318 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8319 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8320 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8321 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8322 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8323 @@ -166,8 +169,8 @@
8324 * bits are combined, this will alow user to access the high address mapped
8325 * VDSO in the presence of CONFIG_COMPAT_VDSO
8326 */
8327 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8328 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8329 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8330 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8331 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8332 #endif
8333
8334 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8335 {
8336 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8337 }
8338 +#endif
8339
8340 +#if PAGETABLE_LEVELS == 3
8341 +#include <asm-generic/pgtable-nopud.h>
8342 +#endif
8343 +
8344 +#if PAGETABLE_LEVELS == 2
8345 +#include <asm-generic/pgtable-nopmd.h>
8346 +#endif
8347 +
8348 +#ifndef __ASSEMBLY__
8349 #if PAGETABLE_LEVELS > 3
8350 typedef struct { pudval_t pud; } pud_t;
8351
8352 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8353 return pud.pud;
8354 }
8355 #else
8356 -#include <asm-generic/pgtable-nopud.h>
8357 -
8358 static inline pudval_t native_pud_val(pud_t pud)
8359 {
8360 return native_pgd_val(pud.pgd);
8361 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8362 return pmd.pmd;
8363 }
8364 #else
8365 -#include <asm-generic/pgtable-nopmd.h>
8366 -
8367 static inline pmdval_t native_pmd_val(pmd_t pmd)
8368 {
8369 return native_pgd_val(pmd.pud.pgd);
8370 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8371
8372 extern pteval_t __supported_pte_mask;
8373 extern void set_nx(void);
8374 -extern int nx_enabled;
8375
8376 #define pgprot_writecombine pgprot_writecombine
8377 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8378 diff -urNp linux-3.0.3/arch/x86/include/asm/processor.h linux-3.0.3/arch/x86/include/asm/processor.h
8379 --- linux-3.0.3/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8380 +++ linux-3.0.3/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8381 @@ -266,7 +266,7 @@ struct tss_struct {
8382
8383 } ____cacheline_aligned;
8384
8385 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8386 +extern struct tss_struct init_tss[NR_CPUS];
8387
8388 /*
8389 * Save the original ist values for checking stack pointers during debugging
8390 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8391 */
8392 #define TASK_SIZE PAGE_OFFSET
8393 #define TASK_SIZE_MAX TASK_SIZE
8394 +
8395 +#ifdef CONFIG_PAX_SEGMEXEC
8396 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8397 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8398 +#else
8399 #define STACK_TOP TASK_SIZE
8400 -#define STACK_TOP_MAX STACK_TOP
8401 +#endif
8402 +
8403 +#define STACK_TOP_MAX TASK_SIZE
8404
8405 #define INIT_THREAD { \
8406 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8407 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8408 .vm86_info = NULL, \
8409 .sysenter_cs = __KERNEL_CS, \
8410 .io_bitmap_ptr = NULL, \
8411 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8412 */
8413 #define INIT_TSS { \
8414 .x86_tss = { \
8415 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8416 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8417 .ss0 = __KERNEL_DS, \
8418 .ss1 = __KERNEL_CS, \
8419 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8420 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8421 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8422
8423 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8424 -#define KSTK_TOP(info) \
8425 -({ \
8426 - unsigned long *__ptr = (unsigned long *)(info); \
8427 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8428 -})
8429 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8430
8431 /*
8432 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8433 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8434 #define task_pt_regs(task) \
8435 ({ \
8436 struct pt_regs *__regs__; \
8437 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8438 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8439 __regs__ - 1; \
8440 })
8441
8442 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8443 /*
8444 * User space process size. 47bits minus one guard page.
8445 */
8446 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8447 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8448
8449 /* This decides where the kernel will search for a free chunk of vm
8450 * space during mmap's.
8451 */
8452 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8453 - 0xc0000000 : 0xFFFFe000)
8454 + 0xc0000000 : 0xFFFFf000)
8455
8456 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8457 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8458 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8459 #define STACK_TOP_MAX TASK_SIZE_MAX
8460
8461 #define INIT_THREAD { \
8462 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8463 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8464 }
8465
8466 #define INIT_TSS { \
8467 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8468 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8469 }
8470
8471 /*
8472 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8473 */
8474 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8475
8476 +#ifdef CONFIG_PAX_SEGMEXEC
8477 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8478 +#endif
8479 +
8480 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8481
8482 /* Get/set a process' ability to use the timestamp counter instruction */
8483 diff -urNp linux-3.0.3/arch/x86/include/asm/ptrace.h linux-3.0.3/arch/x86/include/asm/ptrace.h
8484 --- linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8485 +++ linux-3.0.3/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8486 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8487 }
8488
8489 /*
8490 - * user_mode_vm(regs) determines whether a register set came from user mode.
8491 + * user_mode(regs) determines whether a register set came from user mode.
8492 * This is true if V8086 mode was enabled OR if the register set was from
8493 * protected mode with RPL-3 CS value. This tricky test checks that with
8494 * one comparison. Many places in the kernel can bypass this full check
8495 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8496 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8497 + * be used.
8498 */
8499 -static inline int user_mode(struct pt_regs *regs)
8500 +static inline int user_mode_novm(struct pt_regs *regs)
8501 {
8502 #ifdef CONFIG_X86_32
8503 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8504 #else
8505 - return !!(regs->cs & 3);
8506 + return !!(regs->cs & SEGMENT_RPL_MASK);
8507 #endif
8508 }
8509
8510 -static inline int user_mode_vm(struct pt_regs *regs)
8511 +static inline int user_mode(struct pt_regs *regs)
8512 {
8513 #ifdef CONFIG_X86_32
8514 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8515 USER_RPL;
8516 #else
8517 - return user_mode(regs);
8518 + return user_mode_novm(regs);
8519 #endif
8520 }
8521
8522 diff -urNp linux-3.0.3/arch/x86/include/asm/reboot.h linux-3.0.3/arch/x86/include/asm/reboot.h
8523 --- linux-3.0.3/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8524 +++ linux-3.0.3/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8525 @@ -6,19 +6,19 @@
8526 struct pt_regs;
8527
8528 struct machine_ops {
8529 - void (*restart)(char *cmd);
8530 - void (*halt)(void);
8531 - void (*power_off)(void);
8532 + void (* __noreturn restart)(char *cmd);
8533 + void (* __noreturn halt)(void);
8534 + void (* __noreturn power_off)(void);
8535 void (*shutdown)(void);
8536 void (*crash_shutdown)(struct pt_regs *);
8537 - void (*emergency_restart)(void);
8538 -};
8539 + void (* __noreturn emergency_restart)(void);
8540 +} __no_const;
8541
8542 extern struct machine_ops machine_ops;
8543
8544 void native_machine_crash_shutdown(struct pt_regs *regs);
8545 void native_machine_shutdown(void);
8546 -void machine_real_restart(unsigned int type);
8547 +void machine_real_restart(unsigned int type) __noreturn;
8548 /* These must match dispatch_table in reboot_32.S */
8549 #define MRR_BIOS 0
8550 #define MRR_APM 1
8551 diff -urNp linux-3.0.3/arch/x86/include/asm/rwsem.h linux-3.0.3/arch/x86/include/asm/rwsem.h
8552 --- linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8553 +++ linux-3.0.3/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8554 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8555 {
8556 asm volatile("# beginning down_read\n\t"
8557 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8558 +
8559 +#ifdef CONFIG_PAX_REFCOUNT
8560 + "jno 0f\n"
8561 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8562 + "int $4\n0:\n"
8563 + _ASM_EXTABLE(0b, 0b)
8564 +#endif
8565 +
8566 /* adds 0x00000001 */
8567 " jns 1f\n"
8568 " call call_rwsem_down_read_failed\n"
8569 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8570 "1:\n\t"
8571 " mov %1,%2\n\t"
8572 " add %3,%2\n\t"
8573 +
8574 +#ifdef CONFIG_PAX_REFCOUNT
8575 + "jno 0f\n"
8576 + "sub %3,%2\n"
8577 + "int $4\n0:\n"
8578 + _ASM_EXTABLE(0b, 0b)
8579 +#endif
8580 +
8581 " jle 2f\n\t"
8582 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8583 " jnz 1b\n\t"
8584 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8585 long tmp;
8586 asm volatile("# beginning down_write\n\t"
8587 LOCK_PREFIX " xadd %1,(%2)\n\t"
8588 +
8589 +#ifdef CONFIG_PAX_REFCOUNT
8590 + "jno 0f\n"
8591 + "mov %1,(%2)\n"
8592 + "int $4\n0:\n"
8593 + _ASM_EXTABLE(0b, 0b)
8594 +#endif
8595 +
8596 /* adds 0xffff0001, returns the old value */
8597 " test %1,%1\n\t"
8598 /* was the count 0 before? */
8599 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8600 long tmp;
8601 asm volatile("# beginning __up_read\n\t"
8602 LOCK_PREFIX " xadd %1,(%2)\n\t"
8603 +
8604 +#ifdef CONFIG_PAX_REFCOUNT
8605 + "jno 0f\n"
8606 + "mov %1,(%2)\n"
8607 + "int $4\n0:\n"
8608 + _ASM_EXTABLE(0b, 0b)
8609 +#endif
8610 +
8611 /* subtracts 1, returns the old value */
8612 " jns 1f\n\t"
8613 " call call_rwsem_wake\n" /* expects old value in %edx */
8614 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8615 long tmp;
8616 asm volatile("# beginning __up_write\n\t"
8617 LOCK_PREFIX " xadd %1,(%2)\n\t"
8618 +
8619 +#ifdef CONFIG_PAX_REFCOUNT
8620 + "jno 0f\n"
8621 + "mov %1,(%2)\n"
8622 + "int $4\n0:\n"
8623 + _ASM_EXTABLE(0b, 0b)
8624 +#endif
8625 +
8626 /* subtracts 0xffff0001, returns the old value */
8627 " jns 1f\n\t"
8628 " call call_rwsem_wake\n" /* expects old value in %edx */
8629 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8630 {
8631 asm volatile("# beginning __downgrade_write\n\t"
8632 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8633 +
8634 +#ifdef CONFIG_PAX_REFCOUNT
8635 + "jno 0f\n"
8636 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8637 + "int $4\n0:\n"
8638 + _ASM_EXTABLE(0b, 0b)
8639 +#endif
8640 +
8641 /*
8642 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8643 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8644 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8645 */
8646 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8647 {
8648 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8649 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8650 +
8651 +#ifdef CONFIG_PAX_REFCOUNT
8652 + "jno 0f\n"
8653 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8654 + "int $4\n0:\n"
8655 + _ASM_EXTABLE(0b, 0b)
8656 +#endif
8657 +
8658 : "+m" (sem->count)
8659 : "er" (delta));
8660 }
8661 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8662 {
8663 long tmp = delta;
8664
8665 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8666 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8667 +
8668 +#ifdef CONFIG_PAX_REFCOUNT
8669 + "jno 0f\n"
8670 + "mov %0,%1\n"
8671 + "int $4\n0:\n"
8672 + _ASM_EXTABLE(0b, 0b)
8673 +#endif
8674 +
8675 : "+r" (tmp), "+m" (sem->count)
8676 : : "memory");
8677
8678 diff -urNp linux-3.0.3/arch/x86/include/asm/segment.h linux-3.0.3/arch/x86/include/asm/segment.h
8679 --- linux-3.0.3/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8680 +++ linux-3.0.3/arch/x86/include/asm/segment.h 2011-08-23 21:47:55.000000000 -0400
8681 @@ -64,8 +64,8 @@
8682 * 26 - ESPFIX small SS
8683 * 27 - per-cpu [ offset to per-cpu data area ]
8684 * 28 - stack_canary-20 [ for stack protector ]
8685 - * 29 - unused
8686 - * 30 - unused
8687 + * 29 - PCI BIOS CS
8688 + * 30 - PCI BIOS DS
8689 * 31 - TSS for double fault handler
8690 */
8691 #define GDT_ENTRY_TLS_MIN 6
8692 @@ -79,6 +79,8 @@
8693
8694 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8695
8696 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8697 +
8698 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8699
8700 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8701 @@ -104,6 +106,12 @@
8702 #define __KERNEL_STACK_CANARY 0
8703 #endif
8704
8705 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8706 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8707 +
8708 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8709 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8710 +
8711 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8712
8713 /*
8714 @@ -141,7 +149,7 @@
8715 */
8716
8717 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8718 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8719 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8720
8721
8722 #else
8723 @@ -165,6 +173,8 @@
8724 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8725 #define __USER32_DS __USER_DS
8726
8727 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8728 +
8729 #define GDT_ENTRY_TSS 8 /* needs two entries */
8730 #define GDT_ENTRY_LDT 10 /* needs two entries */
8731 #define GDT_ENTRY_TLS_MIN 12
8732 @@ -185,6 +195,7 @@
8733 #endif
8734
8735 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8736 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8737 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8738 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8739 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8740 diff -urNp linux-3.0.3/arch/x86/include/asm/smp.h linux-3.0.3/arch/x86/include/asm/smp.h
8741 --- linux-3.0.3/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8742 +++ linux-3.0.3/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8743 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8744 /* cpus sharing the last level cache: */
8745 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8746 DECLARE_PER_CPU(u16, cpu_llc_id);
8747 -DECLARE_PER_CPU(int, cpu_number);
8748 +DECLARE_PER_CPU(unsigned int, cpu_number);
8749
8750 static inline struct cpumask *cpu_sibling_mask(int cpu)
8751 {
8752 @@ -77,7 +77,7 @@ struct smp_ops {
8753
8754 void (*send_call_func_ipi)(const struct cpumask *mask);
8755 void (*send_call_func_single_ipi)(int cpu);
8756 -};
8757 +} __no_const;
8758
8759 /* Globals due to paravirt */
8760 extern void set_cpu_sibling_map(int cpu);
8761 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8762 extern int safe_smp_processor_id(void);
8763
8764 #elif defined(CONFIG_X86_64_SMP)
8765 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8766 -
8767 -#define stack_smp_processor_id() \
8768 -({ \
8769 - struct thread_info *ti; \
8770 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8771 - ti->cpu; \
8772 -})
8773 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8774 +#define stack_smp_processor_id() raw_smp_processor_id()
8775 #define safe_smp_processor_id() smp_processor_id()
8776
8777 #endif
8778 diff -urNp linux-3.0.3/arch/x86/include/asm/spinlock.h linux-3.0.3/arch/x86/include/asm/spinlock.h
8779 --- linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8780 +++ linux-3.0.3/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8781 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8782 static inline void arch_read_lock(arch_rwlock_t *rw)
8783 {
8784 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8785 +
8786 +#ifdef CONFIG_PAX_REFCOUNT
8787 + "jno 0f\n"
8788 + LOCK_PREFIX " addl $1,(%0)\n"
8789 + "int $4\n0:\n"
8790 + _ASM_EXTABLE(0b, 0b)
8791 +#endif
8792 +
8793 "jns 1f\n"
8794 "call __read_lock_failed\n\t"
8795 "1:\n"
8796 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8797 static inline void arch_write_lock(arch_rwlock_t *rw)
8798 {
8799 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8800 +
8801 +#ifdef CONFIG_PAX_REFCOUNT
8802 + "jno 0f\n"
8803 + LOCK_PREFIX " addl %1,(%0)\n"
8804 + "int $4\n0:\n"
8805 + _ASM_EXTABLE(0b, 0b)
8806 +#endif
8807 +
8808 "jz 1f\n"
8809 "call __write_lock_failed\n\t"
8810 "1:\n"
8811 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8812
8813 static inline void arch_read_unlock(arch_rwlock_t *rw)
8814 {
8815 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8816 + asm volatile(LOCK_PREFIX "incl %0\n"
8817 +
8818 +#ifdef CONFIG_PAX_REFCOUNT
8819 + "jno 0f\n"
8820 + LOCK_PREFIX "decl %0\n"
8821 + "int $4\n0:\n"
8822 + _ASM_EXTABLE(0b, 0b)
8823 +#endif
8824 +
8825 + :"+m" (rw->lock) : : "memory");
8826 }
8827
8828 static inline void arch_write_unlock(arch_rwlock_t *rw)
8829 {
8830 - asm volatile(LOCK_PREFIX "addl %1, %0"
8831 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8832 +
8833 +#ifdef CONFIG_PAX_REFCOUNT
8834 + "jno 0f\n"
8835 + LOCK_PREFIX "subl %1, %0\n"
8836 + "int $4\n0:\n"
8837 + _ASM_EXTABLE(0b, 0b)
8838 +#endif
8839 +
8840 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8841 }
8842
8843 diff -urNp linux-3.0.3/arch/x86/include/asm/stackprotector.h linux-3.0.3/arch/x86/include/asm/stackprotector.h
8844 --- linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8845 +++ linux-3.0.3/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8846 @@ -48,7 +48,7 @@
8847 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8848 */
8849 #define GDT_STACK_CANARY_INIT \
8850 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8851 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8852
8853 /*
8854 * Initialize the stackprotector canary value.
8855 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8856
8857 static inline void load_stack_canary_segment(void)
8858 {
8859 -#ifdef CONFIG_X86_32
8860 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8861 asm volatile ("mov %0, %%gs" : : "r" (0));
8862 #endif
8863 }
8864 diff -urNp linux-3.0.3/arch/x86/include/asm/stacktrace.h linux-3.0.3/arch/x86/include/asm/stacktrace.h
8865 --- linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8866 +++ linux-3.0.3/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8867 @@ -11,28 +11,20 @@
8868
8869 extern int kstack_depth_to_print;
8870
8871 -struct thread_info;
8872 +struct task_struct;
8873 struct stacktrace_ops;
8874
8875 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8876 - unsigned long *stack,
8877 - unsigned long bp,
8878 - const struct stacktrace_ops *ops,
8879 - void *data,
8880 - unsigned long *end,
8881 - int *graph);
8882 -
8883 -extern unsigned long
8884 -print_context_stack(struct thread_info *tinfo,
8885 - unsigned long *stack, unsigned long bp,
8886 - const struct stacktrace_ops *ops, void *data,
8887 - unsigned long *end, int *graph);
8888 -
8889 -extern unsigned long
8890 -print_context_stack_bp(struct thread_info *tinfo,
8891 - unsigned long *stack, unsigned long bp,
8892 - const struct stacktrace_ops *ops, void *data,
8893 - unsigned long *end, int *graph);
8894 +typedef unsigned long walk_stack_t(struct task_struct *task,
8895 + void *stack_start,
8896 + unsigned long *stack,
8897 + unsigned long bp,
8898 + const struct stacktrace_ops *ops,
8899 + void *data,
8900 + unsigned long *end,
8901 + int *graph);
8902 +
8903 +extern walk_stack_t print_context_stack;
8904 +extern walk_stack_t print_context_stack_bp;
8905
8906 /* Generic stack tracer with callbacks */
8907
8908 @@ -40,7 +32,7 @@ struct stacktrace_ops {
8909 void (*address)(void *data, unsigned long address, int reliable);
8910 /* On negative return stop dumping */
8911 int (*stack)(void *data, char *name);
8912 - walk_stack_t walk_stack;
8913 + walk_stack_t *walk_stack;
8914 };
8915
8916 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8917 diff -urNp linux-3.0.3/arch/x86/include/asm/system.h linux-3.0.3/arch/x86/include/asm/system.h
8918 --- linux-3.0.3/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8919 +++ linux-3.0.3/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8920 @@ -129,7 +129,7 @@ do { \
8921 "call __switch_to\n\t" \
8922 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8923 __switch_canary \
8924 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8925 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8926 "movq %%rax,%%rdi\n\t" \
8927 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8928 "jnz ret_from_fork\n\t" \
8929 @@ -140,7 +140,7 @@ do { \
8930 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8931 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8932 [_tif_fork] "i" (_TIF_FORK), \
8933 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8934 + [thread_info] "m" (current_tinfo), \
8935 [current_task] "m" (current_task) \
8936 __switch_canary_iparam \
8937 : "memory", "cc" __EXTRA_CLOBBER)
8938 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8939 {
8940 unsigned long __limit;
8941 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8942 - return __limit + 1;
8943 + return __limit;
8944 }
8945
8946 static inline void native_clts(void)
8947 @@ -397,12 +397,12 @@ void enable_hlt(void);
8948
8949 void cpu_idle_wait(void);
8950
8951 -extern unsigned long arch_align_stack(unsigned long sp);
8952 +#define arch_align_stack(x) ((x) & ~0xfUL)
8953 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8954
8955 void default_idle(void);
8956
8957 -void stop_this_cpu(void *dummy);
8958 +void stop_this_cpu(void *dummy) __noreturn;
8959
8960 /*
8961 * Force strict CPU ordering.
8962 diff -urNp linux-3.0.3/arch/x86/include/asm/thread_info.h linux-3.0.3/arch/x86/include/asm/thread_info.h
8963 --- linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
8964 +++ linux-3.0.3/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
8965 @@ -10,6 +10,7 @@
8966 #include <linux/compiler.h>
8967 #include <asm/page.h>
8968 #include <asm/types.h>
8969 +#include <asm/percpu.h>
8970
8971 /*
8972 * low level task data that entry.S needs immediate access to
8973 @@ -24,7 +25,6 @@ struct exec_domain;
8974 #include <asm/atomic.h>
8975
8976 struct thread_info {
8977 - struct task_struct *task; /* main task structure */
8978 struct exec_domain *exec_domain; /* execution domain */
8979 __u32 flags; /* low level flags */
8980 __u32 status; /* thread synchronous flags */
8981 @@ -34,18 +34,12 @@ struct thread_info {
8982 mm_segment_t addr_limit;
8983 struct restart_block restart_block;
8984 void __user *sysenter_return;
8985 -#ifdef CONFIG_X86_32
8986 - unsigned long previous_esp; /* ESP of the previous stack in
8987 - case of nested (IRQ) stacks
8988 - */
8989 - __u8 supervisor_stack[0];
8990 -#endif
8991 + unsigned long lowest_stack;
8992 int uaccess_err;
8993 };
8994
8995 -#define INIT_THREAD_INFO(tsk) \
8996 +#define INIT_THREAD_INFO \
8997 { \
8998 - .task = &tsk, \
8999 .exec_domain = &default_exec_domain, \
9000 .flags = 0, \
9001 .cpu = 0, \
9002 @@ -56,7 +50,7 @@ struct thread_info {
9003 }, \
9004 }
9005
9006 -#define init_thread_info (init_thread_union.thread_info)
9007 +#define init_thread_info (init_thread_union.stack)
9008 #define init_stack (init_thread_union.stack)
9009
9010 #else /* !__ASSEMBLY__ */
9011 @@ -170,6 +164,23 @@ struct thread_info {
9012 ret; \
9013 })
9014
9015 +#ifdef __ASSEMBLY__
9016 +/* how to get the thread information struct from ASM */
9017 +#define GET_THREAD_INFO(reg) \
9018 + mov PER_CPU_VAR(current_tinfo), reg
9019 +
9020 +/* use this one if reg already contains %esp */
9021 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9022 +#else
9023 +/* how to get the thread information struct from C */
9024 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9025 +
9026 +static __always_inline struct thread_info *current_thread_info(void)
9027 +{
9028 + return percpu_read_stable(current_tinfo);
9029 +}
9030 +#endif
9031 +
9032 #ifdef CONFIG_X86_32
9033
9034 #define STACK_WARN (THREAD_SIZE/8)
9035 @@ -180,35 +191,13 @@ struct thread_info {
9036 */
9037 #ifndef __ASSEMBLY__
9038
9039 -
9040 /* how to get the current stack pointer from C */
9041 register unsigned long current_stack_pointer asm("esp") __used;
9042
9043 -/* how to get the thread information struct from C */
9044 -static inline struct thread_info *current_thread_info(void)
9045 -{
9046 - return (struct thread_info *)
9047 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9048 -}
9049 -
9050 -#else /* !__ASSEMBLY__ */
9051 -
9052 -/* how to get the thread information struct from ASM */
9053 -#define GET_THREAD_INFO(reg) \
9054 - movl $-THREAD_SIZE, reg; \
9055 - andl %esp, reg
9056 -
9057 -/* use this one if reg already contains %esp */
9058 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9059 - andl $-THREAD_SIZE, reg
9060 -
9061 #endif
9062
9063 #else /* X86_32 */
9064
9065 -#include <asm/percpu.h>
9066 -#define KERNEL_STACK_OFFSET (5*8)
9067 -
9068 /*
9069 * macros/functions for gaining access to the thread information structure
9070 * preempt_count needs to be 1 initially, until the scheduler is functional.
9071 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9072 #ifndef __ASSEMBLY__
9073 DECLARE_PER_CPU(unsigned long, kernel_stack);
9074
9075 -static inline struct thread_info *current_thread_info(void)
9076 -{
9077 - struct thread_info *ti;
9078 - ti = (void *)(percpu_read_stable(kernel_stack) +
9079 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9080 - return ti;
9081 -}
9082 -
9083 -#else /* !__ASSEMBLY__ */
9084 -
9085 -/* how to get the thread information struct from ASM */
9086 -#define GET_THREAD_INFO(reg) \
9087 - movq PER_CPU_VAR(kernel_stack),reg ; \
9088 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9089 -
9090 +/* how to get the current stack pointer from C */
9091 +register unsigned long current_stack_pointer asm("rsp") __used;
9092 #endif
9093
9094 #endif /* !X86_32 */
9095 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9096 extern void free_thread_info(struct thread_info *ti);
9097 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9098 #define arch_task_cache_init arch_task_cache_init
9099 +
9100 +#define __HAVE_THREAD_FUNCTIONS
9101 +#define task_thread_info(task) (&(task)->tinfo)
9102 +#define task_stack_page(task) ((task)->stack)
9103 +#define setup_thread_stack(p, org) do {} while (0)
9104 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9105 +
9106 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9107 +extern struct task_struct *alloc_task_struct_node(int node);
9108 +extern void free_task_struct(struct task_struct *);
9109 +
9110 #endif
9111 #endif /* _ASM_X86_THREAD_INFO_H */
9112 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_32.h linux-3.0.3/arch/x86/include/asm/uaccess_32.h
9113 --- linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9114 +++ linux-3.0.3/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9115 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9116 static __always_inline unsigned long __must_check
9117 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9118 {
9119 + pax_track_stack();
9120 +
9121 + if ((long)n < 0)
9122 + return n;
9123 +
9124 if (__builtin_constant_p(n)) {
9125 unsigned long ret;
9126
9127 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9128 return ret;
9129 }
9130 }
9131 + if (!__builtin_constant_p(n))
9132 + check_object_size(from, n, true);
9133 return __copy_to_user_ll(to, from, n);
9134 }
9135
9136 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9137 __copy_to_user(void __user *to, const void *from, unsigned long n)
9138 {
9139 might_fault();
9140 +
9141 return __copy_to_user_inatomic(to, from, n);
9142 }
9143
9144 static __always_inline unsigned long
9145 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9146 {
9147 + if ((long)n < 0)
9148 + return n;
9149 +
9150 /* Avoid zeroing the tail if the copy fails..
9151 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9152 * but as the zeroing behaviour is only significant when n is not
9153 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9154 __copy_from_user(void *to, const void __user *from, unsigned long n)
9155 {
9156 might_fault();
9157 +
9158 + pax_track_stack();
9159 +
9160 + if ((long)n < 0)
9161 + return n;
9162 +
9163 if (__builtin_constant_p(n)) {
9164 unsigned long ret;
9165
9166 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9167 return ret;
9168 }
9169 }
9170 + if (!__builtin_constant_p(n))
9171 + check_object_size(to, n, false);
9172 return __copy_from_user_ll(to, from, n);
9173 }
9174
9175 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9176 const void __user *from, unsigned long n)
9177 {
9178 might_fault();
9179 +
9180 + if ((long)n < 0)
9181 + return n;
9182 +
9183 if (__builtin_constant_p(n)) {
9184 unsigned long ret;
9185
9186 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9187 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9188 unsigned long n)
9189 {
9190 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9191 -}
9192 + if ((long)n < 0)
9193 + return n;
9194
9195 -unsigned long __must_check copy_to_user(void __user *to,
9196 - const void *from, unsigned long n);
9197 -unsigned long __must_check _copy_from_user(void *to,
9198 - const void __user *from,
9199 - unsigned long n);
9200 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9201 +}
9202
9203 +extern void copy_to_user_overflow(void)
9204 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9205 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9206 +#else
9207 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9208 +#endif
9209 +;
9210
9211 extern void copy_from_user_overflow(void)
9212 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9213 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9214 #endif
9215 ;
9216
9217 -static inline unsigned long __must_check copy_from_user(void *to,
9218 - const void __user *from,
9219 - unsigned long n)
9220 +/**
9221 + * copy_to_user: - Copy a block of data into user space.
9222 + * @to: Destination address, in user space.
9223 + * @from: Source address, in kernel space.
9224 + * @n: Number of bytes to copy.
9225 + *
9226 + * Context: User context only. This function may sleep.
9227 + *
9228 + * Copy data from kernel space to user space.
9229 + *
9230 + * Returns number of bytes that could not be copied.
9231 + * On success, this will be zero.
9232 + */
9233 +static inline unsigned long __must_check
9234 +copy_to_user(void __user *to, const void *from, unsigned long n)
9235 +{
9236 + int sz = __compiletime_object_size(from);
9237 +
9238 + if (unlikely(sz != -1 && sz < n))
9239 + copy_to_user_overflow();
9240 + else if (access_ok(VERIFY_WRITE, to, n))
9241 + n = __copy_to_user(to, from, n);
9242 + return n;
9243 +}
9244 +
9245 +/**
9246 + * copy_from_user: - Copy a block of data from user space.
9247 + * @to: Destination address, in kernel space.
9248 + * @from: Source address, in user space.
9249 + * @n: Number of bytes to copy.
9250 + *
9251 + * Context: User context only. This function may sleep.
9252 + *
9253 + * Copy data from user space to kernel space.
9254 + *
9255 + * Returns number of bytes that could not be copied.
9256 + * On success, this will be zero.
9257 + *
9258 + * If some data could not be copied, this function will pad the copied
9259 + * data to the requested size using zero bytes.
9260 + */
9261 +static inline unsigned long __must_check
9262 +copy_from_user(void *to, const void __user *from, unsigned long n)
9263 {
9264 int sz = __compiletime_object_size(to);
9265
9266 - if (likely(sz == -1 || sz >= n))
9267 - n = _copy_from_user(to, from, n);
9268 - else
9269 + if (unlikely(sz != -1 && sz < n))
9270 copy_from_user_overflow();
9271 -
9272 + else if (access_ok(VERIFY_READ, from, n))
9273 + n = __copy_from_user(to, from, n);
9274 + else if ((long)n > 0) {
9275 + if (!__builtin_constant_p(n))
9276 + check_object_size(to, n, false);
9277 + memset(to, 0, n);
9278 + }
9279 return n;
9280 }
9281
9282 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess_64.h linux-3.0.3/arch/x86/include/asm/uaccess_64.h
9283 --- linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9284 +++ linux-3.0.3/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9285 @@ -10,6 +10,9 @@
9286 #include <asm/alternative.h>
9287 #include <asm/cpufeature.h>
9288 #include <asm/page.h>
9289 +#include <asm/pgtable.h>
9290 +
9291 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9292
9293 /*
9294 * Copy To/From Userspace
9295 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9296 return ret;
9297 }
9298
9299 -__must_check unsigned long
9300 -_copy_to_user(void __user *to, const void *from, unsigned len);
9301 -__must_check unsigned long
9302 -_copy_from_user(void *to, const void __user *from, unsigned len);
9303 +static __always_inline __must_check unsigned long
9304 +__copy_to_user(void __user *to, const void *from, unsigned len);
9305 +static __always_inline __must_check unsigned long
9306 +__copy_from_user(void *to, const void __user *from, unsigned len);
9307 __must_check unsigned long
9308 copy_in_user(void __user *to, const void __user *from, unsigned len);
9309
9310 static inline unsigned long __must_check copy_from_user(void *to,
9311 const void __user *from,
9312 - unsigned long n)
9313 + unsigned n)
9314 {
9315 - int sz = __compiletime_object_size(to);
9316 -
9317 might_fault();
9318 - if (likely(sz == -1 || sz >= n))
9319 - n = _copy_from_user(to, from, n);
9320 -#ifdef CONFIG_DEBUG_VM
9321 - else
9322 - WARN(1, "Buffer overflow detected!\n");
9323 -#endif
9324 +
9325 + if (access_ok(VERIFY_READ, from, n))
9326 + n = __copy_from_user(to, from, n);
9327 + else if ((int)n > 0) {
9328 + if (!__builtin_constant_p(n))
9329 + check_object_size(to, n, false);
9330 + memset(to, 0, n);
9331 + }
9332 return n;
9333 }
9334
9335 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9336 {
9337 might_fault();
9338
9339 - return _copy_to_user(dst, src, size);
9340 + if (access_ok(VERIFY_WRITE, dst, size))
9341 + size = __copy_to_user(dst, src, size);
9342 + return size;
9343 }
9344
9345 static __always_inline __must_check
9346 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9347 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9348 {
9349 - int ret = 0;
9350 + int sz = __compiletime_object_size(dst);
9351 + unsigned ret = 0;
9352
9353 might_fault();
9354 - if (!__builtin_constant_p(size))
9355 - return copy_user_generic(dst, (__force void *)src, size);
9356 +
9357 + pax_track_stack();
9358 +
9359 + if ((int)size < 0)
9360 + return size;
9361 +
9362 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9363 + if (!__access_ok(VERIFY_READ, src, size))
9364 + return size;
9365 +#endif
9366 +
9367 + if (unlikely(sz != -1 && sz < size)) {
9368 +#ifdef CONFIG_DEBUG_VM
9369 + WARN(1, "Buffer overflow detected!\n");
9370 +#endif
9371 + return size;
9372 + }
9373 +
9374 + if (!__builtin_constant_p(size)) {
9375 + check_object_size(dst, size, false);
9376 +
9377 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9378 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9379 + src += PAX_USER_SHADOW_BASE;
9380 +#endif
9381 +
9382 + return copy_user_generic(dst, (__force const void *)src, size);
9383 + }
9384 switch (size) {
9385 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9386 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9387 ret, "b", "b", "=q", 1);
9388 return ret;
9389 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9390 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9391 ret, "w", "w", "=r", 2);
9392 return ret;
9393 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9394 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9395 ret, "l", "k", "=r", 4);
9396 return ret;
9397 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9398 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9399 ret, "q", "", "=r", 8);
9400 return ret;
9401 case 10:
9402 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9403 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9404 ret, "q", "", "=r", 10);
9405 if (unlikely(ret))
9406 return ret;
9407 __get_user_asm(*(u16 *)(8 + (char *)dst),
9408 - (u16 __user *)(8 + (char __user *)src),
9409 + (const u16 __user *)(8 + (const char __user *)src),
9410 ret, "w", "w", "=r", 2);
9411 return ret;
9412 case 16:
9413 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9414 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9415 ret, "q", "", "=r", 16);
9416 if (unlikely(ret))
9417 return ret;
9418 __get_user_asm(*(u64 *)(8 + (char *)dst),
9419 - (u64 __user *)(8 + (char __user *)src),
9420 + (const u64 __user *)(8 + (const char __user *)src),
9421 ret, "q", "", "=r", 8);
9422 return ret;
9423 default:
9424 - return copy_user_generic(dst, (__force void *)src, size);
9425 +
9426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9427 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9428 + src += PAX_USER_SHADOW_BASE;
9429 +#endif
9430 +
9431 + return copy_user_generic(dst, (__force const void *)src, size);
9432 }
9433 }
9434
9435 static __always_inline __must_check
9436 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9437 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9438 {
9439 - int ret = 0;
9440 + int sz = __compiletime_object_size(src);
9441 + unsigned ret = 0;
9442
9443 might_fault();
9444 - if (!__builtin_constant_p(size))
9445 +
9446 + pax_track_stack();
9447 +
9448 + if ((int)size < 0)
9449 + return size;
9450 +
9451 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9452 + if (!__access_ok(VERIFY_WRITE, dst, size))
9453 + return size;
9454 +#endif
9455 +
9456 + if (unlikely(sz != -1 && sz < size)) {
9457 +#ifdef CONFIG_DEBUG_VM
9458 + WARN(1, "Buffer overflow detected!\n");
9459 +#endif
9460 + return size;
9461 + }
9462 +
9463 + if (!__builtin_constant_p(size)) {
9464 + check_object_size(src, size, true);
9465 +
9466 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9467 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9468 + dst += PAX_USER_SHADOW_BASE;
9469 +#endif
9470 +
9471 return copy_user_generic((__force void *)dst, src, size);
9472 + }
9473 switch (size) {
9474 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9475 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9476 ret, "b", "b", "iq", 1);
9477 return ret;
9478 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9479 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9480 ret, "w", "w", "ir", 2);
9481 return ret;
9482 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9483 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9484 ret, "l", "k", "ir", 4);
9485 return ret;
9486 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9487 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9488 ret, "q", "", "er", 8);
9489 return ret;
9490 case 10:
9491 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9492 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9493 ret, "q", "", "er", 10);
9494 if (unlikely(ret))
9495 return ret;
9496 asm("":::"memory");
9497 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9498 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9499 ret, "w", "w", "ir", 2);
9500 return ret;
9501 case 16:
9502 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9503 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9504 ret, "q", "", "er", 16);
9505 if (unlikely(ret))
9506 return ret;
9507 asm("":::"memory");
9508 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9509 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9510 ret, "q", "", "er", 8);
9511 return ret;
9512 default:
9513 +
9514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9515 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9516 + dst += PAX_USER_SHADOW_BASE;
9517 +#endif
9518 +
9519 return copy_user_generic((__force void *)dst, src, size);
9520 }
9521 }
9522
9523 static __always_inline __must_check
9524 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9525 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9526 {
9527 - int ret = 0;
9528 + unsigned ret = 0;
9529
9530 might_fault();
9531 - if (!__builtin_constant_p(size))
9532 +
9533 + if ((int)size < 0)
9534 + return size;
9535 +
9536 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9537 + if (!__access_ok(VERIFY_READ, src, size))
9538 + return size;
9539 + if (!__access_ok(VERIFY_WRITE, dst, size))
9540 + return size;
9541 +#endif
9542 +
9543 + if (!__builtin_constant_p(size)) {
9544 +
9545 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9546 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9547 + src += PAX_USER_SHADOW_BASE;
9548 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9549 + dst += PAX_USER_SHADOW_BASE;
9550 +#endif
9551 +
9552 return copy_user_generic((__force void *)dst,
9553 - (__force void *)src, size);
9554 + (__force const void *)src, size);
9555 + }
9556 switch (size) {
9557 case 1: {
9558 u8 tmp;
9559 - __get_user_asm(tmp, (u8 __user *)src,
9560 + __get_user_asm(tmp, (const u8 __user *)src,
9561 ret, "b", "b", "=q", 1);
9562 if (likely(!ret))
9563 __put_user_asm(tmp, (u8 __user *)dst,
9564 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9565 }
9566 case 2: {
9567 u16 tmp;
9568 - __get_user_asm(tmp, (u16 __user *)src,
9569 + __get_user_asm(tmp, (const u16 __user *)src,
9570 ret, "w", "w", "=r", 2);
9571 if (likely(!ret))
9572 __put_user_asm(tmp, (u16 __user *)dst,
9573 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9574
9575 case 4: {
9576 u32 tmp;
9577 - __get_user_asm(tmp, (u32 __user *)src,
9578 + __get_user_asm(tmp, (const u32 __user *)src,
9579 ret, "l", "k", "=r", 4);
9580 if (likely(!ret))
9581 __put_user_asm(tmp, (u32 __user *)dst,
9582 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9583 }
9584 case 8: {
9585 u64 tmp;
9586 - __get_user_asm(tmp, (u64 __user *)src,
9587 + __get_user_asm(tmp, (const u64 __user *)src,
9588 ret, "q", "", "=r", 8);
9589 if (likely(!ret))
9590 __put_user_asm(tmp, (u64 __user *)dst,
9591 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9592 return ret;
9593 }
9594 default:
9595 +
9596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9597 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9598 + src += PAX_USER_SHADOW_BASE;
9599 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9600 + dst += PAX_USER_SHADOW_BASE;
9601 +#endif
9602 +
9603 return copy_user_generic((__force void *)dst,
9604 - (__force void *)src, size);
9605 + (__force const void *)src, size);
9606 }
9607 }
9608
9609 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9610 static __must_check __always_inline int
9611 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9612 {
9613 + pax_track_stack();
9614 +
9615 + if ((int)size < 0)
9616 + return size;
9617 +
9618 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9619 + if (!__access_ok(VERIFY_READ, src, size))
9620 + return size;
9621 +
9622 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9623 + src += PAX_USER_SHADOW_BASE;
9624 +#endif
9625 +
9626 return copy_user_generic(dst, (__force const void *)src, size);
9627 }
9628
9629 -static __must_check __always_inline int
9630 +static __must_check __always_inline unsigned long
9631 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9632 {
9633 + if ((int)size < 0)
9634 + return size;
9635 +
9636 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9637 + if (!__access_ok(VERIFY_WRITE, dst, size))
9638 + return size;
9639 +
9640 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9641 + dst += PAX_USER_SHADOW_BASE;
9642 +#endif
9643 +
9644 return copy_user_generic((__force void *)dst, src, size);
9645 }
9646
9647 -extern long __copy_user_nocache(void *dst, const void __user *src,
9648 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9649 unsigned size, int zerorest);
9650
9651 -static inline int
9652 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9653 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9654 {
9655 might_sleep();
9656 +
9657 + if ((int)size < 0)
9658 + return size;
9659 +
9660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9661 + if (!__access_ok(VERIFY_READ, src, size))
9662 + return size;
9663 +#endif
9664 +
9665 return __copy_user_nocache(dst, src, size, 1);
9666 }
9667
9668 -static inline int
9669 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9670 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9671 unsigned size)
9672 {
9673 + if ((int)size < 0)
9674 + return size;
9675 +
9676 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9677 + if (!__access_ok(VERIFY_READ, src, size))
9678 + return size;
9679 +#endif
9680 +
9681 return __copy_user_nocache(dst, src, size, 0);
9682 }
9683
9684 -unsigned long
9685 +extern unsigned long
9686 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9687
9688 #endif /* _ASM_X86_UACCESS_64_H */
9689 diff -urNp linux-3.0.3/arch/x86/include/asm/uaccess.h linux-3.0.3/arch/x86/include/asm/uaccess.h
9690 --- linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9691 +++ linux-3.0.3/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9692 @@ -7,12 +7,15 @@
9693 #include <linux/compiler.h>
9694 #include <linux/thread_info.h>
9695 #include <linux/string.h>
9696 +#include <linux/sched.h>
9697 #include <asm/asm.h>
9698 #include <asm/page.h>
9699
9700 #define VERIFY_READ 0
9701 #define VERIFY_WRITE 1
9702
9703 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9704 +
9705 /*
9706 * The fs value determines whether argument validity checking should be
9707 * performed or not. If get_fs() == USER_DS, checking is performed, with
9708 @@ -28,7 +31,12 @@
9709
9710 #define get_ds() (KERNEL_DS)
9711 #define get_fs() (current_thread_info()->addr_limit)
9712 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9713 +void __set_fs(mm_segment_t x);
9714 +void set_fs(mm_segment_t x);
9715 +#else
9716 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9717 +#endif
9718
9719 #define segment_eq(a, b) ((a).seg == (b).seg)
9720
9721 @@ -76,7 +84,33 @@
9722 * checks that the pointer is in the user space range - after calling
9723 * this function, memory access functions may still return -EFAULT.
9724 */
9725 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9726 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9727 +#define access_ok(type, addr, size) \
9728 +({ \
9729 + long __size = size; \
9730 + unsigned long __addr = (unsigned long)addr; \
9731 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9732 + unsigned long __end_ao = __addr + __size - 1; \
9733 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9734 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9735 + while(__addr_ao <= __end_ao) { \
9736 + char __c_ao; \
9737 + __addr_ao += PAGE_SIZE; \
9738 + if (__size > PAGE_SIZE) \
9739 + cond_resched(); \
9740 + if (__get_user(__c_ao, (char __user *)__addr)) \
9741 + break; \
9742 + if (type != VERIFY_WRITE) { \
9743 + __addr = __addr_ao; \
9744 + continue; \
9745 + } \
9746 + if (__put_user(__c_ao, (char __user *)__addr)) \
9747 + break; \
9748 + __addr = __addr_ao; \
9749 + } \
9750 + } \
9751 + __ret_ao; \
9752 +})
9753
9754 /*
9755 * The exception table consists of pairs of addresses: the first is the
9756 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9757 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9758 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9759
9760 -
9761 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9762 +#define __copyuser_seg "gs;"
9763 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9764 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9765 +#else
9766 +#define __copyuser_seg
9767 +#define __COPYUSER_SET_ES
9768 +#define __COPYUSER_RESTORE_ES
9769 +#endif
9770
9771 #ifdef CONFIG_X86_32
9772 #define __put_user_asm_u64(x, addr, err, errret) \
9773 - asm volatile("1: movl %%eax,0(%2)\n" \
9774 - "2: movl %%edx,4(%2)\n" \
9775 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9776 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9777 "3:\n" \
9778 ".section .fixup,\"ax\"\n" \
9779 "4: movl %3,%0\n" \
9780 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9781 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9782
9783 #define __put_user_asm_ex_u64(x, addr) \
9784 - asm volatile("1: movl %%eax,0(%1)\n" \
9785 - "2: movl %%edx,4(%1)\n" \
9786 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9787 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9788 "3:\n" \
9789 _ASM_EXTABLE(1b, 2b - 1b) \
9790 _ASM_EXTABLE(2b, 3b - 2b) \
9791 @@ -373,7 +415,7 @@ do { \
9792 } while (0)
9793
9794 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9795 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9796 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9797 "2:\n" \
9798 ".section .fixup,\"ax\"\n" \
9799 "3: mov %3,%0\n" \
9800 @@ -381,7 +423,7 @@ do { \
9801 " jmp 2b\n" \
9802 ".previous\n" \
9803 _ASM_EXTABLE(1b, 3b) \
9804 - : "=r" (err), ltype(x) \
9805 + : "=r" (err), ltype (x) \
9806 : "m" (__m(addr)), "i" (errret), "0" (err))
9807
9808 #define __get_user_size_ex(x, ptr, size) \
9809 @@ -406,7 +448,7 @@ do { \
9810 } while (0)
9811
9812 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9813 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9814 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9815 "2:\n" \
9816 _ASM_EXTABLE(1b, 2b - 1b) \
9817 : ltype(x) : "m" (__m(addr)))
9818 @@ -423,13 +465,24 @@ do { \
9819 int __gu_err; \
9820 unsigned long __gu_val; \
9821 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9822 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9823 + (x) = (__typeof__(*(ptr)))__gu_val; \
9824 __gu_err; \
9825 })
9826
9827 /* FIXME: this hack is definitely wrong -AK */
9828 struct __large_struct { unsigned long buf[100]; };
9829 -#define __m(x) (*(struct __large_struct __user *)(x))
9830 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9831 +#define ____m(x) \
9832 +({ \
9833 + unsigned long ____x = (unsigned long)(x); \
9834 + if (____x < PAX_USER_SHADOW_BASE) \
9835 + ____x += PAX_USER_SHADOW_BASE; \
9836 + (void __user *)____x; \
9837 +})
9838 +#else
9839 +#define ____m(x) (x)
9840 +#endif
9841 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9842
9843 /*
9844 * Tell gcc we read from memory instead of writing: this is because
9845 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9846 * aliasing issues.
9847 */
9848 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9849 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9850 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9851 "2:\n" \
9852 ".section .fixup,\"ax\"\n" \
9853 "3: mov %3,%0\n" \
9854 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9855 ".previous\n" \
9856 _ASM_EXTABLE(1b, 3b) \
9857 : "=r"(err) \
9858 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9859 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9860
9861 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9862 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9863 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9864 "2:\n" \
9865 _ASM_EXTABLE(1b, 2b - 1b) \
9866 : : ltype(x), "m" (__m(addr)))
9867 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9868 * On error, the variable @x is set to zero.
9869 */
9870
9871 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9872 +#define __get_user(x, ptr) get_user((x), (ptr))
9873 +#else
9874 #define __get_user(x, ptr) \
9875 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9876 +#endif
9877
9878 /**
9879 * __put_user: - Write a simple value into user space, with less checking.
9880 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9881 * Returns zero on success, or -EFAULT on error.
9882 */
9883
9884 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9885 +#define __put_user(x, ptr) put_user((x), (ptr))
9886 +#else
9887 #define __put_user(x, ptr) \
9888 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9889 +#endif
9890
9891 #define __get_user_unaligned __get_user
9892 #define __put_user_unaligned __put_user
9893 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9894 #define get_user_ex(x, ptr) do { \
9895 unsigned long __gue_val; \
9896 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9897 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9898 + (x) = (__typeof__(*(ptr)))__gue_val; \
9899 } while (0)
9900
9901 #ifdef CONFIG_X86_WP_WORKS_OK
9902 diff -urNp linux-3.0.3/arch/x86/include/asm/vgtod.h linux-3.0.3/arch/x86/include/asm/vgtod.h
9903 --- linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-07-21 22:17:23.000000000 -0400
9904 +++ linux-3.0.3/arch/x86/include/asm/vgtod.h 2011-08-23 21:47:55.000000000 -0400
9905 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9906 int sysctl_enabled;
9907 struct timezone sys_tz;
9908 struct { /* extract of a clocksource struct */
9909 + char name[8];
9910 cycle_t (*vread)(void);
9911 cycle_t cycle_last;
9912 cycle_t mask;
9913 diff -urNp linux-3.0.3/arch/x86/include/asm/x86_init.h linux-3.0.3/arch/x86/include/asm/x86_init.h
9914 --- linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9915 +++ linux-3.0.3/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9916 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9917 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9918 void (*find_smp_config)(void);
9919 void (*get_smp_config)(unsigned int early);
9920 -};
9921 +} __no_const;
9922
9923 /**
9924 * struct x86_init_resources - platform specific resource related ops
9925 @@ -42,7 +42,7 @@ struct x86_init_resources {
9926 void (*probe_roms)(void);
9927 void (*reserve_resources)(void);
9928 char *(*memory_setup)(void);
9929 -};
9930 +} __no_const;
9931
9932 /**
9933 * struct x86_init_irqs - platform specific interrupt setup
9934 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9935 void (*pre_vector_init)(void);
9936 void (*intr_init)(void);
9937 void (*trap_init)(void);
9938 -};
9939 +} __no_const;
9940
9941 /**
9942 * struct x86_init_oem - oem platform specific customizing functions
9943 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9944 struct x86_init_oem {
9945 void (*arch_setup)(void);
9946 void (*banner)(void);
9947 -};
9948 +} __no_const;
9949
9950 /**
9951 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9952 @@ -76,7 +76,7 @@ struct x86_init_oem {
9953 */
9954 struct x86_init_mapping {
9955 void (*pagetable_reserve)(u64 start, u64 end);
9956 -};
9957 +} __no_const;
9958
9959 /**
9960 * struct x86_init_paging - platform specific paging functions
9961 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9962 struct x86_init_paging {
9963 void (*pagetable_setup_start)(pgd_t *base);
9964 void (*pagetable_setup_done)(pgd_t *base);
9965 -};
9966 +} __no_const;
9967
9968 /**
9969 * struct x86_init_timers - platform specific timer setup
9970 @@ -101,7 +101,7 @@ struct x86_init_timers {
9971 void (*tsc_pre_init)(void);
9972 void (*timer_init)(void);
9973 void (*wallclock_init)(void);
9974 -};
9975 +} __no_const;
9976
9977 /**
9978 * struct x86_init_iommu - platform specific iommu setup
9979 @@ -109,7 +109,7 @@ struct x86_init_timers {
9980 */
9981 struct x86_init_iommu {
9982 int (*iommu_init)(void);
9983 -};
9984 +} __no_const;
9985
9986 /**
9987 * struct x86_init_pci - platform specific pci init functions
9988 @@ -123,7 +123,7 @@ struct x86_init_pci {
9989 int (*init)(void);
9990 void (*init_irq)(void);
9991 void (*fixup_irqs)(void);
9992 -};
9993 +} __no_const;
9994
9995 /**
9996 * struct x86_init_ops - functions for platform specific setup
9997 @@ -139,7 +139,7 @@ struct x86_init_ops {
9998 struct x86_init_timers timers;
9999 struct x86_init_iommu iommu;
10000 struct x86_init_pci pci;
10001 -};
10002 +} __no_const;
10003
10004 /**
10005 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10006 @@ -147,7 +147,7 @@ struct x86_init_ops {
10007 */
10008 struct x86_cpuinit_ops {
10009 void (*setup_percpu_clockev)(void);
10010 -};
10011 +} __no_const;
10012
10013 /**
10014 * struct x86_platform_ops - platform specific runtime functions
10015 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10016 bool (*is_untracked_pat_range)(u64 start, u64 end);
10017 void (*nmi_init)(void);
10018 int (*i8042_detect)(void);
10019 -};
10020 +} __no_const;
10021
10022 struct pci_dev;
10023
10024 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10025 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10026 void (*teardown_msi_irq)(unsigned int irq);
10027 void (*teardown_msi_irqs)(struct pci_dev *dev);
10028 -};
10029 +} __no_const;
10030
10031 extern struct x86_init_ops x86_init;
10032 extern struct x86_cpuinit_ops x86_cpuinit;
10033 diff -urNp linux-3.0.3/arch/x86/include/asm/xsave.h linux-3.0.3/arch/x86/include/asm/xsave.h
10034 --- linux-3.0.3/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10035 +++ linux-3.0.3/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10036 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10037 {
10038 int err;
10039
10040 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10041 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10042 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10043 +#endif
10044 +
10045 /*
10046 * Clear the xsave header first, so that reserved fields are
10047 * initialized to zero.
10048 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10049 u32 lmask = mask;
10050 u32 hmask = mask >> 32;
10051
10052 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10053 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10054 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10055 +#endif
10056 +
10057 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10058 "2:\n"
10059 ".section .fixup,\"ax\"\n"
10060 diff -urNp linux-3.0.3/arch/x86/Kconfig linux-3.0.3/arch/x86/Kconfig
10061 --- linux-3.0.3/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10062 +++ linux-3.0.3/arch/x86/Kconfig 2011-08-23 21:48:14.000000000 -0400
10063 @@ -229,7 +229,7 @@ config X86_HT
10064
10065 config X86_32_LAZY_GS
10066 def_bool y
10067 - depends on X86_32 && !CC_STACKPROTECTOR
10068 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10069
10070 config ARCH_HWEIGHT_CFLAGS
10071 string
10072 @@ -1018,7 +1018,7 @@ choice
10073
10074 config NOHIGHMEM
10075 bool "off"
10076 - depends on !X86_NUMAQ
10077 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10078 ---help---
10079 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10080 However, the address space of 32-bit x86 processors is only 4
10081 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10082
10083 config HIGHMEM4G
10084 bool "4GB"
10085 - depends on !X86_NUMAQ
10086 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10087 ---help---
10088 Select this if you have a 32-bit processor and between 1 and 4
10089 gigabytes of physical RAM.
10090 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10091 hex
10092 default 0xB0000000 if VMSPLIT_3G_OPT
10093 default 0x80000000 if VMSPLIT_2G
10094 - default 0x78000000 if VMSPLIT_2G_OPT
10095 + default 0x70000000 if VMSPLIT_2G_OPT
10096 default 0x40000000 if VMSPLIT_1G
10097 default 0xC0000000
10098 depends on X86_32
10099 @@ -1453,7 +1453,7 @@ config ARCH_USES_PG_UNCACHED
10100
10101 config EFI
10102 bool "EFI runtime service support"
10103 - depends on ACPI
10104 + depends on ACPI && !PAX_KERNEXEC
10105 ---help---
10106 This enables the kernel to use EFI runtime services that are
10107 available (such as the EFI variable services).
10108 @@ -1483,6 +1483,7 @@ config SECCOMP
10109
10110 config CC_STACKPROTECTOR
10111 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10112 + depends on X86_64 || !PAX_MEMORY_UDEREF
10113 ---help---
10114 This option turns on the -fstack-protector GCC feature. This
10115 feature puts, at the beginning of functions, a canary value on
10116 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10117 config PHYSICAL_START
10118 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10119 default "0x1000000"
10120 + range 0x400000 0x40000000
10121 ---help---
10122 This gives the physical address where the kernel is loaded.
10123
10124 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10125 config PHYSICAL_ALIGN
10126 hex "Alignment value to which kernel should be aligned" if X86_32
10127 default "0x1000000"
10128 + range 0x400000 0x1000000 if PAX_KERNEXEC
10129 range 0x2000 0x1000000
10130 ---help---
10131 This value puts the alignment restrictions on physical address
10132 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10133 Say N if you want to disable CPU hotplug.
10134
10135 config COMPAT_VDSO
10136 - def_bool y
10137 + def_bool n
10138 prompt "Compat VDSO support"
10139 depends on X86_32 || IA32_EMULATION
10140 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10141 ---help---
10142 Map the 32-bit VDSO to the predictable old-style address too.
10143
10144 diff -urNp linux-3.0.3/arch/x86/Kconfig.cpu linux-3.0.3/arch/x86/Kconfig.cpu
10145 --- linux-3.0.3/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10146 +++ linux-3.0.3/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10147 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10148
10149 config X86_F00F_BUG
10150 def_bool y
10151 - depends on M586MMX || M586TSC || M586 || M486 || M386
10152 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10153
10154 config X86_INVD_BUG
10155 def_bool y
10156 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10157
10158 config X86_ALIGNMENT_16
10159 def_bool y
10160 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10161 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10162
10163 config X86_INTEL_USERCOPY
10164 def_bool y
10165 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10166 # generates cmov.
10167 config X86_CMOV
10168 def_bool y
10169 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10170 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10171
10172 config X86_MINIMUM_CPU_FAMILY
10173 int
10174 diff -urNp linux-3.0.3/arch/x86/Kconfig.debug linux-3.0.3/arch/x86/Kconfig.debug
10175 --- linux-3.0.3/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10176 +++ linux-3.0.3/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10177 @@ -81,7 +81,7 @@ config X86_PTDUMP
10178 config DEBUG_RODATA
10179 bool "Write protect kernel read-only data structures"
10180 default y
10181 - depends on DEBUG_KERNEL
10182 + depends on DEBUG_KERNEL && BROKEN
10183 ---help---
10184 Mark the kernel read-only data as write-protected in the pagetables,
10185 in order to catch accidental (and incorrect) writes to such const
10186 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10187
10188 config DEBUG_SET_MODULE_RONX
10189 bool "Set loadable kernel module data as NX and text as RO"
10190 - depends on MODULES
10191 + depends on MODULES && BROKEN
10192 ---help---
10193 This option helps catch unintended modifications to loadable
10194 kernel module's text and read-only data. It also prevents execution
10195 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile
10196 --- linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10197 +++ linux-3.0.3/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10198 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10199 $(call cc-option, -fno-stack-protector) \
10200 $(call cc-option, -mpreferred-stack-boundary=2)
10201 KBUILD_CFLAGS += $(call cc-option, -m32)
10202 +ifdef CONSTIFY_PLUGIN
10203 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10204 +endif
10205 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10206 GCOV_PROFILE := n
10207
10208 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S
10209 --- linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10210 +++ linux-3.0.3/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10211 @@ -108,6 +108,9 @@ wakeup_code:
10212 /* Do any other stuff... */
10213
10214 #ifndef CONFIG_64BIT
10215 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10216 + call verify_cpu
10217 +
10218 /* This could also be done in C code... */
10219 movl pmode_cr3, %eax
10220 movl %eax, %cr3
10221 @@ -131,6 +134,7 @@ wakeup_code:
10222 movl pmode_cr0, %eax
10223 movl %eax, %cr0
10224 jmp pmode_return
10225 +# include "../../verify_cpu.S"
10226 #else
10227 pushw $0
10228 pushw trampoline_segment
10229 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/sleep.c linux-3.0.3/arch/x86/kernel/acpi/sleep.c
10230 --- linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10231 +++ linux-3.0.3/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10232 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10233 header->trampoline_segment = trampoline_address() >> 4;
10234 #ifdef CONFIG_SMP
10235 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10236 +
10237 + pax_open_kernel();
10238 early_gdt_descr.address =
10239 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10240 + pax_close_kernel();
10241 +
10242 initial_gs = per_cpu_offset(smp_processor_id());
10243 #endif
10244 initial_code = (unsigned long)wakeup_long64;
10245 diff -urNp linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S
10246 --- linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10247 +++ linux-3.0.3/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10248 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10249 # and restore the stack ... but you need gdt for this to work
10250 movl saved_context_esp, %esp
10251
10252 - movl %cs:saved_magic, %eax
10253 - cmpl $0x12345678, %eax
10254 + cmpl $0x12345678, saved_magic
10255 jne bogus_magic
10256
10257 # jump to place where we left off
10258 - movl saved_eip, %eax
10259 - jmp *%eax
10260 + jmp *(saved_eip)
10261
10262 bogus_magic:
10263 jmp bogus_magic
10264 diff -urNp linux-3.0.3/arch/x86/kernel/alternative.c linux-3.0.3/arch/x86/kernel/alternative.c
10265 --- linux-3.0.3/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10266 +++ linux-3.0.3/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10267 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10268 if (!*poff || ptr < text || ptr >= text_end)
10269 continue;
10270 /* turn DS segment override prefix into lock prefix */
10271 - if (*ptr == 0x3e)
10272 + if (*ktla_ktva(ptr) == 0x3e)
10273 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10274 };
10275 mutex_unlock(&text_mutex);
10276 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10277 if (!*poff || ptr < text || ptr >= text_end)
10278 continue;
10279 /* turn lock prefix into DS segment override prefix */
10280 - if (*ptr == 0xf0)
10281 + if (*ktla_ktva(ptr) == 0xf0)
10282 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10283 };
10284 mutex_unlock(&text_mutex);
10285 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10286
10287 BUG_ON(p->len > MAX_PATCH_LEN);
10288 /* prep the buffer with the original instructions */
10289 - memcpy(insnbuf, p->instr, p->len);
10290 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10291 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10292 (unsigned long)p->instr, p->len);
10293
10294 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10295 if (smp_alt_once)
10296 free_init_pages("SMP alternatives",
10297 (unsigned long)__smp_locks,
10298 - (unsigned long)__smp_locks_end);
10299 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10300
10301 restart_nmi();
10302 }
10303 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10304 * instructions. And on the local CPU you need to be protected again NMI or MCE
10305 * handlers seeing an inconsistent instruction while you patch.
10306 */
10307 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10308 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10309 size_t len)
10310 {
10311 unsigned long flags;
10312 local_irq_save(flags);
10313 - memcpy(addr, opcode, len);
10314 +
10315 + pax_open_kernel();
10316 + memcpy(ktla_ktva(addr), opcode, len);
10317 sync_core();
10318 + pax_close_kernel();
10319 +
10320 local_irq_restore(flags);
10321 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10322 that causes hangs on some VIA CPUs. */
10323 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10324 */
10325 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10326 {
10327 - unsigned long flags;
10328 - char *vaddr;
10329 + unsigned char *vaddr = ktla_ktva(addr);
10330 struct page *pages[2];
10331 - int i;
10332 + size_t i;
10333
10334 if (!core_kernel_text((unsigned long)addr)) {
10335 - pages[0] = vmalloc_to_page(addr);
10336 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10337 + pages[0] = vmalloc_to_page(vaddr);
10338 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10339 } else {
10340 - pages[0] = virt_to_page(addr);
10341 + pages[0] = virt_to_page(vaddr);
10342 WARN_ON(!PageReserved(pages[0]));
10343 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10344 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10345 }
10346 BUG_ON(!pages[0]);
10347 - local_irq_save(flags);
10348 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10349 - if (pages[1])
10350 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10351 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10352 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10353 - clear_fixmap(FIX_TEXT_POKE0);
10354 - if (pages[1])
10355 - clear_fixmap(FIX_TEXT_POKE1);
10356 - local_flush_tlb();
10357 - sync_core();
10358 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10359 - that causes hangs on some VIA CPUs. */
10360 + text_poke_early(addr, opcode, len);
10361 for (i = 0; i < len; i++)
10362 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10363 - local_irq_restore(flags);
10364 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10365 return addr;
10366 }
10367
10368 diff -urNp linux-3.0.3/arch/x86/kernel/apic/apic.c linux-3.0.3/arch/x86/kernel/apic/apic.c
10369 --- linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10370 +++ linux-3.0.3/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10371 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10372 /*
10373 * Debug level, exported for io_apic.c
10374 */
10375 -unsigned int apic_verbosity;
10376 +int apic_verbosity;
10377
10378 int pic_mode;
10379
10380 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10381 apic_write(APIC_ESR, 0);
10382 v1 = apic_read(APIC_ESR);
10383 ack_APIC_irq();
10384 - atomic_inc(&irq_err_count);
10385 + atomic_inc_unchecked(&irq_err_count);
10386
10387 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10388 smp_processor_id(), v0 , v1);
10389 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10390 u16 *bios_cpu_apicid;
10391 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10392
10393 + pax_track_stack();
10394 +
10395 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10396 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10397
10398 diff -urNp linux-3.0.3/arch/x86/kernel/apic/io_apic.c linux-3.0.3/arch/x86/kernel/apic/io_apic.c
10399 --- linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10400 +++ linux-3.0.3/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10401 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10402 }
10403 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10404
10405 -void lock_vector_lock(void)
10406 +void lock_vector_lock(void) __acquires(vector_lock)
10407 {
10408 /* Used to the online set of cpus does not change
10409 * during assign_irq_vector.
10410 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10411 raw_spin_lock(&vector_lock);
10412 }
10413
10414 -void unlock_vector_lock(void)
10415 +void unlock_vector_lock(void) __releases(vector_lock)
10416 {
10417 raw_spin_unlock(&vector_lock);
10418 }
10419 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10420 ack_APIC_irq();
10421 }
10422
10423 -atomic_t irq_mis_count;
10424 +atomic_unchecked_t irq_mis_count;
10425
10426 /*
10427 * IO-APIC versions below 0x20 don't support EOI register.
10428 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10429 * at the cpu.
10430 */
10431 if (!(v & (1 << (i & 0x1f)))) {
10432 - atomic_inc(&irq_mis_count);
10433 + atomic_inc_unchecked(&irq_mis_count);
10434
10435 eoi_ioapic_irq(irq, cfg);
10436 }
10437 diff -urNp linux-3.0.3/arch/x86/kernel/apm_32.c linux-3.0.3/arch/x86/kernel/apm_32.c
10438 --- linux-3.0.3/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10439 +++ linux-3.0.3/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10440 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10441 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10442 * even though they are called in protected mode.
10443 */
10444 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10445 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10446 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10447
10448 static const char driver_version[] = "1.16ac"; /* no spaces */
10449 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10450 BUG_ON(cpu != 0);
10451 gdt = get_cpu_gdt_table(cpu);
10452 save_desc_40 = gdt[0x40 / 8];
10453 +
10454 + pax_open_kernel();
10455 gdt[0x40 / 8] = bad_bios_desc;
10456 + pax_close_kernel();
10457
10458 apm_irq_save(flags);
10459 APM_DO_SAVE_SEGS;
10460 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10461 &call->esi);
10462 APM_DO_RESTORE_SEGS;
10463 apm_irq_restore(flags);
10464 +
10465 + pax_open_kernel();
10466 gdt[0x40 / 8] = save_desc_40;
10467 + pax_close_kernel();
10468 +
10469 put_cpu();
10470
10471 return call->eax & 0xff;
10472 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10473 BUG_ON(cpu != 0);
10474 gdt = get_cpu_gdt_table(cpu);
10475 save_desc_40 = gdt[0x40 / 8];
10476 +
10477 + pax_open_kernel();
10478 gdt[0x40 / 8] = bad_bios_desc;
10479 + pax_close_kernel();
10480
10481 apm_irq_save(flags);
10482 APM_DO_SAVE_SEGS;
10483 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10484 &call->eax);
10485 APM_DO_RESTORE_SEGS;
10486 apm_irq_restore(flags);
10487 +
10488 + pax_open_kernel();
10489 gdt[0x40 / 8] = save_desc_40;
10490 + pax_close_kernel();
10491 +
10492 put_cpu();
10493 return error;
10494 }
10495 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10496 * code to that CPU.
10497 */
10498 gdt = get_cpu_gdt_table(0);
10499 +
10500 + pax_open_kernel();
10501 set_desc_base(&gdt[APM_CS >> 3],
10502 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10503 set_desc_base(&gdt[APM_CS_16 >> 3],
10504 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10505 set_desc_base(&gdt[APM_DS >> 3],
10506 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10507 + pax_close_kernel();
10508
10509 proc_create("apm", 0, NULL, &apm_file_ops);
10510
10511 diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets_64.c linux-3.0.3/arch/x86/kernel/asm-offsets_64.c
10512 --- linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10513 +++ linux-3.0.3/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10514 @@ -69,6 +69,7 @@ int main(void)
10515 BLANK();
10516 #undef ENTRY
10517
10518 + DEFINE(TSS_size, sizeof(struct tss_struct));
10519 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10520 BLANK();
10521
10522 diff -urNp linux-3.0.3/arch/x86/kernel/asm-offsets.c linux-3.0.3/arch/x86/kernel/asm-offsets.c
10523 --- linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10524 +++ linux-3.0.3/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10525 @@ -33,6 +33,8 @@ void common(void) {
10526 OFFSET(TI_status, thread_info, status);
10527 OFFSET(TI_addr_limit, thread_info, addr_limit);
10528 OFFSET(TI_preempt_count, thread_info, preempt_count);
10529 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10530 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10531
10532 BLANK();
10533 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10534 @@ -53,8 +55,26 @@ void common(void) {
10535 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10536 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10537 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10538 +
10539 +#ifdef CONFIG_PAX_KERNEXEC
10540 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10541 +#endif
10542 +
10543 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10544 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10545 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10546 +#ifdef CONFIG_X86_64
10547 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10548 +#endif
10549 #endif
10550
10551 +#endif
10552 +
10553 + BLANK();
10554 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10555 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10556 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10557 +
10558 #ifdef CONFIG_XEN
10559 BLANK();
10560 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10561 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/amd.c linux-3.0.3/arch/x86/kernel/cpu/amd.c
10562 --- linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10563 +++ linux-3.0.3/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10564 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10565 unsigned int size)
10566 {
10567 /* AMD errata T13 (order #21922) */
10568 - if ((c->x86 == 6)) {
10569 + if (c->x86 == 6) {
10570 /* Duron Rev A0 */
10571 if (c->x86_model == 3 && c->x86_mask == 0)
10572 size = 64;
10573 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/common.c linux-3.0.3/arch/x86/kernel/cpu/common.c
10574 --- linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10575 +++ linux-3.0.3/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10576 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10577
10578 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10579
10580 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10581 -#ifdef CONFIG_X86_64
10582 - /*
10583 - * We need valid kernel segments for data and code in long mode too
10584 - * IRET will check the segment types kkeil 2000/10/28
10585 - * Also sysret mandates a special GDT layout
10586 - *
10587 - * TLS descriptors are currently at a different place compared to i386.
10588 - * Hopefully nobody expects them at a fixed place (Wine?)
10589 - */
10590 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10591 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10592 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10593 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10594 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10595 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10596 -#else
10597 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10598 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10599 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10600 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10601 - /*
10602 - * Segments used for calling PnP BIOS have byte granularity.
10603 - * They code segments and data segments have fixed 64k limits,
10604 - * the transfer segment sizes are set at run time.
10605 - */
10606 - /* 32-bit code */
10607 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10608 - /* 16-bit code */
10609 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10610 - /* 16-bit data */
10611 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10612 - /* 16-bit data */
10613 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10614 - /* 16-bit data */
10615 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10616 - /*
10617 - * The APM segments have byte granularity and their bases
10618 - * are set at run time. All have 64k limits.
10619 - */
10620 - /* 32-bit code */
10621 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10622 - /* 16-bit code */
10623 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10624 - /* data */
10625 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10626 -
10627 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10628 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10629 - GDT_STACK_CANARY_INIT
10630 -#endif
10631 -} };
10632 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10633 -
10634 static int __init x86_xsave_setup(char *s)
10635 {
10636 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10637 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10638 {
10639 struct desc_ptr gdt_descr;
10640
10641 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10642 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10643 gdt_descr.size = GDT_SIZE - 1;
10644 load_gdt(&gdt_descr);
10645 /* Reload the per-cpu base */
10646 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10647 /* Filter out anything that depends on CPUID levels we don't have */
10648 filter_cpuid_features(c, true);
10649
10650 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10651 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10652 +#endif
10653 +
10654 /* If the model name is still unset, do table lookup. */
10655 if (!c->x86_model_id[0]) {
10656 const char *p;
10657 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10658 }
10659 __setup("clearcpuid=", setup_disablecpuid);
10660
10661 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10662 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10663 +
10664 #ifdef CONFIG_X86_64
10665 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10666
10667 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10668 EXPORT_PER_CPU_SYMBOL(current_task);
10669
10670 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10671 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10672 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10673 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10674
10675 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10676 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10677 {
10678 memset(regs, 0, sizeof(struct pt_regs));
10679 regs->fs = __KERNEL_PERCPU;
10680 - regs->gs = __KERNEL_STACK_CANARY;
10681 + savesegment(gs, regs->gs);
10682
10683 return regs;
10684 }
10685 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10686 int i;
10687
10688 cpu = stack_smp_processor_id();
10689 - t = &per_cpu(init_tss, cpu);
10690 + t = init_tss + cpu;
10691 oist = &per_cpu(orig_ist, cpu);
10692
10693 #ifdef CONFIG_NUMA
10694 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10695 switch_to_new_gdt(cpu);
10696 loadsegment(fs, 0);
10697
10698 - load_idt((const struct desc_ptr *)&idt_descr);
10699 + load_idt(&idt_descr);
10700
10701 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10702 syscall_init();
10703 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10704 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10705 barrier();
10706
10707 - x86_configure_nx();
10708 if (cpu != 0)
10709 enable_x2apic();
10710
10711 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10712 {
10713 int cpu = smp_processor_id();
10714 struct task_struct *curr = current;
10715 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10716 + struct tss_struct *t = init_tss + cpu;
10717 struct thread_struct *thread = &curr->thread;
10718
10719 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10720 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/intel.c linux-3.0.3/arch/x86/kernel/cpu/intel.c
10721 --- linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:44:40.000000000 -0400
10722 +++ linux-3.0.3/arch/x86/kernel/cpu/intel.c 2011-08-23 21:47:55.000000000 -0400
10723 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10724 * Update the IDT descriptor and reload the IDT so that
10725 * it uses the read-only mapped virtual address.
10726 */
10727 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10728 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10729 load_idt(&idt_descr);
10730 }
10731 #endif
10732 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/Makefile linux-3.0.3/arch/x86/kernel/cpu/Makefile
10733 --- linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10734 +++ linux-3.0.3/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10735 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10736 CFLAGS_REMOVE_perf_event.o = -pg
10737 endif
10738
10739 -# Make sure load_percpu_segment has no stackprotector
10740 -nostackp := $(call cc-option, -fno-stack-protector)
10741 -CFLAGS_common.o := $(nostackp)
10742 -
10743 obj-y := intel_cacheinfo.o scattered.o topology.o
10744 obj-y += proc.o capflags.o powerflags.o common.o
10745 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10746 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c
10747 --- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10748 +++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10749 @@ -46,6 +46,7 @@
10750 #include <asm/ipi.h>
10751 #include <asm/mce.h>
10752 #include <asm/msr.h>
10753 +#include <asm/local.h>
10754
10755 #include "mce-internal.h"
10756
10757 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10758 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10759 m->cs, m->ip);
10760
10761 - if (m->cs == __KERNEL_CS)
10762 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10763 print_symbol("{%s}", m->ip);
10764 pr_cont("\n");
10765 }
10766 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10767
10768 #define PANIC_TIMEOUT 5 /* 5 seconds */
10769
10770 -static atomic_t mce_paniced;
10771 +static atomic_unchecked_t mce_paniced;
10772
10773 static int fake_panic;
10774 -static atomic_t mce_fake_paniced;
10775 +static atomic_unchecked_t mce_fake_paniced;
10776
10777 /* Panic in progress. Enable interrupts and wait for final IPI */
10778 static void wait_for_panic(void)
10779 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10780 /*
10781 * Make sure only one CPU runs in machine check panic
10782 */
10783 - if (atomic_inc_return(&mce_paniced) > 1)
10784 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10785 wait_for_panic();
10786 barrier();
10787
10788 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10789 console_verbose();
10790 } else {
10791 /* Don't log too much for fake panic */
10792 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10793 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10794 return;
10795 }
10796 /* First print corrected ones that are still unlogged */
10797 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10798 * might have been modified by someone else.
10799 */
10800 rmb();
10801 - if (atomic_read(&mce_paniced))
10802 + if (atomic_read_unchecked(&mce_paniced))
10803 wait_for_panic();
10804 if (!monarch_timeout)
10805 goto out;
10806 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10807 */
10808
10809 static DEFINE_SPINLOCK(mce_state_lock);
10810 -static int open_count; /* #times opened */
10811 +static local_t open_count; /* #times opened */
10812 static int open_exclu; /* already open exclusive? */
10813
10814 static int mce_open(struct inode *inode, struct file *file)
10815 {
10816 spin_lock(&mce_state_lock);
10817
10818 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10819 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10820 spin_unlock(&mce_state_lock);
10821
10822 return -EBUSY;
10823 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10824
10825 if (file->f_flags & O_EXCL)
10826 open_exclu = 1;
10827 - open_count++;
10828 + local_inc(&open_count);
10829
10830 spin_unlock(&mce_state_lock);
10831
10832 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10833 {
10834 spin_lock(&mce_state_lock);
10835
10836 - open_count--;
10837 + local_dec(&open_count);
10838 open_exclu = 0;
10839
10840 spin_unlock(&mce_state_lock);
10841 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10842 static void mce_reset(void)
10843 {
10844 cpu_missing = 0;
10845 - atomic_set(&mce_fake_paniced, 0);
10846 + atomic_set_unchecked(&mce_fake_paniced, 0);
10847 atomic_set(&mce_executing, 0);
10848 atomic_set(&mce_callin, 0);
10849 atomic_set(&global_nwo, 0);
10850 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c
10851 --- linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10852 +++ linux-3.0.3/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10853 @@ -215,7 +215,9 @@ static int inject_init(void)
10854 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10855 return -ENOMEM;
10856 printk(KERN_INFO "Machine check injector initialized\n");
10857 - mce_chrdev_ops.write = mce_write;
10858 + pax_open_kernel();
10859 + *(void **)&mce_chrdev_ops.write = mce_write;
10860 + pax_close_kernel();
10861 register_die_notifier(&mce_raise_nb);
10862 return 0;
10863 }
10864 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c
10865 --- linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-07-21 22:17:23.000000000 -0400
10866 +++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/main.c 2011-08-23 21:47:55.000000000 -0400
10867 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10868 u64 size_or_mask, size_and_mask;
10869 static bool mtrr_aps_delayed_init;
10870
10871 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10872 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10873
10874 const struct mtrr_ops *mtrr_if;
10875
10876 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h
10877 --- linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10878 +++ linux-3.0.3/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-23 21:47:55.000000000 -0400
10879 @@ -12,8 +12,8 @@
10880 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10881
10882 struct mtrr_ops {
10883 - u32 vendor;
10884 - u32 use_intel_if;
10885 + const u32 vendor;
10886 + const u32 use_intel_if;
10887 void (*set)(unsigned int reg, unsigned long base,
10888 unsigned long size, mtrr_type type);
10889 void (*set_all)(void);
10890 diff -urNp linux-3.0.3/arch/x86/kernel/cpu/perf_event.c linux-3.0.3/arch/x86/kernel/cpu/perf_event.c
10891 --- linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10892 +++ linux-3.0.3/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10893 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10894 int i, j, w, wmax, num = 0;
10895 struct hw_perf_event *hwc;
10896
10897 + pax_track_stack();
10898 +
10899 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10900
10901 for (i = 0; i < n; i++) {
10902 @@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10903 break;
10904
10905 perf_callchain_store(entry, frame.return_address);
10906 - fp = frame.next_frame;
10907 + fp = (__force const void __user *)frame.next_frame;
10908 }
10909 }
10910
10911 diff -urNp linux-3.0.3/arch/x86/kernel/crash.c linux-3.0.3/arch/x86/kernel/crash.c
10912 --- linux-3.0.3/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10913 +++ linux-3.0.3/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10914 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10915 regs = args->regs;
10916
10917 #ifdef CONFIG_X86_32
10918 - if (!user_mode_vm(regs)) {
10919 + if (!user_mode(regs)) {
10920 crash_fixup_ss_esp(&fixed_regs, regs);
10921 regs = &fixed_regs;
10922 }
10923 diff -urNp linux-3.0.3/arch/x86/kernel/doublefault_32.c linux-3.0.3/arch/x86/kernel/doublefault_32.c
10924 --- linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10925 +++ linux-3.0.3/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10926 @@ -11,7 +11,7 @@
10927
10928 #define DOUBLEFAULT_STACKSIZE (1024)
10929 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10930 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10931 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10932
10933 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10934
10935 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10936 unsigned long gdt, tss;
10937
10938 store_gdt(&gdt_desc);
10939 - gdt = gdt_desc.address;
10940 + gdt = (unsigned long)gdt_desc.address;
10941
10942 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10943
10944 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10945 /* 0x2 bit is always set */
10946 .flags = X86_EFLAGS_SF | 0x2,
10947 .sp = STACK_START,
10948 - .es = __USER_DS,
10949 + .es = __KERNEL_DS,
10950 .cs = __KERNEL_CS,
10951 .ss = __KERNEL_DS,
10952 - .ds = __USER_DS,
10953 + .ds = __KERNEL_DS,
10954 .fs = __KERNEL_PERCPU,
10955
10956 .__cr3 = __pa_nodebug(swapper_pg_dir),
10957 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_32.c linux-3.0.3/arch/x86/kernel/dumpstack_32.c
10958 --- linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
10959 +++ linux-3.0.3/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
10960 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
10961 bp = stack_frame(task, regs);
10962
10963 for (;;) {
10964 - struct thread_info *context;
10965 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
10966
10967 - context = (struct thread_info *)
10968 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
10969 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
10970 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
10971
10972 - stack = (unsigned long *)context->previous_esp;
10973 - if (!stack)
10974 + if (stack_start == task_stack_page(task))
10975 break;
10976 + stack = *(unsigned long **)stack_start;
10977 if (ops->stack(data, "IRQ") < 0)
10978 break;
10979 touch_nmi_watchdog();
10980 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
10981 * When in-kernel, we also print out the stack and code at the
10982 * time of the fault..
10983 */
10984 - if (!user_mode_vm(regs)) {
10985 + if (!user_mode(regs)) {
10986 unsigned int code_prologue = code_bytes * 43 / 64;
10987 unsigned int code_len = code_bytes;
10988 unsigned char c;
10989 u8 *ip;
10990 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10991
10992 printk(KERN_EMERG "Stack:\n");
10993 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
10994
10995 printk(KERN_EMERG "Code: ");
10996
10997 - ip = (u8 *)regs->ip - code_prologue;
10998 + ip = (u8 *)regs->ip - code_prologue + cs_base;
10999 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11000 /* try starting at IP */
11001 - ip = (u8 *)regs->ip;
11002 + ip = (u8 *)regs->ip + cs_base;
11003 code_len = code_len - code_prologue + 1;
11004 }
11005 for (i = 0; i < code_len; i++, ip++) {
11006 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11007 printk(" Bad EIP value.");
11008 break;
11009 }
11010 - if (ip == (u8 *)regs->ip)
11011 + if (ip == (u8 *)regs->ip + cs_base)
11012 printk("<%02x> ", c);
11013 else
11014 printk("%02x ", c);
11015 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11016 {
11017 unsigned short ud2;
11018
11019 + ip = ktla_ktva(ip);
11020 if (ip < PAGE_OFFSET)
11021 return 0;
11022 if (probe_kernel_address((unsigned short *)ip, ud2))
11023 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack_64.c linux-3.0.3/arch/x86/kernel/dumpstack_64.c
11024 --- linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11025 +++ linux-3.0.3/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11026 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11027 unsigned long *irq_stack_end =
11028 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11029 unsigned used = 0;
11030 - struct thread_info *tinfo;
11031 int graph = 0;
11032 unsigned long dummy;
11033 + void *stack_start;
11034
11035 if (!task)
11036 task = current;
11037 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11038 * current stack address. If the stacks consist of nested
11039 * exceptions
11040 */
11041 - tinfo = task_thread_info(task);
11042 for (;;) {
11043 char *id;
11044 unsigned long *estack_end;
11045 +
11046 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11047 &used, &id);
11048
11049 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11050 if (ops->stack(data, id) < 0)
11051 break;
11052
11053 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11054 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11055 data, estack_end, &graph);
11056 ops->stack(data, "<EOE>");
11057 /*
11058 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11059 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11060 if (ops->stack(data, "IRQ") < 0)
11061 break;
11062 - bp = ops->walk_stack(tinfo, stack, bp,
11063 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11064 ops, data, irq_stack_end, &graph);
11065 /*
11066 * We link to the next stack (which would be
11067 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11068 /*
11069 * This handles the process stack:
11070 */
11071 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11072 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11073 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11074 put_cpu();
11075 }
11076 EXPORT_SYMBOL(dump_trace);
11077 diff -urNp linux-3.0.3/arch/x86/kernel/dumpstack.c linux-3.0.3/arch/x86/kernel/dumpstack.c
11078 --- linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11079 +++ linux-3.0.3/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11080 @@ -2,6 +2,9 @@
11081 * Copyright (C) 1991, 1992 Linus Torvalds
11082 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11083 */
11084 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11085 +#define __INCLUDED_BY_HIDESYM 1
11086 +#endif
11087 #include <linux/kallsyms.h>
11088 #include <linux/kprobes.h>
11089 #include <linux/uaccess.h>
11090 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11091 static void
11092 print_ftrace_graph_addr(unsigned long addr, void *data,
11093 const struct stacktrace_ops *ops,
11094 - struct thread_info *tinfo, int *graph)
11095 + struct task_struct *task, int *graph)
11096 {
11097 - struct task_struct *task = tinfo->task;
11098 unsigned long ret_addr;
11099 int index = task->curr_ret_stack;
11100
11101 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11102 static inline void
11103 print_ftrace_graph_addr(unsigned long addr, void *data,
11104 const struct stacktrace_ops *ops,
11105 - struct thread_info *tinfo, int *graph)
11106 + struct task_struct *task, int *graph)
11107 { }
11108 #endif
11109
11110 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11111 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11112 */
11113
11114 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11115 - void *p, unsigned int size, void *end)
11116 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11117 {
11118 - void *t = tinfo;
11119 if (end) {
11120 if (p < end && p >= (end-THREAD_SIZE))
11121 return 1;
11122 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11123 }
11124
11125 unsigned long
11126 -print_context_stack(struct thread_info *tinfo,
11127 +print_context_stack(struct task_struct *task, void *stack_start,
11128 unsigned long *stack, unsigned long bp,
11129 const struct stacktrace_ops *ops, void *data,
11130 unsigned long *end, int *graph)
11131 {
11132 struct stack_frame *frame = (struct stack_frame *)bp;
11133
11134 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11135 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11136 unsigned long addr;
11137
11138 addr = *stack;
11139 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11140 } else {
11141 ops->address(data, addr, 0);
11142 }
11143 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11144 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11145 }
11146 stack++;
11147 }
11148 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11149 EXPORT_SYMBOL_GPL(print_context_stack);
11150
11151 unsigned long
11152 -print_context_stack_bp(struct thread_info *tinfo,
11153 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11154 unsigned long *stack, unsigned long bp,
11155 const struct stacktrace_ops *ops, void *data,
11156 unsigned long *end, int *graph)
11157 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11158 struct stack_frame *frame = (struct stack_frame *)bp;
11159 unsigned long *ret_addr = &frame->return_address;
11160
11161 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11162 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11163 unsigned long addr = *ret_addr;
11164
11165 if (!__kernel_text_address(addr))
11166 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11167 ops->address(data, addr, 1);
11168 frame = frame->next_frame;
11169 ret_addr = &frame->return_address;
11170 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11171 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11172 }
11173
11174 return (unsigned long)frame;
11175 @@ -186,7 +186,7 @@ void dump_stack(void)
11176
11177 bp = stack_frame(current, NULL);
11178 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11179 - current->pid, current->comm, print_tainted(),
11180 + task_pid_nr(current), current->comm, print_tainted(),
11181 init_utsname()->release,
11182 (int)strcspn(init_utsname()->version, " "),
11183 init_utsname()->version);
11184 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11185 }
11186 EXPORT_SYMBOL_GPL(oops_begin);
11187
11188 +extern void gr_handle_kernel_exploit(void);
11189 +
11190 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11191 {
11192 if (regs && kexec_should_crash(current))
11193 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11194 panic("Fatal exception in interrupt");
11195 if (panic_on_oops)
11196 panic("Fatal exception");
11197 - do_exit(signr);
11198 +
11199 + gr_handle_kernel_exploit();
11200 +
11201 + do_group_exit(signr);
11202 }
11203
11204 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11205 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11206
11207 show_registers(regs);
11208 #ifdef CONFIG_X86_32
11209 - if (user_mode_vm(regs)) {
11210 + if (user_mode(regs)) {
11211 sp = regs->sp;
11212 ss = regs->ss & 0xffff;
11213 } else {
11214 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11215 unsigned long flags = oops_begin();
11216 int sig = SIGSEGV;
11217
11218 - if (!user_mode_vm(regs))
11219 + if (!user_mode(regs))
11220 report_bug(regs->ip, regs);
11221
11222 if (__die(str, regs, err))
11223 diff -urNp linux-3.0.3/arch/x86/kernel/early_printk.c linux-3.0.3/arch/x86/kernel/early_printk.c
11224 --- linux-3.0.3/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11225 +++ linux-3.0.3/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11226 @@ -7,6 +7,7 @@
11227 #include <linux/pci_regs.h>
11228 #include <linux/pci_ids.h>
11229 #include <linux/errno.h>
11230 +#include <linux/sched.h>
11231 #include <asm/io.h>
11232 #include <asm/processor.h>
11233 #include <asm/fcntl.h>
11234 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11235 int n;
11236 va_list ap;
11237
11238 + pax_track_stack();
11239 +
11240 va_start(ap, fmt);
11241 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11242 early_console->write(early_console, buf, n);
11243 diff -urNp linux-3.0.3/arch/x86/kernel/entry_32.S linux-3.0.3/arch/x86/kernel/entry_32.S
11244 --- linux-3.0.3/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11245 +++ linux-3.0.3/arch/x86/kernel/entry_32.S 2011-08-23 21:48:14.000000000 -0400
11246 @@ -185,13 +185,146 @@
11247 /*CFI_REL_OFFSET gs, PT_GS*/
11248 .endm
11249 .macro SET_KERNEL_GS reg
11250 +
11251 +#ifdef CONFIG_CC_STACKPROTECTOR
11252 movl $(__KERNEL_STACK_CANARY), \reg
11253 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11254 + movl $(__USER_DS), \reg
11255 +#else
11256 + xorl \reg, \reg
11257 +#endif
11258 +
11259 movl \reg, %gs
11260 .endm
11261
11262 #endif /* CONFIG_X86_32_LAZY_GS */
11263
11264 -.macro SAVE_ALL
11265 +.macro pax_enter_kernel
11266 +#ifdef CONFIG_PAX_KERNEXEC
11267 + call pax_enter_kernel
11268 +#endif
11269 +.endm
11270 +
11271 +.macro pax_exit_kernel
11272 +#ifdef CONFIG_PAX_KERNEXEC
11273 + call pax_exit_kernel
11274 +#endif
11275 +.endm
11276 +
11277 +#ifdef CONFIG_PAX_KERNEXEC
11278 +ENTRY(pax_enter_kernel)
11279 +#ifdef CONFIG_PARAVIRT
11280 + pushl %eax
11281 + pushl %ecx
11282 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11283 + mov %eax, %esi
11284 +#else
11285 + mov %cr0, %esi
11286 +#endif
11287 + bts $16, %esi
11288 + jnc 1f
11289 + mov %cs, %esi
11290 + cmp $__KERNEL_CS, %esi
11291 + jz 3f
11292 + ljmp $__KERNEL_CS, $3f
11293 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11294 +2:
11295 +#ifdef CONFIG_PARAVIRT
11296 + mov %esi, %eax
11297 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11298 +#else
11299 + mov %esi, %cr0
11300 +#endif
11301 +3:
11302 +#ifdef CONFIG_PARAVIRT
11303 + popl %ecx
11304 + popl %eax
11305 +#endif
11306 + ret
11307 +ENDPROC(pax_enter_kernel)
11308 +
11309 +ENTRY(pax_exit_kernel)
11310 +#ifdef CONFIG_PARAVIRT
11311 + pushl %eax
11312 + pushl %ecx
11313 +#endif
11314 + mov %cs, %esi
11315 + cmp $__KERNEXEC_KERNEL_CS, %esi
11316 + jnz 2f
11317 +#ifdef CONFIG_PARAVIRT
11318 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11319 + mov %eax, %esi
11320 +#else
11321 + mov %cr0, %esi
11322 +#endif
11323 + btr $16, %esi
11324 + ljmp $__KERNEL_CS, $1f
11325 +1:
11326 +#ifdef CONFIG_PARAVIRT
11327 + mov %esi, %eax
11328 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11329 +#else
11330 + mov %esi, %cr0
11331 +#endif
11332 +2:
11333 +#ifdef CONFIG_PARAVIRT
11334 + popl %ecx
11335 + popl %eax
11336 +#endif
11337 + ret
11338 +ENDPROC(pax_exit_kernel)
11339 +#endif
11340 +
11341 +.macro pax_erase_kstack
11342 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11343 + call pax_erase_kstack
11344 +#endif
11345 +.endm
11346 +
11347 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11348 +/*
11349 + * ebp: thread_info
11350 + * ecx, edx: can be clobbered
11351 + */
11352 +ENTRY(pax_erase_kstack)
11353 + pushl %edi
11354 + pushl %eax
11355 +
11356 + mov TI_lowest_stack(%ebp), %edi
11357 + mov $-0xBEEF, %eax
11358 + std
11359 +
11360 +1: mov %edi, %ecx
11361 + and $THREAD_SIZE_asm - 1, %ecx
11362 + shr $2, %ecx
11363 + repne scasl
11364 + jecxz 2f
11365 +
11366 + cmp $2*16, %ecx
11367 + jc 2f
11368 +
11369 + mov $2*16, %ecx
11370 + repe scasl
11371 + jecxz 2f
11372 + jne 1b
11373 +
11374 +2: cld
11375 + mov %esp, %ecx
11376 + sub %edi, %ecx
11377 + shr $2, %ecx
11378 + rep stosl
11379 +
11380 + mov TI_task_thread_sp0(%ebp), %edi
11381 + sub $128, %edi
11382 + mov %edi, TI_lowest_stack(%ebp)
11383 +
11384 + popl %eax
11385 + popl %edi
11386 + ret
11387 +ENDPROC(pax_erase_kstack)
11388 +#endif
11389 +
11390 +.macro __SAVE_ALL _DS
11391 cld
11392 PUSH_GS
11393 pushl_cfi %fs
11394 @@ -214,7 +347,7 @@
11395 CFI_REL_OFFSET ecx, 0
11396 pushl_cfi %ebx
11397 CFI_REL_OFFSET ebx, 0
11398 - movl $(__USER_DS), %edx
11399 + movl $\_DS, %edx
11400 movl %edx, %ds
11401 movl %edx, %es
11402 movl $(__KERNEL_PERCPU), %edx
11403 @@ -222,6 +355,15 @@
11404 SET_KERNEL_GS %edx
11405 .endm
11406
11407 +.macro SAVE_ALL
11408 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11409 + __SAVE_ALL __KERNEL_DS
11410 + pax_enter_kernel
11411 +#else
11412 + __SAVE_ALL __USER_DS
11413 +#endif
11414 +.endm
11415 +
11416 .macro RESTORE_INT_REGS
11417 popl_cfi %ebx
11418 CFI_RESTORE ebx
11419 @@ -332,7 +474,15 @@ check_userspace:
11420 movb PT_CS(%esp), %al
11421 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11422 cmpl $USER_RPL, %eax
11423 +
11424 +#ifdef CONFIG_PAX_KERNEXEC
11425 + jae resume_userspace
11426 +
11427 + PAX_EXIT_KERNEL
11428 + jmp resume_kernel
11429 +#else
11430 jb resume_kernel # not returning to v8086 or userspace
11431 +#endif
11432
11433 ENTRY(resume_userspace)
11434 LOCKDEP_SYS_EXIT
11435 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11436 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11437 # int/exception return?
11438 jne work_pending
11439 - jmp restore_all
11440 + jmp restore_all_pax
11441 END(ret_from_exception)
11442
11443 #ifdef CONFIG_PREEMPT
11444 @@ -394,23 +544,34 @@ sysenter_past_esp:
11445 /*CFI_REL_OFFSET cs, 0*/
11446 /*
11447 * Push current_thread_info()->sysenter_return to the stack.
11448 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11449 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11450 */
11451 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11452 + pushl_cfi $0
11453 CFI_REL_OFFSET eip, 0
11454
11455 pushl_cfi %eax
11456 SAVE_ALL
11457 + GET_THREAD_INFO(%ebp)
11458 + movl TI_sysenter_return(%ebp),%ebp
11459 + movl %ebp,PT_EIP(%esp)
11460 ENABLE_INTERRUPTS(CLBR_NONE)
11461
11462 /*
11463 * Load the potential sixth argument from user stack.
11464 * Careful about security.
11465 */
11466 + movl PT_OLDESP(%esp),%ebp
11467 +
11468 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11469 + mov PT_OLDSS(%esp),%ds
11470 +1: movl %ds:(%ebp),%ebp
11471 + push %ss
11472 + pop %ds
11473 +#else
11474 cmpl $__PAGE_OFFSET-3,%ebp
11475 jae syscall_fault
11476 1: movl (%ebp),%ebp
11477 +#endif
11478 +
11479 movl %ebp,PT_EBP(%esp)
11480 .section __ex_table,"a"
11481 .align 4
11482 @@ -433,12 +594,23 @@ sysenter_do_call:
11483 testl $_TIF_ALLWORK_MASK, %ecx
11484 jne sysexit_audit
11485 sysenter_exit:
11486 +
11487 +#ifdef CONFIG_PAX_RANDKSTACK
11488 + pushl_cfi %eax
11489 + call pax_randomize_kstack
11490 + popl_cfi %eax
11491 +#endif
11492 +
11493 + pax_erase_kstack
11494 +
11495 /* if something modifies registers it must also disable sysexit */
11496 movl PT_EIP(%esp), %edx
11497 movl PT_OLDESP(%esp), %ecx
11498 xorl %ebp,%ebp
11499 TRACE_IRQS_ON
11500 1: mov PT_FS(%esp), %fs
11501 +2: mov PT_DS(%esp), %ds
11502 +3: mov PT_ES(%esp), %es
11503 PTGS_TO_GS
11504 ENABLE_INTERRUPTS_SYSEXIT
11505
11506 @@ -455,6 +627,9 @@ sysenter_audit:
11507 movl %eax,%edx /* 2nd arg: syscall number */
11508 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11509 call audit_syscall_entry
11510 +
11511 + pax_erase_kstack
11512 +
11513 pushl_cfi %ebx
11514 movl PT_EAX(%esp),%eax /* reload syscall number */
11515 jmp sysenter_do_call
11516 @@ -481,11 +656,17 @@ sysexit_audit:
11517
11518 CFI_ENDPROC
11519 .pushsection .fixup,"ax"
11520 -2: movl $0,PT_FS(%esp)
11521 +4: movl $0,PT_FS(%esp)
11522 + jmp 1b
11523 +5: movl $0,PT_DS(%esp)
11524 + jmp 1b
11525 +6: movl $0,PT_ES(%esp)
11526 jmp 1b
11527 .section __ex_table,"a"
11528 .align 4
11529 - .long 1b,2b
11530 + .long 1b,4b
11531 + .long 2b,5b
11532 + .long 3b,6b
11533 .popsection
11534 PTGS_TO_GS_EX
11535 ENDPROC(ia32_sysenter_target)
11536 @@ -518,6 +699,14 @@ syscall_exit:
11537 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11538 jne syscall_exit_work
11539
11540 +restore_all_pax:
11541 +
11542 +#ifdef CONFIG_PAX_RANDKSTACK
11543 + call pax_randomize_kstack
11544 +#endif
11545 +
11546 + pax_erase_kstack
11547 +
11548 restore_all:
11549 TRACE_IRQS_IRET
11550 restore_all_notrace:
11551 @@ -577,14 +766,34 @@ ldt_ss:
11552 * compensating for the offset by changing to the ESPFIX segment with
11553 * a base address that matches for the difference.
11554 */
11555 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11556 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11557 mov %esp, %edx /* load kernel esp */
11558 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11559 mov %dx, %ax /* eax: new kernel esp */
11560 sub %eax, %edx /* offset (low word is 0) */
11561 +#ifdef CONFIG_SMP
11562 + movl PER_CPU_VAR(cpu_number), %ebx
11563 + shll $PAGE_SHIFT_asm, %ebx
11564 + addl $cpu_gdt_table, %ebx
11565 +#else
11566 + movl $cpu_gdt_table, %ebx
11567 +#endif
11568 shr $16, %edx
11569 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11570 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11571 +
11572 +#ifdef CONFIG_PAX_KERNEXEC
11573 + mov %cr0, %esi
11574 + btr $16, %esi
11575 + mov %esi, %cr0
11576 +#endif
11577 +
11578 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11579 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11580 +
11581 +#ifdef CONFIG_PAX_KERNEXEC
11582 + bts $16, %esi
11583 + mov %esi, %cr0
11584 +#endif
11585 +
11586 pushl_cfi $__ESPFIX_SS
11587 pushl_cfi %eax /* new kernel esp */
11588 /* Disable interrupts, but do not irqtrace this section: we
11589 @@ -613,29 +822,23 @@ work_resched:
11590 movl TI_flags(%ebp), %ecx
11591 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11592 # than syscall tracing?
11593 - jz restore_all
11594 + jz restore_all_pax
11595 testb $_TIF_NEED_RESCHED, %cl
11596 jnz work_resched
11597
11598 work_notifysig: # deal with pending signals and
11599 # notify-resume requests
11600 + movl %esp, %eax
11601 #ifdef CONFIG_VM86
11602 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11603 - movl %esp, %eax
11604 - jne work_notifysig_v86 # returning to kernel-space or
11605 + jz 1f # returning to kernel-space or
11606 # vm86-space
11607 - xorl %edx, %edx
11608 - call do_notify_resume
11609 - jmp resume_userspace_sig
11610
11611 - ALIGN
11612 -work_notifysig_v86:
11613 pushl_cfi %ecx # save ti_flags for do_notify_resume
11614 call save_v86_state # %eax contains pt_regs pointer
11615 popl_cfi %ecx
11616 movl %eax, %esp
11617 -#else
11618 - movl %esp, %eax
11619 +1:
11620 #endif
11621 xorl %edx, %edx
11622 call do_notify_resume
11623 @@ -648,6 +851,9 @@ syscall_trace_entry:
11624 movl $-ENOSYS,PT_EAX(%esp)
11625 movl %esp, %eax
11626 call syscall_trace_enter
11627 +
11628 + pax_erase_kstack
11629 +
11630 /* What it returned is what we'll actually use. */
11631 cmpl $(nr_syscalls), %eax
11632 jnae syscall_call
11633 @@ -670,6 +876,10 @@ END(syscall_exit_work)
11634
11635 RING0_INT_FRAME # can't unwind into user space anyway
11636 syscall_fault:
11637 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11638 + push %ss
11639 + pop %ds
11640 +#endif
11641 GET_THREAD_INFO(%ebp)
11642 movl $-EFAULT,PT_EAX(%esp)
11643 jmp resume_userspace
11644 @@ -752,6 +962,36 @@ ptregs_clone:
11645 CFI_ENDPROC
11646 ENDPROC(ptregs_clone)
11647
11648 + ALIGN;
11649 +ENTRY(kernel_execve)
11650 + CFI_STARTPROC
11651 + pushl_cfi %ebp
11652 + sub $PT_OLDSS+4,%esp
11653 + pushl_cfi %edi
11654 + pushl_cfi %ecx
11655 + pushl_cfi %eax
11656 + lea 3*4(%esp),%edi
11657 + mov $PT_OLDSS/4+1,%ecx
11658 + xorl %eax,%eax
11659 + rep stosl
11660 + popl_cfi %eax
11661 + popl_cfi %ecx
11662 + popl_cfi %edi
11663 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11664 + pushl_cfi %esp
11665 + call sys_execve
11666 + add $4,%esp
11667 + CFI_ADJUST_CFA_OFFSET -4
11668 + GET_THREAD_INFO(%ebp)
11669 + test %eax,%eax
11670 + jz syscall_exit
11671 + add $PT_OLDSS+4,%esp
11672 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11673 + popl_cfi %ebp
11674 + ret
11675 + CFI_ENDPROC
11676 +ENDPROC(kernel_execve)
11677 +
11678 .macro FIXUP_ESPFIX_STACK
11679 /*
11680 * Switch back for ESPFIX stack to the normal zerobased stack
11681 @@ -761,8 +1001,15 @@ ENDPROC(ptregs_clone)
11682 * normal stack and adjusts ESP with the matching offset.
11683 */
11684 /* fixup the stack */
11685 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11686 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11687 +#ifdef CONFIG_SMP
11688 + movl PER_CPU_VAR(cpu_number), %ebx
11689 + shll $PAGE_SHIFT_asm, %ebx
11690 + addl $cpu_gdt_table, %ebx
11691 +#else
11692 + movl $cpu_gdt_table, %ebx
11693 +#endif
11694 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11695 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11696 shl $16, %eax
11697 addl %esp, %eax /* the adjusted stack pointer */
11698 pushl_cfi $__KERNEL_DS
11699 @@ -1213,7 +1460,6 @@ return_to_handler:
11700 jmp *%ecx
11701 #endif
11702
11703 -.section .rodata,"a"
11704 #include "syscall_table_32.S"
11705
11706 syscall_table_size=(.-sys_call_table)
11707 @@ -1259,9 +1505,12 @@ error_code:
11708 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11709 REG_TO_PTGS %ecx
11710 SET_KERNEL_GS %ecx
11711 - movl $(__USER_DS), %ecx
11712 + movl $(__KERNEL_DS), %ecx
11713 movl %ecx, %ds
11714 movl %ecx, %es
11715 +
11716 + pax_enter_kernel
11717 +
11718 TRACE_IRQS_OFF
11719 movl %esp,%eax # pt_regs pointer
11720 call *%edi
11721 @@ -1346,6 +1595,9 @@ nmi_stack_correct:
11722 xorl %edx,%edx # zero error code
11723 movl %esp,%eax # pt_regs pointer
11724 call do_nmi
11725 +
11726 + pax_exit_kernel
11727 +
11728 jmp restore_all_notrace
11729 CFI_ENDPROC
11730
11731 @@ -1382,6 +1634,9 @@ nmi_espfix_stack:
11732 FIXUP_ESPFIX_STACK # %eax == %esp
11733 xorl %edx,%edx # zero error code
11734 call do_nmi
11735 +
11736 + pax_exit_kernel
11737 +
11738 RESTORE_REGS
11739 lss 12+4(%esp), %esp # back to espfix stack
11740 CFI_ADJUST_CFA_OFFSET -24
11741 diff -urNp linux-3.0.3/arch/x86/kernel/entry_64.S linux-3.0.3/arch/x86/kernel/entry_64.S
11742 --- linux-3.0.3/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11743 +++ linux-3.0.3/arch/x86/kernel/entry_64.S 2011-08-23 21:48:14.000000000 -0400
11744 @@ -53,6 +53,7 @@
11745 #include <asm/paravirt.h>
11746 #include <asm/ftrace.h>
11747 #include <asm/percpu.h>
11748 +#include <asm/pgtable.h>
11749
11750 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11751 #include <linux/elf-em.h>
11752 @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11753 ENDPROC(native_usergs_sysret64)
11754 #endif /* CONFIG_PARAVIRT */
11755
11756 + .macro ljmpq sel, off
11757 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11758 + .byte 0x48; ljmp *1234f(%rip)
11759 + .pushsection .rodata
11760 + .align 16
11761 + 1234: .quad \off; .word \sel
11762 + .popsection
11763 +#else
11764 + pushq $\sel
11765 + pushq $\off
11766 + lretq
11767 +#endif
11768 + .endm
11769 +
11770 + .macro pax_enter_kernel
11771 +#ifdef CONFIG_PAX_KERNEXEC
11772 + call pax_enter_kernel
11773 +#endif
11774 + .endm
11775 +
11776 + .macro pax_exit_kernel
11777 +#ifdef CONFIG_PAX_KERNEXEC
11778 + call pax_exit_kernel
11779 +#endif
11780 + .endm
11781 +
11782 +#ifdef CONFIG_PAX_KERNEXEC
11783 +ENTRY(pax_enter_kernel)
11784 + pushq %rdi
11785 +
11786 +#ifdef CONFIG_PARAVIRT
11787 + PV_SAVE_REGS(CLBR_RDI)
11788 +#endif
11789 +
11790 + GET_CR0_INTO_RDI
11791 + bts $16,%rdi
11792 + jnc 1f
11793 + mov %cs,%edi
11794 + cmp $__KERNEL_CS,%edi
11795 + jz 3f
11796 + ljmpq __KERNEL_CS,3f
11797 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11798 +2: SET_RDI_INTO_CR0
11799 +3:
11800 +
11801 +#ifdef CONFIG_PARAVIRT
11802 + PV_RESTORE_REGS(CLBR_RDI)
11803 +#endif
11804 +
11805 + popq %rdi
11806 + retq
11807 +ENDPROC(pax_enter_kernel)
11808 +
11809 +ENTRY(pax_exit_kernel)
11810 + pushq %rdi
11811 +
11812 +#ifdef CONFIG_PARAVIRT
11813 + PV_SAVE_REGS(CLBR_RDI)
11814 +#endif
11815 +
11816 + mov %cs,%rdi
11817 + cmp $__KERNEXEC_KERNEL_CS,%edi
11818 + jnz 2f
11819 + GET_CR0_INTO_RDI
11820 + btr $16,%rdi
11821 + ljmpq __KERNEL_CS,1f
11822 +1: SET_RDI_INTO_CR0
11823 +2:
11824 +
11825 +#ifdef CONFIG_PARAVIRT
11826 + PV_RESTORE_REGS(CLBR_RDI);
11827 +#endif
11828 +
11829 + popq %rdi
11830 + retq
11831 +ENDPROC(pax_exit_kernel)
11832 +#endif
11833 +
11834 + .macro pax_enter_kernel_user
11835 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11836 + call pax_enter_kernel_user
11837 +#endif
11838 + .endm
11839 +
11840 + .macro pax_exit_kernel_user
11841 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11842 + call pax_exit_kernel_user
11843 +#endif
11844 +#ifdef CONFIG_PAX_RANDKSTACK
11845 + push %rax
11846 + call pax_randomize_kstack
11847 + pop %rax
11848 +#endif
11849 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11850 + call pax_erase_kstack
11851 +#endif
11852 + .endm
11853 +
11854 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11855 +ENTRY(pax_enter_kernel_user)
11856 + pushq %rdi
11857 + pushq %rbx
11858 +
11859 +#ifdef CONFIG_PARAVIRT
11860 + PV_SAVE_REGS(CLBR_RDI)
11861 +#endif
11862 +
11863 + GET_CR3_INTO_RDI
11864 + mov %rdi,%rbx
11865 + add $__START_KERNEL_map,%rbx
11866 + sub phys_base(%rip),%rbx
11867 +
11868 +#ifdef CONFIG_PARAVIRT
11869 + pushq %rdi
11870 + cmpl $0, pv_info+PARAVIRT_enabled
11871 + jz 1f
11872 + i = 0
11873 + .rept USER_PGD_PTRS
11874 + mov i*8(%rbx),%rsi
11875 + mov $0,%sil
11876 + lea i*8(%rbx),%rdi
11877 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11878 + i = i + 1
11879 + .endr
11880 + jmp 2f
11881 +1:
11882 +#endif
11883 +
11884 + i = 0
11885 + .rept USER_PGD_PTRS
11886 + movb $0,i*8(%rbx)
11887 + i = i + 1
11888 + .endr
11889 +
11890 +#ifdef CONFIG_PARAVIRT
11891 +2: popq %rdi
11892 +#endif
11893 + SET_RDI_INTO_CR3
11894 +
11895 +#ifdef CONFIG_PAX_KERNEXEC
11896 + GET_CR0_INTO_RDI
11897 + bts $16,%rdi
11898 + SET_RDI_INTO_CR0
11899 +#endif
11900 +
11901 +#ifdef CONFIG_PARAVIRT
11902 + PV_RESTORE_REGS(CLBR_RDI)
11903 +#endif
11904 +
11905 + popq %rbx
11906 + popq %rdi
11907 + retq
11908 +ENDPROC(pax_enter_kernel_user)
11909 +
11910 +ENTRY(pax_exit_kernel_user)
11911 + push %rdi
11912 +
11913 +#ifdef CONFIG_PARAVIRT
11914 + pushq %rbx
11915 + PV_SAVE_REGS(CLBR_RDI)
11916 +#endif
11917 +
11918 +#ifdef CONFIG_PAX_KERNEXEC
11919 + GET_CR0_INTO_RDI
11920 + btr $16,%rdi
11921 + SET_RDI_INTO_CR0
11922 +#endif
11923 +
11924 + GET_CR3_INTO_RDI
11925 + add $__START_KERNEL_map,%rdi
11926 + sub phys_base(%rip),%rdi
11927 +
11928 +#ifdef CONFIG_PARAVIRT
11929 + cmpl $0, pv_info+PARAVIRT_enabled
11930 + jz 1f
11931 + mov %rdi,%rbx
11932 + i = 0
11933 + .rept USER_PGD_PTRS
11934 + mov i*8(%rbx),%rsi
11935 + mov $0x67,%sil
11936 + lea i*8(%rbx),%rdi
11937 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11938 + i = i + 1
11939 + .endr
11940 + jmp 2f
11941 +1:
11942 +#endif
11943 +
11944 + i = 0
11945 + .rept USER_PGD_PTRS
11946 + movb $0x67,i*8(%rdi)
11947 + i = i + 1
11948 + .endr
11949 +
11950 +#ifdef CONFIG_PARAVIRT
11951 +2: PV_RESTORE_REGS(CLBR_RDI)
11952 + popq %rbx
11953 +#endif
11954 +
11955 + popq %rdi
11956 + retq
11957 +ENDPROC(pax_exit_kernel_user)
11958 +#endif
11959 +
11960 + .macro pax_erase_kstack
11961 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11962 + call pax_erase_kstack
11963 +#endif
11964 + .endm
11965 +
11966 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11967 +/*
11968 + * r10: thread_info
11969 + * rcx, rdx: can be clobbered
11970 + */
11971 +ENTRY(pax_erase_kstack)
11972 + pushq %rdi
11973 + pushq %rax
11974 +
11975 + GET_THREAD_INFO(%r10)
11976 + mov TI_lowest_stack(%r10), %rdi
11977 + mov $-0xBEEF, %rax
11978 + std
11979 +
11980 +1: mov %edi, %ecx
11981 + and $THREAD_SIZE_asm - 1, %ecx
11982 + shr $3, %ecx
11983 + repne scasq
11984 + jecxz 2f
11985 +
11986 + cmp $2*8, %ecx
11987 + jc 2f
11988 +
11989 + mov $2*8, %ecx
11990 + repe scasq
11991 + jecxz 2f
11992 + jne 1b
11993 +
11994 +2: cld
11995 + mov %esp, %ecx
11996 + sub %edi, %ecx
11997 + shr $3, %ecx
11998 + rep stosq
11999 +
12000 + mov TI_task_thread_sp0(%r10), %rdi
12001 + sub $256, %rdi
12002 + mov %rdi, TI_lowest_stack(%r10)
12003 +
12004 + popq %rax
12005 + popq %rdi
12006 + ret
12007 +ENDPROC(pax_erase_kstack)
12008 +#endif
12009
12010 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12011 #ifdef CONFIG_TRACE_IRQFLAGS
12012 @@ -318,7 +572,7 @@ ENTRY(save_args)
12013 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12014 movq_cfi rbp, 8 /* push %rbp */
12015 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12016 - testl $3, CS(%rdi)
12017 + testb $3, CS(%rdi)
12018 je 1f
12019 SWAPGS
12020 /*
12021 @@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12022
12023 RESTORE_REST
12024
12025 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12026 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12027 je int_ret_from_sys_call
12028
12029 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12030 @@ -455,7 +709,7 @@ END(ret_from_fork)
12031 ENTRY(system_call)
12032 CFI_STARTPROC simple
12033 CFI_SIGNAL_FRAME
12034 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12035 + CFI_DEF_CFA rsp,0
12036 CFI_REGISTER rip,rcx
12037 /*CFI_REGISTER rflags,r11*/
12038 SWAPGS_UNSAFE_STACK
12039 @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12040
12041 movq %rsp,PER_CPU_VAR(old_rsp)
12042 movq PER_CPU_VAR(kernel_stack),%rsp
12043 + pax_enter_kernel_user
12044 /*
12045 * No need to follow this irqs off/on section - it's straight
12046 * and short:
12047 */
12048 ENABLE_INTERRUPTS(CLBR_NONE)
12049 - SAVE_ARGS 8,1
12050 + SAVE_ARGS 8*6,1
12051 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12052 movq %rcx,RIP-ARGOFFSET(%rsp)
12053 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12054 @@ -502,6 +757,7 @@ sysret_check:
12055 andl %edi,%edx
12056 jnz sysret_careful
12057 CFI_REMEMBER_STATE
12058 + pax_exit_kernel_user
12059 /*
12060 * sysretq will re-enable interrupts:
12061 */
12062 @@ -560,6 +816,9 @@ auditsys:
12063 movq %rax,%rsi /* 2nd arg: syscall number */
12064 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12065 call audit_syscall_entry
12066 +
12067 + pax_erase_kstack
12068 +
12069 LOAD_ARGS 0 /* reload call-clobbered registers */
12070 jmp system_call_fastpath
12071
12072 @@ -590,6 +849,9 @@ tracesys:
12073 FIXUP_TOP_OF_STACK %rdi
12074 movq %rsp,%rdi
12075 call syscall_trace_enter
12076 +
12077 + pax_erase_kstack
12078 +
12079 /*
12080 * Reload arg registers from stack in case ptrace changed them.
12081 * We don't reload %rax because syscall_trace_enter() returned
12082 @@ -611,7 +873,7 @@ tracesys:
12083 GLOBAL(int_ret_from_sys_call)
12084 DISABLE_INTERRUPTS(CLBR_NONE)
12085 TRACE_IRQS_OFF
12086 - testl $3,CS-ARGOFFSET(%rsp)
12087 + testb $3,CS-ARGOFFSET(%rsp)
12088 je retint_restore_args
12089 movl $_TIF_ALLWORK_MASK,%edi
12090 /* edi: mask to check */
12091 @@ -793,6 +1055,16 @@ END(interrupt)
12092 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12093 call save_args
12094 PARTIAL_FRAME 0
12095 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12096 + testb $3, CS(%rdi)
12097 + jnz 1f
12098 + pax_enter_kernel
12099 + jmp 2f
12100 +1: pax_enter_kernel_user
12101 +2:
12102 +#else
12103 + pax_enter_kernel
12104 +#endif
12105 call \func
12106 .endm
12107
12108 @@ -825,7 +1097,7 @@ ret_from_intr:
12109 CFI_ADJUST_CFA_OFFSET -8
12110 exit_intr:
12111 GET_THREAD_INFO(%rcx)
12112 - testl $3,CS-ARGOFFSET(%rsp)
12113 + testb $3,CS-ARGOFFSET(%rsp)
12114 je retint_kernel
12115
12116 /* Interrupt came from user space */
12117 @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12118 * The iretq could re-enable interrupts:
12119 */
12120 DISABLE_INTERRUPTS(CLBR_ANY)
12121 + pax_exit_kernel_user
12122 TRACE_IRQS_IRETQ
12123 SWAPGS
12124 jmp restore_args
12125
12126 retint_restore_args: /* return to kernel space */
12127 DISABLE_INTERRUPTS(CLBR_ANY)
12128 + pax_exit_kernel
12129 /*
12130 * The iretq could re-enable interrupts:
12131 */
12132 @@ -1027,6 +1301,16 @@ ENTRY(\sym)
12133 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12134 call error_entry
12135 DEFAULT_FRAME 0
12136 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12137 + testb $3, CS(%rsp)
12138 + jnz 1f
12139 + pax_enter_kernel
12140 + jmp 2f
12141 +1: pax_enter_kernel_user
12142 +2:
12143 +#else
12144 + pax_enter_kernel
12145 +#endif
12146 movq %rsp,%rdi /* pt_regs pointer */
12147 xorl %esi,%esi /* no error code */
12148 call \do_sym
12149 @@ -1044,6 +1328,16 @@ ENTRY(\sym)
12150 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12151 call save_paranoid
12152 TRACE_IRQS_OFF
12153 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12154 + testb $3, CS(%rsp)
12155 + jnz 1f
12156 + pax_enter_kernel
12157 + jmp 2f
12158 +1: pax_enter_kernel_user
12159 +2:
12160 +#else
12161 + pax_enter_kernel
12162 +#endif
12163 movq %rsp,%rdi /* pt_regs pointer */
12164 xorl %esi,%esi /* no error code */
12165 call \do_sym
12166 @@ -1052,7 +1346,7 @@ ENTRY(\sym)
12167 END(\sym)
12168 .endm
12169
12170 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12171 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12172 .macro paranoidzeroentry_ist sym do_sym ist
12173 ENTRY(\sym)
12174 INTR_FRAME
12175 @@ -1062,8 +1356,24 @@ ENTRY(\sym)
12176 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12177 call save_paranoid
12178 TRACE_IRQS_OFF
12179 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12180 + testb $3, CS(%rsp)
12181 + jnz 1f
12182 + pax_enter_kernel
12183 + jmp 2f
12184 +1: pax_enter_kernel_user
12185 +2:
12186 +#else
12187 + pax_enter_kernel
12188 +#endif
12189 movq %rsp,%rdi /* pt_regs pointer */
12190 xorl %esi,%esi /* no error code */
12191 +#ifdef CONFIG_SMP
12192 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12193 + lea init_tss(%r12), %r12
12194 +#else
12195 + lea init_tss(%rip), %r12
12196 +#endif
12197 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12198 call \do_sym
12199 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12200 @@ -1080,6 +1390,16 @@ ENTRY(\sym)
12201 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12202 call error_entry
12203 DEFAULT_FRAME 0
12204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12205 + testb $3, CS(%rsp)
12206 + jnz 1f
12207 + pax_enter_kernel
12208 + jmp 2f
12209 +1: pax_enter_kernel_user
12210 +2:
12211 +#else
12212 + pax_enter_kernel
12213 +#endif
12214 movq %rsp,%rdi /* pt_regs pointer */
12215 movq ORIG_RAX(%rsp),%rsi /* get error code */
12216 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12217 @@ -1099,6 +1419,16 @@ ENTRY(\sym)
12218 call save_paranoid
12219 DEFAULT_FRAME 0
12220 TRACE_IRQS_OFF
12221 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12222 + testb $3, CS(%rsp)
12223 + jnz 1f
12224 + pax_enter_kernel
12225 + jmp 2f
12226 +1: pax_enter_kernel_user
12227 +2:
12228 +#else
12229 + pax_enter_kernel
12230 +#endif
12231 movq %rsp,%rdi /* pt_regs pointer */
12232 movq ORIG_RAX(%rsp),%rsi /* get error code */
12233 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12234 @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12235 TRACE_IRQS_OFF
12236 testl %ebx,%ebx /* swapgs needed? */
12237 jnz paranoid_restore
12238 - testl $3,CS(%rsp)
12239 + testb $3,CS(%rsp)
12240 jnz paranoid_userspace
12241 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12242 + pax_exit_kernel
12243 + TRACE_IRQS_IRETQ 0
12244 + SWAPGS_UNSAFE_STACK
12245 + RESTORE_ALL 8
12246 + jmp irq_return
12247 +#endif
12248 paranoid_swapgs:
12249 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12250 + pax_exit_kernel_user
12251 +#else
12252 + pax_exit_kernel
12253 +#endif
12254 TRACE_IRQS_IRETQ 0
12255 SWAPGS_UNSAFE_STACK
12256 RESTORE_ALL 8
12257 jmp irq_return
12258 paranoid_restore:
12259 + pax_exit_kernel
12260 TRACE_IRQS_IRETQ 0
12261 RESTORE_ALL 8
12262 jmp irq_return
12263 @@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12264 movq_cfi r14, R14+8
12265 movq_cfi r15, R15+8
12266 xorl %ebx,%ebx
12267 - testl $3,CS+8(%rsp)
12268 + testb $3,CS+8(%rsp)
12269 je error_kernelspace
12270 error_swapgs:
12271 SWAPGS
12272 @@ -1490,6 +1833,16 @@ ENTRY(nmi)
12273 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12274 call save_paranoid
12275 DEFAULT_FRAME 0
12276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12277 + testb $3, CS(%rsp)
12278 + jnz 1f
12279 + pax_enter_kernel
12280 + jmp 2f
12281 +1: pax_enter_kernel_user
12282 +2:
12283 +#else
12284 + pax_enter_kernel
12285 +#endif
12286 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12287 movq %rsp,%rdi
12288 movq $-1,%rsi
12289 @@ -1500,11 +1853,25 @@ ENTRY(nmi)
12290 DISABLE_INTERRUPTS(CLBR_NONE)
12291 testl %ebx,%ebx /* swapgs needed? */
12292 jnz nmi_restore
12293 - testl $3,CS(%rsp)
12294 + testb $3,CS(%rsp)
12295 jnz nmi_userspace
12296 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12297 + pax_exit_kernel
12298 + SWAPGS_UNSAFE_STACK
12299 + RESTORE_ALL 8
12300 + jmp irq_return
12301 +#endif
12302 nmi_swapgs:
12303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12304 + pax_exit_kernel_user
12305 +#else
12306 + pax_exit_kernel
12307 +#endif
12308 SWAPGS_UNSAFE_STACK
12309 + RESTORE_ALL 8
12310 + jmp irq_return
12311 nmi_restore:
12312 + pax_exit_kernel
12313 RESTORE_ALL 8
12314 jmp irq_return
12315 nmi_userspace:
12316 diff -urNp linux-3.0.3/arch/x86/kernel/ftrace.c linux-3.0.3/arch/x86/kernel/ftrace.c
12317 --- linux-3.0.3/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12318 +++ linux-3.0.3/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12319 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12320 static const void *mod_code_newcode; /* holds the text to write to the IP */
12321
12322 static unsigned nmi_wait_count;
12323 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12324 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12325
12326 int ftrace_arch_read_dyn_info(char *buf, int size)
12327 {
12328 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12329
12330 r = snprintf(buf, size, "%u %u",
12331 nmi_wait_count,
12332 - atomic_read(&nmi_update_count));
12333 + atomic_read_unchecked(&nmi_update_count));
12334 return r;
12335 }
12336
12337 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12338
12339 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12340 smp_rmb();
12341 + pax_open_kernel();
12342 ftrace_mod_code();
12343 - atomic_inc(&nmi_update_count);
12344 + pax_close_kernel();
12345 + atomic_inc_unchecked(&nmi_update_count);
12346 }
12347 /* Must have previous changes seen before executions */
12348 smp_mb();
12349 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12350 {
12351 unsigned char replaced[MCOUNT_INSN_SIZE];
12352
12353 + ip = ktla_ktva(ip);
12354 +
12355 /*
12356 * Note: Due to modules and __init, code can
12357 * disappear and change, we need to protect against faulting
12358 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12359 unsigned char old[MCOUNT_INSN_SIZE], *new;
12360 int ret;
12361
12362 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12363 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12364 new = ftrace_call_replace(ip, (unsigned long)func);
12365 ret = ftrace_modify_code(ip, old, new);
12366
12367 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12368 {
12369 unsigned char code[MCOUNT_INSN_SIZE];
12370
12371 + ip = ktla_ktva(ip);
12372 +
12373 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12374 return -EFAULT;
12375
12376 diff -urNp linux-3.0.3/arch/x86/kernel/head32.c linux-3.0.3/arch/x86/kernel/head32.c
12377 --- linux-3.0.3/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12378 +++ linux-3.0.3/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12379 @@ -19,6 +19,7 @@
12380 #include <asm/io_apic.h>
12381 #include <asm/bios_ebda.h>
12382 #include <asm/tlbflush.h>
12383 +#include <asm/boot.h>
12384
12385 static void __init i386_default_early_setup(void)
12386 {
12387 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12388 {
12389 memblock_init();
12390
12391 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12392 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12393
12394 #ifdef CONFIG_BLK_DEV_INITRD
12395 /* Reserve INITRD */
12396 diff -urNp linux-3.0.3/arch/x86/kernel/head_32.S linux-3.0.3/arch/x86/kernel/head_32.S
12397 --- linux-3.0.3/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12398 +++ linux-3.0.3/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12399 @@ -25,6 +25,12 @@
12400 /* Physical address */
12401 #define pa(X) ((X) - __PAGE_OFFSET)
12402
12403 +#ifdef CONFIG_PAX_KERNEXEC
12404 +#define ta(X) (X)
12405 +#else
12406 +#define ta(X) ((X) - __PAGE_OFFSET)
12407 +#endif
12408 +
12409 /*
12410 * References to members of the new_cpu_data structure.
12411 */
12412 @@ -54,11 +60,7 @@
12413 * and small than max_low_pfn, otherwise will waste some page table entries
12414 */
12415
12416 -#if PTRS_PER_PMD > 1
12417 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12418 -#else
12419 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12420 -#endif
12421 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12422
12423 /* Number of possible pages in the lowmem region */
12424 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12425 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12426 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12427
12428 /*
12429 + * Real beginning of normal "text" segment
12430 + */
12431 +ENTRY(stext)
12432 +ENTRY(_stext)
12433 +
12434 +/*
12435 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12436 * %esi points to the real-mode code as a 32-bit pointer.
12437 * CS and DS must be 4 GB flat segments, but we don't depend on
12438 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12439 * can.
12440 */
12441 __HEAD
12442 +
12443 +#ifdef CONFIG_PAX_KERNEXEC
12444 + jmp startup_32
12445 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12446 +.fill PAGE_SIZE-5,1,0xcc
12447 +#endif
12448 +
12449 ENTRY(startup_32)
12450 movl pa(stack_start),%ecx
12451
12452 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12453 2:
12454 leal -__PAGE_OFFSET(%ecx),%esp
12455
12456 +#ifdef CONFIG_SMP
12457 + movl $pa(cpu_gdt_table),%edi
12458 + movl $__per_cpu_load,%eax
12459 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12460 + rorl $16,%eax
12461 + movb %al,__KERNEL_PERCPU + 4(%edi)
12462 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12463 + movl $__per_cpu_end - 1,%eax
12464 + subl $__per_cpu_start,%eax
12465 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12466 +#endif
12467 +
12468 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12469 + movl $NR_CPUS,%ecx
12470 + movl $pa(cpu_gdt_table),%edi
12471 +1:
12472 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12473 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12474 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12475 + addl $PAGE_SIZE_asm,%edi
12476 + loop 1b
12477 +#endif
12478 +
12479 +#ifdef CONFIG_PAX_KERNEXEC
12480 + movl $pa(boot_gdt),%edi
12481 + movl $__LOAD_PHYSICAL_ADDR,%eax
12482 + movw %ax,__BOOT_CS + 2(%edi)
12483 + rorl $16,%eax
12484 + movb %al,__BOOT_CS + 4(%edi)
12485 + movb %ah,__BOOT_CS + 7(%edi)
12486 + rorl $16,%eax
12487 +
12488 + ljmp $(__BOOT_CS),$1f
12489 +1:
12490 +
12491 + movl $NR_CPUS,%ecx
12492 + movl $pa(cpu_gdt_table),%edi
12493 + addl $__PAGE_OFFSET,%eax
12494 +1:
12495 + movw %ax,__KERNEL_CS + 2(%edi)
12496 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12497 + rorl $16,%eax
12498 + movb %al,__KERNEL_CS + 4(%edi)
12499 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12500 + movb %ah,__KERNEL_CS + 7(%edi)
12501 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12502 + rorl $16,%eax
12503 + addl $PAGE_SIZE_asm,%edi
12504 + loop 1b
12505 +#endif
12506 +
12507 /*
12508 * Clear BSS first so that there are no surprises...
12509 */
12510 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12511 movl %eax, pa(max_pfn_mapped)
12512
12513 /* Do early initialization of the fixmap area */
12514 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12515 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12516 +#ifdef CONFIG_COMPAT_VDSO
12517 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12518 +#else
12519 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12520 +#endif
12521 #else /* Not PAE */
12522
12523 page_pde_offset = (__PAGE_OFFSET >> 20);
12524 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12525 movl %eax, pa(max_pfn_mapped)
12526
12527 /* Do early initialization of the fixmap area */
12528 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12529 - movl %eax,pa(initial_page_table+0xffc)
12530 +#ifdef CONFIG_COMPAT_VDSO
12531 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12532 +#else
12533 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12534 +#endif
12535 #endif
12536
12537 #ifdef CONFIG_PARAVIRT
12538 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12539 cmpl $num_subarch_entries, %eax
12540 jae bad_subarch
12541
12542 - movl pa(subarch_entries)(,%eax,4), %eax
12543 - subl $__PAGE_OFFSET, %eax
12544 - jmp *%eax
12545 + jmp *pa(subarch_entries)(,%eax,4)
12546
12547 bad_subarch:
12548 WEAK(lguest_entry)
12549 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12550 __INITDATA
12551
12552 subarch_entries:
12553 - .long default_entry /* normal x86/PC */
12554 - .long lguest_entry /* lguest hypervisor */
12555 - .long xen_entry /* Xen hypervisor */
12556 - .long default_entry /* Moorestown MID */
12557 + .long ta(default_entry) /* normal x86/PC */
12558 + .long ta(lguest_entry) /* lguest hypervisor */
12559 + .long ta(xen_entry) /* Xen hypervisor */
12560 + .long ta(default_entry) /* Moorestown MID */
12561 num_subarch_entries = (. - subarch_entries) / 4
12562 .previous
12563 #else
12564 @@ -312,6 +382,7 @@ default_entry:
12565 orl %edx,%eax
12566 movl %eax,%cr4
12567
12568 +#ifdef CONFIG_X86_PAE
12569 testb $X86_CR4_PAE, %al # check if PAE is enabled
12570 jz 6f
12571
12572 @@ -340,6 +411,9 @@ default_entry:
12573 /* Make changes effective */
12574 wrmsr
12575
12576 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12577 +#endif
12578 +
12579 6:
12580
12581 /*
12582 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12583 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12584 movl %eax,%ss # after changing gdt.
12585
12586 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12587 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12588 movl %eax,%ds
12589 movl %eax,%es
12590
12591 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12592 */
12593 cmpb $0,ready
12594 jne 1f
12595 - movl $gdt_page,%eax
12596 + movl $cpu_gdt_table,%eax
12597 movl $stack_canary,%ecx
12598 +#ifdef CONFIG_SMP
12599 + addl $__per_cpu_load,%ecx
12600 +#endif
12601 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12602 shrl $16, %ecx
12603 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12604 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12605 1:
12606 -#endif
12607 movl $(__KERNEL_STACK_CANARY),%eax
12608 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12609 + movl $(__USER_DS),%eax
12610 +#else
12611 + xorl %eax,%eax
12612 +#endif
12613 movl %eax,%gs
12614
12615 xorl %eax,%eax # Clear LDT
12616 @@ -558,22 +639,22 @@ early_page_fault:
12617 jmp early_fault
12618
12619 early_fault:
12620 - cld
12621 #ifdef CONFIG_PRINTK
12622 + cmpl $1,%ss:early_recursion_flag
12623 + je hlt_loop
12624 + incl %ss:early_recursion_flag
12625 + cld
12626 pusha
12627 movl $(__KERNEL_DS),%eax
12628 movl %eax,%ds
12629 movl %eax,%es
12630 - cmpl $2,early_recursion_flag
12631 - je hlt_loop
12632 - incl early_recursion_flag
12633 movl %cr2,%eax
12634 pushl %eax
12635 pushl %edx /* trapno */
12636 pushl $fault_msg
12637 call printk
12638 +; call dump_stack
12639 #endif
12640 - call dump_stack
12641 hlt_loop:
12642 hlt
12643 jmp hlt_loop
12644 @@ -581,8 +662,11 @@ hlt_loop:
12645 /* This is the default interrupt "handler" :-) */
12646 ALIGN
12647 ignore_int:
12648 - cld
12649 #ifdef CONFIG_PRINTK
12650 + cmpl $2,%ss:early_recursion_flag
12651 + je hlt_loop
12652 + incl %ss:early_recursion_flag
12653 + cld
12654 pushl %eax
12655 pushl %ecx
12656 pushl %edx
12657 @@ -591,9 +675,6 @@ ignore_int:
12658 movl $(__KERNEL_DS),%eax
12659 movl %eax,%ds
12660 movl %eax,%es
12661 - cmpl $2,early_recursion_flag
12662 - je hlt_loop
12663 - incl early_recursion_flag
12664 pushl 16(%esp)
12665 pushl 24(%esp)
12666 pushl 32(%esp)
12667 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12668 /*
12669 * BSS section
12670 */
12671 -__PAGE_ALIGNED_BSS
12672 - .align PAGE_SIZE
12673 #ifdef CONFIG_X86_PAE
12674 +.section .initial_pg_pmd,"a",@progbits
12675 initial_pg_pmd:
12676 .fill 1024*KPMDS,4,0
12677 #else
12678 +.section .initial_page_table,"a",@progbits
12679 ENTRY(initial_page_table)
12680 .fill 1024,4,0
12681 #endif
12682 +.section .initial_pg_fixmap,"a",@progbits
12683 initial_pg_fixmap:
12684 .fill 1024,4,0
12685 +.section .empty_zero_page,"a",@progbits
12686 ENTRY(empty_zero_page)
12687 .fill 4096,1,0
12688 +.section .swapper_pg_dir,"a",@progbits
12689 ENTRY(swapper_pg_dir)
12690 +#ifdef CONFIG_X86_PAE
12691 + .fill 4,8,0
12692 +#else
12693 .fill 1024,4,0
12694 +#endif
12695 +
12696 +/*
12697 + * The IDT has to be page-aligned to simplify the Pentium
12698 + * F0 0F bug workaround.. We have a special link segment
12699 + * for this.
12700 + */
12701 +.section .idt,"a",@progbits
12702 +ENTRY(idt_table)
12703 + .fill 256,8,0
12704
12705 /*
12706 * This starts the data section.
12707 */
12708 #ifdef CONFIG_X86_PAE
12709 -__PAGE_ALIGNED_DATA
12710 - /* Page-aligned for the benefit of paravirt? */
12711 - .align PAGE_SIZE
12712 +.section .initial_page_table,"a",@progbits
12713 ENTRY(initial_page_table)
12714 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12715 # if KPMDS == 3
12716 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12717 # error "Kernel PMDs should be 1, 2 or 3"
12718 # endif
12719 .align PAGE_SIZE /* needs to be page-sized too */
12720 +
12721 +#ifdef CONFIG_PAX_PER_CPU_PGD
12722 +ENTRY(cpu_pgd)
12723 + .rept NR_CPUS
12724 + .fill 4,8,0
12725 + .endr
12726 +#endif
12727 +
12728 #endif
12729
12730 .data
12731 .balign 4
12732 ENTRY(stack_start)
12733 - .long init_thread_union+THREAD_SIZE
12734 + .long init_thread_union+THREAD_SIZE-8
12735 +
12736 +ready: .byte 0
12737
12738 +.section .rodata,"a",@progbits
12739 early_recursion_flag:
12740 .long 0
12741
12742 -ready: .byte 0
12743 -
12744 int_msg:
12745 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12746
12747 @@ -707,7 +811,7 @@ fault_msg:
12748 .word 0 # 32 bit align gdt_desc.address
12749 boot_gdt_descr:
12750 .word __BOOT_DS+7
12751 - .long boot_gdt - __PAGE_OFFSET
12752 + .long pa(boot_gdt)
12753
12754 .word 0 # 32-bit align idt_desc.address
12755 idt_descr:
12756 @@ -718,7 +822,7 @@ idt_descr:
12757 .word 0 # 32 bit align gdt_desc.address
12758 ENTRY(early_gdt_descr)
12759 .word GDT_ENTRIES*8-1
12760 - .long gdt_page /* Overwritten for secondary CPUs */
12761 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12762
12763 /*
12764 * The boot_gdt must mirror the equivalent in setup.S and is
12765 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12766 .align L1_CACHE_BYTES
12767 ENTRY(boot_gdt)
12768 .fill GDT_ENTRY_BOOT_CS,8,0
12769 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12770 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12771 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12772 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12773 +
12774 + .align PAGE_SIZE_asm
12775 +ENTRY(cpu_gdt_table)
12776 + .rept NR_CPUS
12777 + .quad 0x0000000000000000 /* NULL descriptor */
12778 + .quad 0x0000000000000000 /* 0x0b reserved */
12779 + .quad 0x0000000000000000 /* 0x13 reserved */
12780 + .quad 0x0000000000000000 /* 0x1b reserved */
12781 +
12782 +#ifdef CONFIG_PAX_KERNEXEC
12783 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12784 +#else
12785 + .quad 0x0000000000000000 /* 0x20 unused */
12786 +#endif
12787 +
12788 + .quad 0x0000000000000000 /* 0x28 unused */
12789 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12790 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12791 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12792 + .quad 0x0000000000000000 /* 0x4b reserved */
12793 + .quad 0x0000000000000000 /* 0x53 reserved */
12794 + .quad 0x0000000000000000 /* 0x5b reserved */
12795 +
12796 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12797 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12798 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12799 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12800 +
12801 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12802 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12803 +
12804 + /*
12805 + * Segments used for calling PnP BIOS have byte granularity.
12806 + * The code segments and data segments have fixed 64k limits,
12807 + * the transfer segment sizes are set at run time.
12808 + */
12809 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12810 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12811 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12812 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12813 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12814 +
12815 + /*
12816 + * The APM segments have byte granularity and their bases
12817 + * are set at run time. All have 64k limits.
12818 + */
12819 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12820 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12821 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12822 +
12823 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12824 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12825 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12826 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12827 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12828 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12829 +
12830 + /* Be sure this is zeroed to avoid false validations in Xen */
12831 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12832 + .endr
12833 diff -urNp linux-3.0.3/arch/x86/kernel/head_64.S linux-3.0.3/arch/x86/kernel/head_64.S
12834 --- linux-3.0.3/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12835 +++ linux-3.0.3/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12836 @@ -19,6 +19,7 @@
12837 #include <asm/cache.h>
12838 #include <asm/processor-flags.h>
12839 #include <asm/percpu.h>
12840 +#include <asm/cpufeature.h>
12841
12842 #ifdef CONFIG_PARAVIRT
12843 #include <asm/asm-offsets.h>
12844 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12845 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12846 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12847 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12848 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12849 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12850 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12851 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12852
12853 .text
12854 __HEAD
12855 @@ -85,35 +90,22 @@ startup_64:
12856 */
12857 addq %rbp, init_level4_pgt + 0(%rip)
12858 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12859 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12860 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12861 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12862
12863 addq %rbp, level3_ident_pgt + 0(%rip)
12864 +#ifndef CONFIG_XEN
12865 + addq %rbp, level3_ident_pgt + 8(%rip)
12866 +#endif
12867
12868 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12869 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12870 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12871
12872 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12873 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12874 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12875
12876 - /* Add an Identity mapping if I am above 1G */
12877 - leaq _text(%rip), %rdi
12878 - andq $PMD_PAGE_MASK, %rdi
12879 -
12880 - movq %rdi, %rax
12881 - shrq $PUD_SHIFT, %rax
12882 - andq $(PTRS_PER_PUD - 1), %rax
12883 - jz ident_complete
12884 -
12885 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12886 - leaq level3_ident_pgt(%rip), %rbx
12887 - movq %rdx, 0(%rbx, %rax, 8)
12888 -
12889 - movq %rdi, %rax
12890 - shrq $PMD_SHIFT, %rax
12891 - andq $(PTRS_PER_PMD - 1), %rax
12892 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12893 - leaq level2_spare_pgt(%rip), %rbx
12894 - movq %rdx, 0(%rbx, %rax, 8)
12895 -ident_complete:
12896 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12897 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12898
12899 /*
12900 * Fixup the kernel text+data virtual addresses. Note that
12901 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12902 * after the boot processor executes this code.
12903 */
12904
12905 - /* Enable PAE mode and PGE */
12906 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12907 + /* Enable PAE mode and PSE/PGE */
12908 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12909 movq %rax, %cr4
12910
12911 /* Setup early boot stage 4 level pagetables. */
12912 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12913 movl $MSR_EFER, %ecx
12914 rdmsr
12915 btsl $_EFER_SCE, %eax /* Enable System Call */
12916 - btl $20,%edi /* No Execute supported? */
12917 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12918 jnc 1f
12919 btsl $_EFER_NX, %eax
12920 + leaq init_level4_pgt(%rip), %rdi
12921 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12922 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12923 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12924 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12925 1: wrmsr /* Make changes effective */
12926
12927 /* Setup cr0 */
12928 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12929 bad_address:
12930 jmp bad_address
12931
12932 - .section ".init.text","ax"
12933 + __INIT
12934 #ifdef CONFIG_EARLY_PRINTK
12935 .globl early_idt_handlers
12936 early_idt_handlers:
12937 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12938 #endif /* EARLY_PRINTK */
12939 1: hlt
12940 jmp 1b
12941 + .previous
12942
12943 #ifdef CONFIG_EARLY_PRINTK
12944 + __INITDATA
12945 early_recursion_flag:
12946 .long 0
12947 + .previous
12948
12949 + .section .rodata,"a",@progbits
12950 early_idt_msg:
12951 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12952 early_idt_ripmsg:
12953 .asciz "RIP %s\n"
12954 -#endif /* CONFIG_EARLY_PRINTK */
12955 .previous
12956 +#endif /* CONFIG_EARLY_PRINTK */
12957
12958 + .section .rodata,"a",@progbits
12959 #define NEXT_PAGE(name) \
12960 .balign PAGE_SIZE; \
12961 ENTRY(name)
12962 @@ -338,7 +340,6 @@ ENTRY(name)
12963 i = i + 1 ; \
12964 .endr
12965
12966 - .data
12967 /*
12968 * This default setting generates an ident mapping at address 0x100000
12969 * and a mapping for the kernel that precisely maps virtual address
12970 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12971 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12972 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12973 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12974 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
12975 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12976 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12977 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12978 .org init_level4_pgt + L4_START_KERNEL*8, 0
12979 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12980 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12981
12982 +#ifdef CONFIG_PAX_PER_CPU_PGD
12983 +NEXT_PAGE(cpu_pgd)
12984 + .rept NR_CPUS
12985 + .fill 512,8,0
12986 + .endr
12987 +#endif
12988 +
12989 NEXT_PAGE(level3_ident_pgt)
12990 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12991 +#ifdef CONFIG_XEN
12992 .fill 511,8,0
12993 +#else
12994 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12995 + .fill 510,8,0
12996 +#endif
12997 +
12998 +NEXT_PAGE(level3_vmalloc_pgt)
12999 + .fill 512,8,0
13000 +
13001 +NEXT_PAGE(level3_vmemmap_pgt)
13002 + .fill L3_VMEMMAP_START,8,0
13003 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13004
13005 NEXT_PAGE(level3_kernel_pgt)
13006 .fill L3_START_KERNEL,8,0
13007 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13008 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13009 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13010
13011 +NEXT_PAGE(level2_vmemmap_pgt)
13012 + .fill 512,8,0
13013 +
13014 NEXT_PAGE(level2_fixmap_pgt)
13015 - .fill 506,8,0
13016 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13017 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13018 - .fill 5,8,0
13019 + .fill 507,8,0
13020 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13021 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13022 + .fill 4,8,0
13023
13024 -NEXT_PAGE(level1_fixmap_pgt)
13025 +NEXT_PAGE(level1_vsyscall_pgt)
13026 .fill 512,8,0
13027
13028 -NEXT_PAGE(level2_ident_pgt)
13029 - /* Since I easily can, map the first 1G.
13030 + /* Since I easily can, map the first 2G.
13031 * Don't set NX because code runs from these pages.
13032 */
13033 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13034 +NEXT_PAGE(level2_ident_pgt)
13035 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13036
13037 NEXT_PAGE(level2_kernel_pgt)
13038 /*
13039 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13040 * If you want to increase this then increase MODULES_VADDR
13041 * too.)
13042 */
13043 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13044 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13045 -
13046 -NEXT_PAGE(level2_spare_pgt)
13047 - .fill 512, 8, 0
13048 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13049
13050 #undef PMDS
13051 #undef NEXT_PAGE
13052
13053 - .data
13054 + .align PAGE_SIZE
13055 +ENTRY(cpu_gdt_table)
13056 + .rept NR_CPUS
13057 + .quad 0x0000000000000000 /* NULL descriptor */
13058 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13059 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13060 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13061 + .quad 0x00cffb000000ffff /* __USER32_CS */
13062 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13063 + .quad 0x00affb000000ffff /* __USER_CS */
13064 +
13065 +#ifdef CONFIG_PAX_KERNEXEC
13066 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13067 +#else
13068 + .quad 0x0 /* unused */
13069 +#endif
13070 +
13071 + .quad 0,0 /* TSS */
13072 + .quad 0,0 /* LDT */
13073 + .quad 0,0,0 /* three TLS descriptors */
13074 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13075 + /* asm/segment.h:GDT_ENTRIES must match this */
13076 +
13077 + /* zero the remaining page */
13078 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13079 + .endr
13080 +
13081 .align 16
13082 .globl early_gdt_descr
13083 early_gdt_descr:
13084 .word GDT_ENTRIES*8-1
13085 early_gdt_descr_base:
13086 - .quad INIT_PER_CPU_VAR(gdt_page)
13087 + .quad cpu_gdt_table
13088
13089 ENTRY(phys_base)
13090 /* This must match the first entry in level2_kernel_pgt */
13091 .quad 0x0000000000000000
13092
13093 #include "../../x86/xen/xen-head.S"
13094 -
13095 - .section .bss, "aw", @nobits
13096 +
13097 + .section .rodata,"a",@progbits
13098 .align L1_CACHE_BYTES
13099 ENTRY(idt_table)
13100 - .skip IDT_ENTRIES * 16
13101 + .fill 512,8,0
13102
13103 __PAGE_ALIGNED_BSS
13104 .align PAGE_SIZE
13105 diff -urNp linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c
13106 --- linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13107 +++ linux-3.0.3/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13108 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13109 EXPORT_SYMBOL(cmpxchg8b_emu);
13110 #endif
13111
13112 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13113 +
13114 /* Networking helper routines. */
13115 EXPORT_SYMBOL(csum_partial_copy_generic);
13116 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13117 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13118
13119 EXPORT_SYMBOL(__get_user_1);
13120 EXPORT_SYMBOL(__get_user_2);
13121 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13122
13123 EXPORT_SYMBOL(csum_partial);
13124 EXPORT_SYMBOL(empty_zero_page);
13125 +
13126 +#ifdef CONFIG_PAX_KERNEXEC
13127 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13128 +#endif
13129 diff -urNp linux-3.0.3/arch/x86/kernel/i8259.c linux-3.0.3/arch/x86/kernel/i8259.c
13130 --- linux-3.0.3/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13131 +++ linux-3.0.3/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13132 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13133 "spurious 8259A interrupt: IRQ%d.\n", irq);
13134 spurious_irq_mask |= irqmask;
13135 }
13136 - atomic_inc(&irq_err_count);
13137 + atomic_inc_unchecked(&irq_err_count);
13138 /*
13139 * Theoretically we do not have to handle this IRQ,
13140 * but in Linux this does not cause problems and is
13141 diff -urNp linux-3.0.3/arch/x86/kernel/init_task.c linux-3.0.3/arch/x86/kernel/init_task.c
13142 --- linux-3.0.3/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13143 +++ linux-3.0.3/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13144 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13145 * way process stacks are handled. This is done by having a special
13146 * "init_task" linker map entry..
13147 */
13148 -union thread_union init_thread_union __init_task_data =
13149 - { INIT_THREAD_INFO(init_task) };
13150 +union thread_union init_thread_union __init_task_data;
13151
13152 /*
13153 * Initial task structure.
13154 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13155 * section. Since TSS's are completely CPU-local, we want them
13156 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13157 */
13158 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13159 -
13160 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13161 +EXPORT_SYMBOL(init_tss);
13162 diff -urNp linux-3.0.3/arch/x86/kernel/ioport.c linux-3.0.3/arch/x86/kernel/ioport.c
13163 --- linux-3.0.3/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13164 +++ linux-3.0.3/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13165 @@ -6,6 +6,7 @@
13166 #include <linux/sched.h>
13167 #include <linux/kernel.h>
13168 #include <linux/capability.h>
13169 +#include <linux/security.h>
13170 #include <linux/errno.h>
13171 #include <linux/types.h>
13172 #include <linux/ioport.h>
13173 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13174
13175 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13176 return -EINVAL;
13177 +#ifdef CONFIG_GRKERNSEC_IO
13178 + if (turn_on && grsec_disable_privio) {
13179 + gr_handle_ioperm();
13180 + return -EPERM;
13181 + }
13182 +#endif
13183 if (turn_on && !capable(CAP_SYS_RAWIO))
13184 return -EPERM;
13185
13186 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13187 * because the ->io_bitmap_max value must match the bitmap
13188 * contents:
13189 */
13190 - tss = &per_cpu(init_tss, get_cpu());
13191 + tss = init_tss + get_cpu();
13192
13193 if (turn_on)
13194 bitmap_clear(t->io_bitmap_ptr, from, num);
13195 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13196 return -EINVAL;
13197 /* Trying to gain more privileges? */
13198 if (level > old) {
13199 +#ifdef CONFIG_GRKERNSEC_IO
13200 + if (grsec_disable_privio) {
13201 + gr_handle_iopl();
13202 + return -EPERM;
13203 + }
13204 +#endif
13205 if (!capable(CAP_SYS_RAWIO))
13206 return -EPERM;
13207 }
13208 diff -urNp linux-3.0.3/arch/x86/kernel/irq_32.c linux-3.0.3/arch/x86/kernel/irq_32.c
13209 --- linux-3.0.3/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13210 +++ linux-3.0.3/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13211 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13212 __asm__ __volatile__("andl %%esp,%0" :
13213 "=r" (sp) : "0" (THREAD_SIZE - 1));
13214
13215 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13216 + return sp < STACK_WARN;
13217 }
13218
13219 static void print_stack_overflow(void)
13220 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13221 * per-CPU IRQ handling contexts (thread information and stack)
13222 */
13223 union irq_ctx {
13224 - struct thread_info tinfo;
13225 - u32 stack[THREAD_SIZE/sizeof(u32)];
13226 + unsigned long previous_esp;
13227 + u32 stack[THREAD_SIZE/sizeof(u32)];
13228 } __attribute__((aligned(THREAD_SIZE)));
13229
13230 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13231 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13232 static inline int
13233 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13234 {
13235 - union irq_ctx *curctx, *irqctx;
13236 + union irq_ctx *irqctx;
13237 u32 *isp, arg1, arg2;
13238
13239 - curctx = (union irq_ctx *) current_thread_info();
13240 irqctx = __this_cpu_read(hardirq_ctx);
13241
13242 /*
13243 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13244 * handler) we can't do that and just have to keep using the
13245 * current stack (which is the irq stack already after all)
13246 */
13247 - if (unlikely(curctx == irqctx))
13248 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13249 return 0;
13250
13251 /* build the stack frame on the IRQ stack */
13252 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13253 - irqctx->tinfo.task = curctx->tinfo.task;
13254 - irqctx->tinfo.previous_esp = current_stack_pointer;
13255 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13256 + irqctx->previous_esp = current_stack_pointer;
13257
13258 - /*
13259 - * Copy the softirq bits in preempt_count so that the
13260 - * softirq checks work in the hardirq context.
13261 - */
13262 - irqctx->tinfo.preempt_count =
13263 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13264 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13265 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13266 + __set_fs(MAKE_MM_SEG(0));
13267 +#endif
13268
13269 if (unlikely(overflow))
13270 call_on_stack(print_stack_overflow, isp);
13271 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13272 : "0" (irq), "1" (desc), "2" (isp),
13273 "D" (desc->handle_irq)
13274 : "memory", "cc", "ecx");
13275 +
13276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13277 + __set_fs(current_thread_info()->addr_limit);
13278 +#endif
13279 +
13280 return 1;
13281 }
13282
13283 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13284 */
13285 void __cpuinit irq_ctx_init(int cpu)
13286 {
13287 - union irq_ctx *irqctx;
13288 -
13289 if (per_cpu(hardirq_ctx, cpu))
13290 return;
13291
13292 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13293 - THREAD_FLAGS,
13294 - THREAD_ORDER));
13295 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13296 - irqctx->tinfo.cpu = cpu;
13297 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13298 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13299 -
13300 - per_cpu(hardirq_ctx, cpu) = irqctx;
13301 -
13302 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13303 - THREAD_FLAGS,
13304 - THREAD_ORDER));
13305 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13306 - irqctx->tinfo.cpu = cpu;
13307 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13308 -
13309 - per_cpu(softirq_ctx, cpu) = irqctx;
13310 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13311 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13312
13313 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13314 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13315 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13316 asmlinkage void do_softirq(void)
13317 {
13318 unsigned long flags;
13319 - struct thread_info *curctx;
13320 union irq_ctx *irqctx;
13321 u32 *isp;
13322
13323 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13324 local_irq_save(flags);
13325
13326 if (local_softirq_pending()) {
13327 - curctx = current_thread_info();
13328 irqctx = __this_cpu_read(softirq_ctx);
13329 - irqctx->tinfo.task = curctx->task;
13330 - irqctx->tinfo.previous_esp = current_stack_pointer;
13331 + irqctx->previous_esp = current_stack_pointer;
13332
13333 /* build the stack frame on the softirq stack */
13334 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13335 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13336 +
13337 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13338 + __set_fs(MAKE_MM_SEG(0));
13339 +#endif
13340
13341 call_on_stack(__do_softirq, isp);
13342 +
13343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13344 + __set_fs(current_thread_info()->addr_limit);
13345 +#endif
13346 +
13347 /*
13348 * Shouldn't happen, we returned above if in_interrupt():
13349 */
13350 diff -urNp linux-3.0.3/arch/x86/kernel/irq.c linux-3.0.3/arch/x86/kernel/irq.c
13351 --- linux-3.0.3/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13352 +++ linux-3.0.3/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13353 @@ -17,7 +17,7 @@
13354 #include <asm/mce.h>
13355 #include <asm/hw_irq.h>
13356
13357 -atomic_t irq_err_count;
13358 +atomic_unchecked_t irq_err_count;
13359
13360 /* Function pointer for generic interrupt vector handling */
13361 void (*x86_platform_ipi_callback)(void) = NULL;
13362 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13363 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13364 seq_printf(p, " Machine check polls\n");
13365 #endif
13366 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13367 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13368 #if defined(CONFIG_X86_IO_APIC)
13369 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13370 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13371 #endif
13372 return 0;
13373 }
13374 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13375
13376 u64 arch_irq_stat(void)
13377 {
13378 - u64 sum = atomic_read(&irq_err_count);
13379 + u64 sum = atomic_read_unchecked(&irq_err_count);
13380
13381 #ifdef CONFIG_X86_IO_APIC
13382 - sum += atomic_read(&irq_mis_count);
13383 + sum += atomic_read_unchecked(&irq_mis_count);
13384 #endif
13385 return sum;
13386 }
13387 diff -urNp linux-3.0.3/arch/x86/kernel/kgdb.c linux-3.0.3/arch/x86/kernel/kgdb.c
13388 --- linux-3.0.3/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13389 +++ linux-3.0.3/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13390 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13391 #ifdef CONFIG_X86_32
13392 switch (regno) {
13393 case GDB_SS:
13394 - if (!user_mode_vm(regs))
13395 + if (!user_mode(regs))
13396 *(unsigned long *)mem = __KERNEL_DS;
13397 break;
13398 case GDB_SP:
13399 - if (!user_mode_vm(regs))
13400 + if (!user_mode(regs))
13401 *(unsigned long *)mem = kernel_stack_pointer(regs);
13402 break;
13403 case GDB_GS:
13404 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13405 case 'k':
13406 /* clear the trace bit */
13407 linux_regs->flags &= ~X86_EFLAGS_TF;
13408 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13409 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13410
13411 /* set the trace bit if we're stepping */
13412 if (remcomInBuffer[0] == 's') {
13413 linux_regs->flags |= X86_EFLAGS_TF;
13414 - atomic_set(&kgdb_cpu_doing_single_step,
13415 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13416 raw_smp_processor_id());
13417 }
13418
13419 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13420 return NOTIFY_DONE;
13421
13422 case DIE_DEBUG:
13423 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13424 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13425 if (user_mode(regs))
13426 return single_step_cont(regs, args);
13427 break;
13428 diff -urNp linux-3.0.3/arch/x86/kernel/kprobes.c linux-3.0.3/arch/x86/kernel/kprobes.c
13429 --- linux-3.0.3/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13430 +++ linux-3.0.3/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13431 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13432 } __attribute__((packed)) *insn;
13433
13434 insn = (struct __arch_relative_insn *)from;
13435 +
13436 + pax_open_kernel();
13437 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13438 insn->op = op;
13439 + pax_close_kernel();
13440 }
13441
13442 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13443 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13444 kprobe_opcode_t opcode;
13445 kprobe_opcode_t *orig_opcodes = opcodes;
13446
13447 - if (search_exception_tables((unsigned long)opcodes))
13448 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13449 return 0; /* Page fault may occur on this address. */
13450
13451 retry:
13452 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13453 }
13454 }
13455 insn_get_length(&insn);
13456 + pax_open_kernel();
13457 memcpy(dest, insn.kaddr, insn.length);
13458 + pax_close_kernel();
13459
13460 #ifdef CONFIG_X86_64
13461 if (insn_rip_relative(&insn)) {
13462 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13463 (u8 *) dest;
13464 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13465 disp = (u8 *) dest + insn_offset_displacement(&insn);
13466 + pax_open_kernel();
13467 *(s32 *) disp = (s32) newdisp;
13468 + pax_close_kernel();
13469 }
13470 #endif
13471 return insn.length;
13472 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13473 */
13474 __copy_instruction(p->ainsn.insn, p->addr, 0);
13475
13476 - if (can_boost(p->addr))
13477 + if (can_boost(ktla_ktva(p->addr)))
13478 p->ainsn.boostable = 0;
13479 else
13480 p->ainsn.boostable = -1;
13481
13482 - p->opcode = *p->addr;
13483 + p->opcode = *(ktla_ktva(p->addr));
13484 }
13485
13486 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13487 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13488 * nor set current_kprobe, because it doesn't use single
13489 * stepping.
13490 */
13491 - regs->ip = (unsigned long)p->ainsn.insn;
13492 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13493 preempt_enable_no_resched();
13494 return;
13495 }
13496 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13497 if (p->opcode == BREAKPOINT_INSTRUCTION)
13498 regs->ip = (unsigned long)p->addr;
13499 else
13500 - regs->ip = (unsigned long)p->ainsn.insn;
13501 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13502 }
13503
13504 /*
13505 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13506 setup_singlestep(p, regs, kcb, 0);
13507 return 1;
13508 }
13509 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13510 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13511 /*
13512 * The breakpoint instruction was removed right
13513 * after we hit it. Another cpu has removed
13514 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13515 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13516 {
13517 unsigned long *tos = stack_addr(regs);
13518 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13519 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13520 unsigned long orig_ip = (unsigned long)p->addr;
13521 kprobe_opcode_t *insn = p->ainsn.insn;
13522
13523 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13524 struct die_args *args = data;
13525 int ret = NOTIFY_DONE;
13526
13527 - if (args->regs && user_mode_vm(args->regs))
13528 + if (args->regs && user_mode(args->regs))
13529 return ret;
13530
13531 switch (val) {
13532 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13533 * Verify if the address gap is in 2GB range, because this uses
13534 * a relative jump.
13535 */
13536 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13537 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13538 if (abs(rel) > 0x7fffffff)
13539 return -ERANGE;
13540
13541 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13542 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13543
13544 /* Set probe function call */
13545 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13546 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13547
13548 /* Set returning jmp instruction at the tail of out-of-line buffer */
13549 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13550 - (u8 *)op->kp.addr + op->optinsn.size);
13551 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13552
13553 flush_icache_range((unsigned long) buf,
13554 (unsigned long) buf + TMPL_END_IDX +
13555 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13556 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13557
13558 /* Backup instructions which will be replaced by jump address */
13559 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13560 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13561 RELATIVE_ADDR_SIZE);
13562
13563 insn_buf[0] = RELATIVEJUMP_OPCODE;
13564 diff -urNp linux-3.0.3/arch/x86/kernel/kvm.c linux-3.0.3/arch/x86/kernel/kvm.c
13565 --- linux-3.0.3/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13566 +++ linux-3.0.3/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13567 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13568 pv_mmu_ops.set_pud = kvm_set_pud;
13569 #if PAGETABLE_LEVELS == 4
13570 pv_mmu_ops.set_pgd = kvm_set_pgd;
13571 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13572 #endif
13573 #endif
13574 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13575 diff -urNp linux-3.0.3/arch/x86/kernel/ldt.c linux-3.0.3/arch/x86/kernel/ldt.c
13576 --- linux-3.0.3/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13577 +++ linux-3.0.3/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13578 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13579 if (reload) {
13580 #ifdef CONFIG_SMP
13581 preempt_disable();
13582 - load_LDT(pc);
13583 + load_LDT_nolock(pc);
13584 if (!cpumask_equal(mm_cpumask(current->mm),
13585 cpumask_of(smp_processor_id())))
13586 smp_call_function(flush_ldt, current->mm, 1);
13587 preempt_enable();
13588 #else
13589 - load_LDT(pc);
13590 + load_LDT_nolock(pc);
13591 #endif
13592 }
13593 if (oldsize) {
13594 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13595 return err;
13596
13597 for (i = 0; i < old->size; i++)
13598 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13599 + write_ldt_entry(new->ldt, i, old->ldt + i);
13600 return 0;
13601 }
13602
13603 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13604 retval = copy_ldt(&mm->context, &old_mm->context);
13605 mutex_unlock(&old_mm->context.lock);
13606 }
13607 +
13608 + if (tsk == current) {
13609 + mm->context.vdso = 0;
13610 +
13611 +#ifdef CONFIG_X86_32
13612 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13613 + mm->context.user_cs_base = 0UL;
13614 + mm->context.user_cs_limit = ~0UL;
13615 +
13616 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13617 + cpus_clear(mm->context.cpu_user_cs_mask);
13618 +#endif
13619 +
13620 +#endif
13621 +#endif
13622 +
13623 + }
13624 +
13625 return retval;
13626 }
13627
13628 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13629 }
13630 }
13631
13632 +#ifdef CONFIG_PAX_SEGMEXEC
13633 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13634 + error = -EINVAL;
13635 + goto out_unlock;
13636 + }
13637 +#endif
13638 +
13639 fill_ldt(&ldt, &ldt_info);
13640 if (oldmode)
13641 ldt.avl = 0;
13642 diff -urNp linux-3.0.3/arch/x86/kernel/machine_kexec_32.c linux-3.0.3/arch/x86/kernel/machine_kexec_32.c
13643 --- linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13644 +++ linux-3.0.3/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13645 @@ -27,7 +27,7 @@
13646 #include <asm/cacheflush.h>
13647 #include <asm/debugreg.h>
13648
13649 -static void set_idt(void *newidt, __u16 limit)
13650 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13651 {
13652 struct desc_ptr curidt;
13653
13654 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13655 }
13656
13657
13658 -static void set_gdt(void *newgdt, __u16 limit)
13659 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13660 {
13661 struct desc_ptr curgdt;
13662
13663 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13664 }
13665
13666 control_page = page_address(image->control_code_page);
13667 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13668 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13669
13670 relocate_kernel_ptr = control_page;
13671 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13672 diff -urNp linux-3.0.3/arch/x86/kernel/microcode_intel.c linux-3.0.3/arch/x86/kernel/microcode_intel.c
13673 --- linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13674 +++ linux-3.0.3/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13675 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13676
13677 static int get_ucode_user(void *to, const void *from, size_t n)
13678 {
13679 - return copy_from_user(to, from, n);
13680 + return copy_from_user(to, (__force const void __user *)from, n);
13681 }
13682
13683 static enum ucode_state
13684 request_microcode_user(int cpu, const void __user *buf, size_t size)
13685 {
13686 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13687 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13688 }
13689
13690 static void microcode_fini_cpu(int cpu)
13691 diff -urNp linux-3.0.3/arch/x86/kernel/module.c linux-3.0.3/arch/x86/kernel/module.c
13692 --- linux-3.0.3/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13693 +++ linux-3.0.3/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13694 @@ -36,21 +36,66 @@
13695 #define DEBUGP(fmt...)
13696 #endif
13697
13698 -void *module_alloc(unsigned long size)
13699 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13700 {
13701 if (PAGE_ALIGN(size) > MODULES_LEN)
13702 return NULL;
13703 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13704 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13705 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13706 -1, __builtin_return_address(0));
13707 }
13708
13709 +void *module_alloc(unsigned long size)
13710 +{
13711 +
13712 +#ifdef CONFIG_PAX_KERNEXEC
13713 + return __module_alloc(size, PAGE_KERNEL);
13714 +#else
13715 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13716 +#endif
13717 +
13718 +}
13719 +
13720 /* Free memory returned from module_alloc */
13721 void module_free(struct module *mod, void *module_region)
13722 {
13723 vfree(module_region);
13724 }
13725
13726 +#ifdef CONFIG_PAX_KERNEXEC
13727 +#ifdef CONFIG_X86_32
13728 +void *module_alloc_exec(unsigned long size)
13729 +{
13730 + struct vm_struct *area;
13731 +
13732 + if (size == 0)
13733 + return NULL;
13734 +
13735 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13736 + return area ? area->addr : NULL;
13737 +}
13738 +EXPORT_SYMBOL(module_alloc_exec);
13739 +
13740 +void module_free_exec(struct module *mod, void *module_region)
13741 +{
13742 + vunmap(module_region);
13743 +}
13744 +EXPORT_SYMBOL(module_free_exec);
13745 +#else
13746 +void module_free_exec(struct module *mod, void *module_region)
13747 +{
13748 + module_free(mod, module_region);
13749 +}
13750 +EXPORT_SYMBOL(module_free_exec);
13751 +
13752 +void *module_alloc_exec(unsigned long size)
13753 +{
13754 + return __module_alloc(size, PAGE_KERNEL_RX);
13755 +}
13756 +EXPORT_SYMBOL(module_alloc_exec);
13757 +#endif
13758 +#endif
13759 +
13760 /* We don't need anything special. */
13761 int module_frob_arch_sections(Elf_Ehdr *hdr,
13762 Elf_Shdr *sechdrs,
13763 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13764 unsigned int i;
13765 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13766 Elf32_Sym *sym;
13767 - uint32_t *location;
13768 + uint32_t *plocation, location;
13769
13770 DEBUGP("Applying relocate section %u to %u\n", relsec,
13771 sechdrs[relsec].sh_info);
13772 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13773 /* This is where to make the change */
13774 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13775 - + rel[i].r_offset;
13776 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13777 + location = (uint32_t)plocation;
13778 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13779 + plocation = ktla_ktva((void *)plocation);
13780 /* This is the symbol it is referring to. Note that all
13781 undefined symbols have been resolved. */
13782 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13783 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13784 switch (ELF32_R_TYPE(rel[i].r_info)) {
13785 case R_386_32:
13786 /* We add the value into the location given */
13787 - *location += sym->st_value;
13788 + pax_open_kernel();
13789 + *plocation += sym->st_value;
13790 + pax_close_kernel();
13791 break;
13792 case R_386_PC32:
13793 /* Add the value, subtract its postition */
13794 - *location += sym->st_value - (uint32_t)location;
13795 + pax_open_kernel();
13796 + *plocation += sym->st_value - location;
13797 + pax_close_kernel();
13798 break;
13799 default:
13800 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13801 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13802 case R_X86_64_NONE:
13803 break;
13804 case R_X86_64_64:
13805 + pax_open_kernel();
13806 *(u64 *)loc = val;
13807 + pax_close_kernel();
13808 break;
13809 case R_X86_64_32:
13810 + pax_open_kernel();
13811 *(u32 *)loc = val;
13812 + pax_close_kernel();
13813 if (val != *(u32 *)loc)
13814 goto overflow;
13815 break;
13816 case R_X86_64_32S:
13817 + pax_open_kernel();
13818 *(s32 *)loc = val;
13819 + pax_close_kernel();
13820 if ((s64)val != *(s32 *)loc)
13821 goto overflow;
13822 break;
13823 case R_X86_64_PC32:
13824 val -= (u64)loc;
13825 + pax_open_kernel();
13826 *(u32 *)loc = val;
13827 + pax_close_kernel();
13828 +
13829 #if 0
13830 if ((s64)val != *(s32 *)loc)
13831 goto overflow;
13832 diff -urNp linux-3.0.3/arch/x86/kernel/paravirt.c linux-3.0.3/arch/x86/kernel/paravirt.c
13833 --- linux-3.0.3/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13834 +++ linux-3.0.3/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13835 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13836 {
13837 return x;
13838 }
13839 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13840 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13841 +#endif
13842
13843 void __init default_banner(void)
13844 {
13845 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13846 * corresponding structure. */
13847 static void *get_call_destination(u8 type)
13848 {
13849 - struct paravirt_patch_template tmpl = {
13850 + const struct paravirt_patch_template tmpl = {
13851 .pv_init_ops = pv_init_ops,
13852 .pv_time_ops = pv_time_ops,
13853 .pv_cpu_ops = pv_cpu_ops,
13854 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13855 .pv_lock_ops = pv_lock_ops,
13856 #endif
13857 };
13858 +
13859 + pax_track_stack();
13860 +
13861 return *((void **)&tmpl + type);
13862 }
13863
13864 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13865 if (opfunc == NULL)
13866 /* If there's no function, patch it with a ud2a (BUG) */
13867 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13868 - else if (opfunc == _paravirt_nop)
13869 + else if (opfunc == (void *)_paravirt_nop)
13870 /* If the operation is a nop, then nop the callsite */
13871 ret = paravirt_patch_nop();
13872
13873 /* identity functions just return their single argument */
13874 - else if (opfunc == _paravirt_ident_32)
13875 + else if (opfunc == (void *)_paravirt_ident_32)
13876 ret = paravirt_patch_ident_32(insnbuf, len);
13877 - else if (opfunc == _paravirt_ident_64)
13878 + else if (opfunc == (void *)_paravirt_ident_64)
13879 ret = paravirt_patch_ident_64(insnbuf, len);
13880 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13881 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13882 + ret = paravirt_patch_ident_64(insnbuf, len);
13883 +#endif
13884
13885 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13886 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13887 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13888 if (insn_len > len || start == NULL)
13889 insn_len = len;
13890 else
13891 - memcpy(insnbuf, start, insn_len);
13892 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13893
13894 return insn_len;
13895 }
13896 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13897 preempt_enable();
13898 }
13899
13900 -struct pv_info pv_info = {
13901 +struct pv_info pv_info __read_only = {
13902 .name = "bare hardware",
13903 .paravirt_enabled = 0,
13904 .kernel_rpl = 0,
13905 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13906 };
13907
13908 -struct pv_init_ops pv_init_ops = {
13909 +struct pv_init_ops pv_init_ops __read_only = {
13910 .patch = native_patch,
13911 };
13912
13913 -struct pv_time_ops pv_time_ops = {
13914 +struct pv_time_ops pv_time_ops __read_only = {
13915 .sched_clock = native_sched_clock,
13916 };
13917
13918 -struct pv_irq_ops pv_irq_ops = {
13919 +struct pv_irq_ops pv_irq_ops __read_only = {
13920 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13921 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13922 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13923 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13924 #endif
13925 };
13926
13927 -struct pv_cpu_ops pv_cpu_ops = {
13928 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13929 .cpuid = native_cpuid,
13930 .get_debugreg = native_get_debugreg,
13931 .set_debugreg = native_set_debugreg,
13932 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13933 .end_context_switch = paravirt_nop,
13934 };
13935
13936 -struct pv_apic_ops pv_apic_ops = {
13937 +struct pv_apic_ops pv_apic_ops __read_only = {
13938 #ifdef CONFIG_X86_LOCAL_APIC
13939 .startup_ipi_hook = paravirt_nop,
13940 #endif
13941 };
13942
13943 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13944 +#ifdef CONFIG_X86_32
13945 +#ifdef CONFIG_X86_PAE
13946 +/* 64-bit pagetable entries */
13947 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13948 +#else
13949 /* 32-bit pagetable entries */
13950 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13951 +#endif
13952 #else
13953 /* 64-bit pagetable entries */
13954 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13955 #endif
13956
13957 -struct pv_mmu_ops pv_mmu_ops = {
13958 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13959
13960 .read_cr2 = native_read_cr2,
13961 .write_cr2 = native_write_cr2,
13962 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
13963 .make_pud = PTE_IDENT,
13964
13965 .set_pgd = native_set_pgd,
13966 + .set_pgd_batched = native_set_pgd_batched,
13967 #endif
13968 #endif /* PAGETABLE_LEVELS >= 3 */
13969
13970 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13971 },
13972
13973 .set_fixmap = native_set_fixmap,
13974 +
13975 +#ifdef CONFIG_PAX_KERNEXEC
13976 + .pax_open_kernel = native_pax_open_kernel,
13977 + .pax_close_kernel = native_pax_close_kernel,
13978 +#endif
13979 +
13980 };
13981
13982 EXPORT_SYMBOL_GPL(pv_time_ops);
13983 diff -urNp linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c
13984 --- linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
13985 +++ linux-3.0.3/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
13986 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13987 arch_spin_lock(lock);
13988 }
13989
13990 -struct pv_lock_ops pv_lock_ops = {
13991 +struct pv_lock_ops pv_lock_ops __read_only = {
13992 #ifdef CONFIG_SMP
13993 .spin_is_locked = __ticket_spin_is_locked,
13994 .spin_is_contended = __ticket_spin_is_contended,
13995 diff -urNp linux-3.0.3/arch/x86/kernel/pci-iommu_table.c linux-3.0.3/arch/x86/kernel/pci-iommu_table.c
13996 --- linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
13997 +++ linux-3.0.3/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
13998 @@ -2,7 +2,7 @@
13999 #include <asm/iommu_table.h>
14000 #include <linux/string.h>
14001 #include <linux/kallsyms.h>
14002 -
14003 +#include <linux/sched.h>
14004
14005 #define DEBUG 1
14006
14007 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14008 {
14009 struct iommu_table_entry *p, *q, *x;
14010
14011 + pax_track_stack();
14012 +
14013 /* Simple cyclic dependency checker. */
14014 for (p = start; p < finish; p++) {
14015 q = find_dependents_of(start, finish, p);
14016 diff -urNp linux-3.0.3/arch/x86/kernel/process_32.c linux-3.0.3/arch/x86/kernel/process_32.c
14017 --- linux-3.0.3/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14018 +++ linux-3.0.3/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14019 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14020 unsigned long thread_saved_pc(struct task_struct *tsk)
14021 {
14022 return ((unsigned long *)tsk->thread.sp)[3];
14023 +//XXX return tsk->thread.eip;
14024 }
14025
14026 #ifndef CONFIG_SMP
14027 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14028 unsigned long sp;
14029 unsigned short ss, gs;
14030
14031 - if (user_mode_vm(regs)) {
14032 + if (user_mode(regs)) {
14033 sp = regs->sp;
14034 ss = regs->ss & 0xffff;
14035 - gs = get_user_gs(regs);
14036 } else {
14037 sp = kernel_stack_pointer(regs);
14038 savesegment(ss, ss);
14039 - savesegment(gs, gs);
14040 }
14041 + gs = get_user_gs(regs);
14042
14043 show_regs_common();
14044
14045 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14046 struct task_struct *tsk;
14047 int err;
14048
14049 - childregs = task_pt_regs(p);
14050 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14051 *childregs = *regs;
14052 childregs->ax = 0;
14053 childregs->sp = sp;
14054
14055 p->thread.sp = (unsigned long) childregs;
14056 p->thread.sp0 = (unsigned long) (childregs+1);
14057 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14058
14059 p->thread.ip = (unsigned long) ret_from_fork;
14060
14061 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14062 struct thread_struct *prev = &prev_p->thread,
14063 *next = &next_p->thread;
14064 int cpu = smp_processor_id();
14065 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14066 + struct tss_struct *tss = init_tss + cpu;
14067 bool preload_fpu;
14068
14069 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14070 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14071 */
14072 lazy_save_gs(prev->gs);
14073
14074 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14075 + __set_fs(task_thread_info(next_p)->addr_limit);
14076 +#endif
14077 +
14078 /*
14079 * Load the per-thread Thread-Local Storage descriptor.
14080 */
14081 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14082 */
14083 arch_end_context_switch(next_p);
14084
14085 + percpu_write(current_task, next_p);
14086 + percpu_write(current_tinfo, &next_p->tinfo);
14087 +
14088 if (preload_fpu)
14089 __math_state_restore();
14090
14091 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14092 if (prev->gs | next->gs)
14093 lazy_load_gs(next->gs);
14094
14095 - percpu_write(current_task, next_p);
14096 -
14097 return prev_p;
14098 }
14099
14100 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14101 } while (count++ < 16);
14102 return 0;
14103 }
14104 -
14105 diff -urNp linux-3.0.3/arch/x86/kernel/process_64.c linux-3.0.3/arch/x86/kernel/process_64.c
14106 --- linux-3.0.3/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14107 +++ linux-3.0.3/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14108 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14109 void exit_idle(void)
14110 {
14111 /* idle loop has pid 0 */
14112 - if (current->pid)
14113 + if (task_pid_nr(current))
14114 return;
14115 __exit_idle();
14116 }
14117 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14118 struct pt_regs *childregs;
14119 struct task_struct *me = current;
14120
14121 - childregs = ((struct pt_regs *)
14122 - (THREAD_SIZE + task_stack_page(p))) - 1;
14123 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14124 *childregs = *regs;
14125
14126 childregs->ax = 0;
14127 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14128 p->thread.sp = (unsigned long) childregs;
14129 p->thread.sp0 = (unsigned long) (childregs+1);
14130 p->thread.usersp = me->thread.usersp;
14131 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14132
14133 set_tsk_thread_flag(p, TIF_FORK);
14134
14135 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14136 struct thread_struct *prev = &prev_p->thread;
14137 struct thread_struct *next = &next_p->thread;
14138 int cpu = smp_processor_id();
14139 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14140 + struct tss_struct *tss = init_tss + cpu;
14141 unsigned fsindex, gsindex;
14142 bool preload_fpu;
14143
14144 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14145 prev->usersp = percpu_read(old_rsp);
14146 percpu_write(old_rsp, next->usersp);
14147 percpu_write(current_task, next_p);
14148 + percpu_write(current_tinfo, &next_p->tinfo);
14149
14150 - percpu_write(kernel_stack,
14151 - (unsigned long)task_stack_page(next_p) +
14152 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14153 + percpu_write(kernel_stack, next->sp0);
14154
14155 /*
14156 * Now maybe reload the debug registers and handle I/O bitmaps
14157 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14158 if (!p || p == current || p->state == TASK_RUNNING)
14159 return 0;
14160 stack = (unsigned long)task_stack_page(p);
14161 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14162 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14163 return 0;
14164 fp = *(u64 *)(p->thread.sp);
14165 do {
14166 - if (fp < (unsigned long)stack ||
14167 - fp >= (unsigned long)stack+THREAD_SIZE)
14168 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14169 return 0;
14170 ip = *(u64 *)(fp+8);
14171 if (!in_sched_functions(ip))
14172 diff -urNp linux-3.0.3/arch/x86/kernel/process.c linux-3.0.3/arch/x86/kernel/process.c
14173 --- linux-3.0.3/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14174 +++ linux-3.0.3/arch/x86/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
14175 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14176
14177 void free_thread_info(struct thread_info *ti)
14178 {
14179 - free_thread_xstate(ti->task);
14180 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14181 }
14182
14183 +static struct kmem_cache *task_struct_cachep;
14184 +
14185 void arch_task_cache_init(void)
14186 {
14187 - task_xstate_cachep =
14188 - kmem_cache_create("task_xstate", xstate_size,
14189 + /* create a slab on which task_structs can be allocated */
14190 + task_struct_cachep =
14191 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14192 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14193 +
14194 + task_xstate_cachep =
14195 + kmem_cache_create("task_xstate", xstate_size,
14196 __alignof__(union thread_xstate),
14197 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14198 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14199 +}
14200 +
14201 +struct task_struct *alloc_task_struct_node(int node)
14202 +{
14203 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14204 +}
14205 +
14206 +void free_task_struct(struct task_struct *task)
14207 +{
14208 + free_thread_xstate(task);
14209 + kmem_cache_free(task_struct_cachep, task);
14210 }
14211
14212 /*
14213 @@ -70,7 +87,7 @@ void exit_thread(void)
14214 unsigned long *bp = t->io_bitmap_ptr;
14215
14216 if (bp) {
14217 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14218 + struct tss_struct *tss = init_tss + get_cpu();
14219
14220 t->io_bitmap_ptr = NULL;
14221 clear_thread_flag(TIF_IO_BITMAP);
14222 @@ -106,7 +123,7 @@ void show_regs_common(void)
14223
14224 printk(KERN_CONT "\n");
14225 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14226 - current->pid, current->comm, print_tainted(),
14227 + task_pid_nr(current), current->comm, print_tainted(),
14228 init_utsname()->release,
14229 (int)strcspn(init_utsname()->version, " "),
14230 init_utsname()->version);
14231 @@ -120,6 +137,9 @@ void flush_thread(void)
14232 {
14233 struct task_struct *tsk = current;
14234
14235 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14236 + loadsegment(gs, 0);
14237 +#endif
14238 flush_ptrace_hw_breakpoint(tsk);
14239 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14240 /*
14241 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14242 regs.di = (unsigned long) arg;
14243
14244 #ifdef CONFIG_X86_32
14245 - regs.ds = __USER_DS;
14246 - regs.es = __USER_DS;
14247 + regs.ds = __KERNEL_DS;
14248 + regs.es = __KERNEL_DS;
14249 regs.fs = __KERNEL_PERCPU;
14250 - regs.gs = __KERNEL_STACK_CANARY;
14251 + savesegment(gs, regs.gs);
14252 #else
14253 regs.ss = __KERNEL_DS;
14254 #endif
14255 @@ -403,7 +423,7 @@ void default_idle(void)
14256 EXPORT_SYMBOL(default_idle);
14257 #endif
14258
14259 -void stop_this_cpu(void *dummy)
14260 +__noreturn void stop_this_cpu(void *dummy)
14261 {
14262 local_irq_disable();
14263 /*
14264 @@ -668,16 +688,34 @@ static int __init idle_setup(char *str)
14265 }
14266 early_param("idle", idle_setup);
14267
14268 -unsigned long arch_align_stack(unsigned long sp)
14269 +#ifdef CONFIG_PAX_RANDKSTACK
14270 +asmlinkage void pax_randomize_kstack(void)
14271 {
14272 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14273 - sp -= get_random_int() % 8192;
14274 - return sp & ~0xf;
14275 -}
14276 + struct thread_struct *thread = &current->thread;
14277 + unsigned long time;
14278
14279 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14280 -{
14281 - unsigned long range_end = mm->brk + 0x02000000;
14282 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14283 -}
14284 + if (!randomize_va_space)
14285 + return;
14286 +
14287 + rdtscl(time);
14288 +
14289 + /* P4 seems to return a 0 LSB, ignore it */
14290 +#ifdef CONFIG_MPENTIUM4
14291 + time &= 0x3EUL;
14292 + time <<= 2;
14293 +#elif defined(CONFIG_X86_64)
14294 + time &= 0xFUL;
14295 + time <<= 4;
14296 +#else
14297 + time &= 0x1FUL;
14298 + time <<= 3;
14299 +#endif
14300 +
14301 + thread->sp0 ^= time;
14302 + load_sp0(init_tss + smp_processor_id(), thread);
14303
14304 +#ifdef CONFIG_X86_64
14305 + percpu_write(kernel_stack, thread->sp0);
14306 +#endif
14307 +}
14308 +#endif
14309 diff -urNp linux-3.0.3/arch/x86/kernel/ptrace.c linux-3.0.3/arch/x86/kernel/ptrace.c
14310 --- linux-3.0.3/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14311 +++ linux-3.0.3/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14312 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14313 unsigned long addr, unsigned long data)
14314 {
14315 int ret;
14316 - unsigned long __user *datap = (unsigned long __user *)data;
14317 + unsigned long __user *datap = (__force unsigned long __user *)data;
14318
14319 switch (request) {
14320 /* read the word at location addr in the USER area. */
14321 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14322 if ((int) addr < 0)
14323 return -EIO;
14324 ret = do_get_thread_area(child, addr,
14325 - (struct user_desc __user *)data);
14326 + (__force struct user_desc __user *) data);
14327 break;
14328
14329 case PTRACE_SET_THREAD_AREA:
14330 if ((int) addr < 0)
14331 return -EIO;
14332 ret = do_set_thread_area(child, addr,
14333 - (struct user_desc __user *)data, 0);
14334 + (__force struct user_desc __user *) data, 0);
14335 break;
14336 #endif
14337
14338 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14339 memset(info, 0, sizeof(*info));
14340 info->si_signo = SIGTRAP;
14341 info->si_code = si_code;
14342 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14343 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14344 }
14345
14346 void user_single_step_siginfo(struct task_struct *tsk,
14347 diff -urNp linux-3.0.3/arch/x86/kernel/pvclock.c linux-3.0.3/arch/x86/kernel/pvclock.c
14348 --- linux-3.0.3/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14349 +++ linux-3.0.3/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14350 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14351 return pv_tsc_khz;
14352 }
14353
14354 -static atomic64_t last_value = ATOMIC64_INIT(0);
14355 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14356
14357 void pvclock_resume(void)
14358 {
14359 - atomic64_set(&last_value, 0);
14360 + atomic64_set_unchecked(&last_value, 0);
14361 }
14362
14363 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14364 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14365 * updating at the same time, and one of them could be slightly behind,
14366 * making the assumption that last_value always go forward fail to hold.
14367 */
14368 - last = atomic64_read(&last_value);
14369 + last = atomic64_read_unchecked(&last_value);
14370 do {
14371 if (ret < last)
14372 return last;
14373 - last = atomic64_cmpxchg(&last_value, last, ret);
14374 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14375 } while (unlikely(last != ret));
14376
14377 return ret;
14378 diff -urNp linux-3.0.3/arch/x86/kernel/reboot.c linux-3.0.3/arch/x86/kernel/reboot.c
14379 --- linux-3.0.3/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14380 +++ linux-3.0.3/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14381 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14382 EXPORT_SYMBOL(pm_power_off);
14383
14384 static const struct desc_ptr no_idt = {};
14385 -static int reboot_mode;
14386 +static unsigned short reboot_mode;
14387 enum reboot_type reboot_type = BOOT_ACPI;
14388 int reboot_force;
14389
14390 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14391 extern const unsigned char machine_real_restart_asm[];
14392 extern const u64 machine_real_restart_gdt[3];
14393
14394 -void machine_real_restart(unsigned int type)
14395 +__noreturn void machine_real_restart(unsigned int type)
14396 {
14397 void *restart_va;
14398 unsigned long restart_pa;
14399 - void (*restart_lowmem)(unsigned int);
14400 + void (* __noreturn restart_lowmem)(unsigned int);
14401 u64 *lowmem_gdt;
14402
14403 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14404 + struct desc_struct *gdt;
14405 +#endif
14406 +
14407 local_irq_disable();
14408
14409 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14410 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14411 boot)". This seems like a fairly standard thing that gets set by
14412 REBOOT.COM programs, and the previous reset routine did this
14413 too. */
14414 - *((unsigned short *)0x472) = reboot_mode;
14415 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14416
14417 /* Patch the GDT in the low memory trampoline */
14418 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14419
14420 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14421 restart_pa = virt_to_phys(restart_va);
14422 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14423 + restart_lowmem = (void *)restart_pa;
14424
14425 /* GDT[0]: GDT self-pointer */
14426 lowmem_gdt[0] =
14427 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14428 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14429
14430 /* Jump to the identity-mapped low memory code */
14431 +
14432 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14433 + gdt = get_cpu_gdt_table(smp_processor_id());
14434 + pax_open_kernel();
14435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14436 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14437 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14438 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14439 +#endif
14440 +#ifdef CONFIG_PAX_KERNEXEC
14441 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14442 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14443 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14444 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14445 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14446 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14447 +#endif
14448 + pax_close_kernel();
14449 +#endif
14450 +
14451 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14452 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14453 + unreachable();
14454 +#else
14455 restart_lowmem(type);
14456 +#endif
14457 +
14458 }
14459 #ifdef CONFIG_APM_MODULE
14460 EXPORT_SYMBOL(machine_real_restart);
14461 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14462 * try to force a triple fault and then cycle between hitting the keyboard
14463 * controller and doing that
14464 */
14465 -static void native_machine_emergency_restart(void)
14466 +__noreturn static void native_machine_emergency_restart(void)
14467 {
14468 int i;
14469 int attempt = 0;
14470 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14471 #endif
14472 }
14473
14474 -static void __machine_emergency_restart(int emergency)
14475 +static __noreturn void __machine_emergency_restart(int emergency)
14476 {
14477 reboot_emergency = emergency;
14478 machine_ops.emergency_restart();
14479 }
14480
14481 -static void native_machine_restart(char *__unused)
14482 +static __noreturn void native_machine_restart(char *__unused)
14483 {
14484 printk("machine restart\n");
14485
14486 @@ -662,7 +692,7 @@ static void native_machine_restart(char
14487 __machine_emergency_restart(0);
14488 }
14489
14490 -static void native_machine_halt(void)
14491 +static __noreturn void native_machine_halt(void)
14492 {
14493 /* stop other cpus and apics */
14494 machine_shutdown();
14495 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14496 stop_this_cpu(NULL);
14497 }
14498
14499 -static void native_machine_power_off(void)
14500 +__noreturn static void native_machine_power_off(void)
14501 {
14502 if (pm_power_off) {
14503 if (!reboot_force)
14504 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14505 }
14506 /* a fallback in case there is no PM info available */
14507 tboot_shutdown(TB_SHUTDOWN_HALT);
14508 + unreachable();
14509 }
14510
14511 struct machine_ops machine_ops = {
14512 diff -urNp linux-3.0.3/arch/x86/kernel/setup.c linux-3.0.3/arch/x86/kernel/setup.c
14513 --- linux-3.0.3/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14514 +++ linux-3.0.3/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14515 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14516 * area (640->1Mb) as ram even though it is not.
14517 * take them out.
14518 */
14519 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14520 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14521 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14522 }
14523
14524 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14525
14526 if (!boot_params.hdr.root_flags)
14527 root_mountflags &= ~MS_RDONLY;
14528 - init_mm.start_code = (unsigned long) _text;
14529 - init_mm.end_code = (unsigned long) _etext;
14530 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14531 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14532 init_mm.end_data = (unsigned long) _edata;
14533 init_mm.brk = _brk_end;
14534
14535 - code_resource.start = virt_to_phys(_text);
14536 - code_resource.end = virt_to_phys(_etext)-1;
14537 - data_resource.start = virt_to_phys(_etext);
14538 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14539 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14540 + data_resource.start = virt_to_phys(_sdata);
14541 data_resource.end = virt_to_phys(_edata)-1;
14542 bss_resource.start = virt_to_phys(&__bss_start);
14543 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14544 diff -urNp linux-3.0.3/arch/x86/kernel/setup_percpu.c linux-3.0.3/arch/x86/kernel/setup_percpu.c
14545 --- linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14546 +++ linux-3.0.3/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14547 @@ -21,19 +21,17 @@
14548 #include <asm/cpu.h>
14549 #include <asm/stackprotector.h>
14550
14551 -DEFINE_PER_CPU(int, cpu_number);
14552 +#ifdef CONFIG_SMP
14553 +DEFINE_PER_CPU(unsigned int, cpu_number);
14554 EXPORT_PER_CPU_SYMBOL(cpu_number);
14555 +#endif
14556
14557 -#ifdef CONFIG_X86_64
14558 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14559 -#else
14560 -#define BOOT_PERCPU_OFFSET 0
14561 -#endif
14562
14563 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14564 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14565
14566 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14567 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14568 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14569 };
14570 EXPORT_SYMBOL(__per_cpu_offset);
14571 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14572 {
14573 #ifdef CONFIG_X86_32
14574 struct desc_struct gdt;
14575 + unsigned long base = per_cpu_offset(cpu);
14576
14577 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14578 - 0x2 | DESCTYPE_S, 0x8);
14579 - gdt.s = 1;
14580 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14581 + 0x83 | DESCTYPE_S, 0xC);
14582 write_gdt_entry(get_cpu_gdt_table(cpu),
14583 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14584 #endif
14585 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14586 /* alrighty, percpu areas up and running */
14587 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14588 for_each_possible_cpu(cpu) {
14589 +#ifdef CONFIG_CC_STACKPROTECTOR
14590 +#ifdef CONFIG_X86_32
14591 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14592 +#endif
14593 +#endif
14594 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14595 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14596 per_cpu(cpu_number, cpu) = cpu;
14597 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14598 */
14599 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14600 #endif
14601 +#ifdef CONFIG_CC_STACKPROTECTOR
14602 +#ifdef CONFIG_X86_32
14603 + if (!cpu)
14604 + per_cpu(stack_canary.canary, cpu) = canary;
14605 +#endif
14606 +#endif
14607 /*
14608 * Up to this point, the boot CPU has been using .init.data
14609 * area. Reload any changed state for the boot CPU.
14610 diff -urNp linux-3.0.3/arch/x86/kernel/signal.c linux-3.0.3/arch/x86/kernel/signal.c
14611 --- linux-3.0.3/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14612 +++ linux-3.0.3/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14613 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14614 * Align the stack pointer according to the i386 ABI,
14615 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14616 */
14617 - sp = ((sp + 4) & -16ul) - 4;
14618 + sp = ((sp - 12) & -16ul) - 4;
14619 #else /* !CONFIG_X86_32 */
14620 sp = round_down(sp, 16) - 8;
14621 #endif
14622 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14623 * Return an always-bogus address instead so we will die with SIGSEGV.
14624 */
14625 if (onsigstack && !likely(on_sig_stack(sp)))
14626 - return (void __user *)-1L;
14627 + return (__force void __user *)-1L;
14628
14629 /* save i387 state */
14630 if (used_math() && save_i387_xstate(*fpstate) < 0)
14631 - return (void __user *)-1L;
14632 + return (__force void __user *)-1L;
14633
14634 return (void __user *)sp;
14635 }
14636 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14637 }
14638
14639 if (current->mm->context.vdso)
14640 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14641 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14642 else
14643 - restorer = &frame->retcode;
14644 + restorer = (void __user *)&frame->retcode;
14645 if (ka->sa.sa_flags & SA_RESTORER)
14646 restorer = ka->sa.sa_restorer;
14647
14648 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14649 * reasons and because gdb uses it as a signature to notice
14650 * signal handler stack frames.
14651 */
14652 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14653 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14654
14655 if (err)
14656 return -EFAULT;
14657 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14658 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14659
14660 /* Set up to return from userspace. */
14661 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14662 + if (current->mm->context.vdso)
14663 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14664 + else
14665 + restorer = (void __user *)&frame->retcode;
14666 if (ka->sa.sa_flags & SA_RESTORER)
14667 restorer = ka->sa.sa_restorer;
14668 put_user_ex(restorer, &frame->pretcode);
14669 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14670 * reasons and because gdb uses it as a signature to notice
14671 * signal handler stack frames.
14672 */
14673 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14674 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14675 } put_user_catch(err);
14676
14677 if (err)
14678 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14679 int signr;
14680 sigset_t *oldset;
14681
14682 + pax_track_stack();
14683 +
14684 /*
14685 * We want the common case to go fast, which is why we may in certain
14686 * cases get here from kernel mode. Just return without doing anything
14687 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14688 * X86_32: vm86 regs switched out by assembly code before reaching
14689 * here, so testing against kernel CS suffices.
14690 */
14691 - if (!user_mode(regs))
14692 + if (!user_mode_novm(regs))
14693 return;
14694
14695 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14696 diff -urNp linux-3.0.3/arch/x86/kernel/smpboot.c linux-3.0.3/arch/x86/kernel/smpboot.c
14697 --- linux-3.0.3/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14698 +++ linux-3.0.3/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14699 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14700 set_idle_for_cpu(cpu, c_idle.idle);
14701 do_rest:
14702 per_cpu(current_task, cpu) = c_idle.idle;
14703 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14704 #ifdef CONFIG_X86_32
14705 /* Stack for startup_32 can be just as for start_secondary onwards */
14706 irq_ctx_init(cpu);
14707 #else
14708 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14709 initial_gs = per_cpu_offset(cpu);
14710 - per_cpu(kernel_stack, cpu) =
14711 - (unsigned long)task_stack_page(c_idle.idle) -
14712 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14713 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14714 #endif
14715 +
14716 + pax_open_kernel();
14717 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14718 + pax_close_kernel();
14719 +
14720 initial_code = (unsigned long)start_secondary;
14721 stack_start = c_idle.idle->thread.sp;
14722
14723 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14724
14725 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14726
14727 +#ifdef CONFIG_PAX_PER_CPU_PGD
14728 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14729 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14730 + KERNEL_PGD_PTRS);
14731 +#endif
14732 +
14733 err = do_boot_cpu(apicid, cpu);
14734 if (err) {
14735 pr_debug("do_boot_cpu failed %d\n", err);
14736 diff -urNp linux-3.0.3/arch/x86/kernel/step.c linux-3.0.3/arch/x86/kernel/step.c
14737 --- linux-3.0.3/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14738 +++ linux-3.0.3/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14739 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14740 struct desc_struct *desc;
14741 unsigned long base;
14742
14743 - seg &= ~7UL;
14744 + seg >>= 3;
14745
14746 mutex_lock(&child->mm->context.lock);
14747 - if (unlikely((seg >> 3) >= child->mm->context.size))
14748 + if (unlikely(seg >= child->mm->context.size))
14749 addr = -1L; /* bogus selector, access would fault */
14750 else {
14751 desc = child->mm->context.ldt + seg;
14752 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14753 addr += base;
14754 }
14755 mutex_unlock(&child->mm->context.lock);
14756 - }
14757 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14758 + addr = ktla_ktva(addr);
14759
14760 return addr;
14761 }
14762 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14763 unsigned char opcode[15];
14764 unsigned long addr = convert_ip_to_linear(child, regs);
14765
14766 + if (addr == -EINVAL)
14767 + return 0;
14768 +
14769 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14770 for (i = 0; i < copied; i++) {
14771 switch (opcode[i]) {
14772 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14773
14774 #ifdef CONFIG_X86_64
14775 case 0x40 ... 0x4f:
14776 - if (regs->cs != __USER_CS)
14777 + if ((regs->cs & 0xffff) != __USER_CS)
14778 /* 32-bit mode: register increment */
14779 return 0;
14780 /* 64-bit mode: REX prefix */
14781 diff -urNp linux-3.0.3/arch/x86/kernel/syscall_table_32.S linux-3.0.3/arch/x86/kernel/syscall_table_32.S
14782 --- linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14783 +++ linux-3.0.3/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14784 @@ -1,3 +1,4 @@
14785 +.section .rodata,"a",@progbits
14786 ENTRY(sys_call_table)
14787 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14788 .long sys_exit
14789 diff -urNp linux-3.0.3/arch/x86/kernel/sys_i386_32.c linux-3.0.3/arch/x86/kernel/sys_i386_32.c
14790 --- linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14791 +++ linux-3.0.3/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14792 @@ -24,17 +24,224 @@
14793
14794 #include <asm/syscalls.h>
14795
14796 -/*
14797 - * Do a system call from kernel instead of calling sys_execve so we
14798 - * end up with proper pt_regs.
14799 - */
14800 -int kernel_execve(const char *filename,
14801 - const char *const argv[],
14802 - const char *const envp[])
14803 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14804 {
14805 - long __res;
14806 - asm volatile ("int $0x80"
14807 - : "=a" (__res)
14808 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14809 - return __res;
14810 + unsigned long pax_task_size = TASK_SIZE;
14811 +
14812 +#ifdef CONFIG_PAX_SEGMEXEC
14813 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14814 + pax_task_size = SEGMEXEC_TASK_SIZE;
14815 +#endif
14816 +
14817 + if (len > pax_task_size || addr > pax_task_size - len)
14818 + return -EINVAL;
14819 +
14820 + return 0;
14821 +}
14822 +
14823 +unsigned long
14824 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14825 + unsigned long len, unsigned long pgoff, unsigned long flags)
14826 +{
14827 + struct mm_struct *mm = current->mm;
14828 + struct vm_area_struct *vma;
14829 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14830 +
14831 +#ifdef CONFIG_PAX_SEGMEXEC
14832 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14833 + pax_task_size = SEGMEXEC_TASK_SIZE;
14834 +#endif
14835 +
14836 + pax_task_size -= PAGE_SIZE;
14837 +
14838 + if (len > pax_task_size)
14839 + return -ENOMEM;
14840 +
14841 + if (flags & MAP_FIXED)
14842 + return addr;
14843 +
14844 +#ifdef CONFIG_PAX_RANDMMAP
14845 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14846 +#endif
14847 +
14848 + if (addr) {
14849 + addr = PAGE_ALIGN(addr);
14850 + if (pax_task_size - len >= addr) {
14851 + vma = find_vma(mm, addr);
14852 + if (check_heap_stack_gap(vma, addr, len))
14853 + return addr;
14854 + }
14855 + }
14856 + if (len > mm->cached_hole_size) {
14857 + start_addr = addr = mm->free_area_cache;
14858 + } else {
14859 + start_addr = addr = mm->mmap_base;
14860 + mm->cached_hole_size = 0;
14861 + }
14862 +
14863 +#ifdef CONFIG_PAX_PAGEEXEC
14864 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14865 + start_addr = 0x00110000UL;
14866 +
14867 +#ifdef CONFIG_PAX_RANDMMAP
14868 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14869 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14870 +#endif
14871 +
14872 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14873 + start_addr = addr = mm->mmap_base;
14874 + else
14875 + addr = start_addr;
14876 + }
14877 +#endif
14878 +
14879 +full_search:
14880 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14881 + /* At this point: (!vma || addr < vma->vm_end). */
14882 + if (pax_task_size - len < addr) {
14883 + /*
14884 + * Start a new search - just in case we missed
14885 + * some holes.
14886 + */
14887 + if (start_addr != mm->mmap_base) {
14888 + start_addr = addr = mm->mmap_base;
14889 + mm->cached_hole_size = 0;
14890 + goto full_search;
14891 + }
14892 + return -ENOMEM;
14893 + }
14894 + if (check_heap_stack_gap(vma, addr, len))
14895 + break;
14896 + if (addr + mm->cached_hole_size < vma->vm_start)
14897 + mm->cached_hole_size = vma->vm_start - addr;
14898 + addr = vma->vm_end;
14899 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14900 + start_addr = addr = mm->mmap_base;
14901 + mm->cached_hole_size = 0;
14902 + goto full_search;
14903 + }
14904 + }
14905 +
14906 + /*
14907 + * Remember the place where we stopped the search:
14908 + */
14909 + mm->free_area_cache = addr + len;
14910 + return addr;
14911 +}
14912 +
14913 +unsigned long
14914 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14915 + const unsigned long len, const unsigned long pgoff,
14916 + const unsigned long flags)
14917 +{
14918 + struct vm_area_struct *vma;
14919 + struct mm_struct *mm = current->mm;
14920 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14921 +
14922 +#ifdef CONFIG_PAX_SEGMEXEC
14923 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14924 + pax_task_size = SEGMEXEC_TASK_SIZE;
14925 +#endif
14926 +
14927 + pax_task_size -= PAGE_SIZE;
14928 +
14929 + /* requested length too big for entire address space */
14930 + if (len > pax_task_size)
14931 + return -ENOMEM;
14932 +
14933 + if (flags & MAP_FIXED)
14934 + return addr;
14935 +
14936 +#ifdef CONFIG_PAX_PAGEEXEC
14937 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14938 + goto bottomup;
14939 +#endif
14940 +
14941 +#ifdef CONFIG_PAX_RANDMMAP
14942 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14943 +#endif
14944 +
14945 + /* requesting a specific address */
14946 + if (addr) {
14947 + addr = PAGE_ALIGN(addr);
14948 + if (pax_task_size - len >= addr) {
14949 + vma = find_vma(mm, addr);
14950 + if (check_heap_stack_gap(vma, addr, len))
14951 + return addr;
14952 + }
14953 + }
14954 +
14955 + /* check if free_area_cache is useful for us */
14956 + if (len <= mm->cached_hole_size) {
14957 + mm->cached_hole_size = 0;
14958 + mm->free_area_cache = mm->mmap_base;
14959 + }
14960 +
14961 + /* either no address requested or can't fit in requested address hole */
14962 + addr = mm->free_area_cache;
14963 +
14964 + /* make sure it can fit in the remaining address space */
14965 + if (addr > len) {
14966 + vma = find_vma(mm, addr-len);
14967 + if (check_heap_stack_gap(vma, addr - len, len))
14968 + /* remember the address as a hint for next time */
14969 + return (mm->free_area_cache = addr-len);
14970 + }
14971 +
14972 + if (mm->mmap_base < len)
14973 + goto bottomup;
14974 +
14975 + addr = mm->mmap_base-len;
14976 +
14977 + do {
14978 + /*
14979 + * Lookup failure means no vma is above this address,
14980 + * else if new region fits below vma->vm_start,
14981 + * return with success:
14982 + */
14983 + vma = find_vma(mm, addr);
14984 + if (check_heap_stack_gap(vma, addr, len))
14985 + /* remember the address as a hint for next time */
14986 + return (mm->free_area_cache = addr);
14987 +
14988 + /* remember the largest hole we saw so far */
14989 + if (addr + mm->cached_hole_size < vma->vm_start)
14990 + mm->cached_hole_size = vma->vm_start - addr;
14991 +
14992 + /* try just below the current vma->vm_start */
14993 + addr = skip_heap_stack_gap(vma, len);
14994 + } while (!IS_ERR_VALUE(addr));
14995 +
14996 +bottomup:
14997 + /*
14998 + * A failed mmap() very likely causes application failure,
14999 + * so fall back to the bottom-up function here. This scenario
15000 + * can happen with large stack limits and large mmap()
15001 + * allocations.
15002 + */
15003 +
15004 +#ifdef CONFIG_PAX_SEGMEXEC
15005 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15006 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15007 + else
15008 +#endif
15009 +
15010 + mm->mmap_base = TASK_UNMAPPED_BASE;
15011 +
15012 +#ifdef CONFIG_PAX_RANDMMAP
15013 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15014 + mm->mmap_base += mm->delta_mmap;
15015 +#endif
15016 +
15017 + mm->free_area_cache = mm->mmap_base;
15018 + mm->cached_hole_size = ~0UL;
15019 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15020 + /*
15021 + * Restore the topdown base:
15022 + */
15023 + mm->mmap_base = base;
15024 + mm->free_area_cache = base;
15025 + mm->cached_hole_size = ~0UL;
15026 +
15027 + return addr;
15028 }
15029 diff -urNp linux-3.0.3/arch/x86/kernel/sys_x86_64.c linux-3.0.3/arch/x86/kernel/sys_x86_64.c
15030 --- linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15031 +++ linux-3.0.3/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15032 @@ -32,8 +32,8 @@ out:
15033 return error;
15034 }
15035
15036 -static void find_start_end(unsigned long flags, unsigned long *begin,
15037 - unsigned long *end)
15038 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15039 + unsigned long *begin, unsigned long *end)
15040 {
15041 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15042 unsigned long new_begin;
15043 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15044 *begin = new_begin;
15045 }
15046 } else {
15047 - *begin = TASK_UNMAPPED_BASE;
15048 + *begin = mm->mmap_base;
15049 *end = TASK_SIZE;
15050 }
15051 }
15052 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15053 if (flags & MAP_FIXED)
15054 return addr;
15055
15056 - find_start_end(flags, &begin, &end);
15057 + find_start_end(mm, flags, &begin, &end);
15058
15059 if (len > end)
15060 return -ENOMEM;
15061
15062 +#ifdef CONFIG_PAX_RANDMMAP
15063 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15064 +#endif
15065 +
15066 if (addr) {
15067 addr = PAGE_ALIGN(addr);
15068 vma = find_vma(mm, addr);
15069 - if (end - len >= addr &&
15070 - (!vma || addr + len <= vma->vm_start))
15071 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15072 return addr;
15073 }
15074 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15075 @@ -106,7 +109,7 @@ full_search:
15076 }
15077 return -ENOMEM;
15078 }
15079 - if (!vma || addr + len <= vma->vm_start) {
15080 + if (check_heap_stack_gap(vma, addr, len)) {
15081 /*
15082 * Remember the place where we stopped the search:
15083 */
15084 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15085 {
15086 struct vm_area_struct *vma;
15087 struct mm_struct *mm = current->mm;
15088 - unsigned long addr = addr0;
15089 + unsigned long base = mm->mmap_base, addr = addr0;
15090
15091 /* requested length too big for entire address space */
15092 if (len > TASK_SIZE)
15093 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15094 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15095 goto bottomup;
15096
15097 +#ifdef CONFIG_PAX_RANDMMAP
15098 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15099 +#endif
15100 +
15101 /* requesting a specific address */
15102 if (addr) {
15103 addr = PAGE_ALIGN(addr);
15104 - vma = find_vma(mm, addr);
15105 - if (TASK_SIZE - len >= addr &&
15106 - (!vma || addr + len <= vma->vm_start))
15107 - return addr;
15108 + if (TASK_SIZE - len >= addr) {
15109 + vma = find_vma(mm, addr);
15110 + if (check_heap_stack_gap(vma, addr, len))
15111 + return addr;
15112 + }
15113 }
15114
15115 /* check if free_area_cache is useful for us */
15116 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15117 /* make sure it can fit in the remaining address space */
15118 if (addr > len) {
15119 vma = find_vma(mm, addr-len);
15120 - if (!vma || addr <= vma->vm_start)
15121 + if (check_heap_stack_gap(vma, addr - len, len))
15122 /* remember the address as a hint for next time */
15123 return mm->free_area_cache = addr-len;
15124 }
15125 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15126 * return with success:
15127 */
15128 vma = find_vma(mm, addr);
15129 - if (!vma || addr+len <= vma->vm_start)
15130 + if (check_heap_stack_gap(vma, addr, len))
15131 /* remember the address as a hint for next time */
15132 return mm->free_area_cache = addr;
15133
15134 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15135 mm->cached_hole_size = vma->vm_start - addr;
15136
15137 /* try just below the current vma->vm_start */
15138 - addr = vma->vm_start-len;
15139 - } while (len < vma->vm_start);
15140 + addr = skip_heap_stack_gap(vma, len);
15141 + } while (!IS_ERR_VALUE(addr));
15142
15143 bottomup:
15144 /*
15145 @@ -198,13 +206,21 @@ bottomup:
15146 * can happen with large stack limits and large mmap()
15147 * allocations.
15148 */
15149 + mm->mmap_base = TASK_UNMAPPED_BASE;
15150 +
15151 +#ifdef CONFIG_PAX_RANDMMAP
15152 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15153 + mm->mmap_base += mm->delta_mmap;
15154 +#endif
15155 +
15156 + mm->free_area_cache = mm->mmap_base;
15157 mm->cached_hole_size = ~0UL;
15158 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15159 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15160 /*
15161 * Restore the topdown base:
15162 */
15163 - mm->free_area_cache = mm->mmap_base;
15164 + mm->mmap_base = base;
15165 + mm->free_area_cache = base;
15166 mm->cached_hole_size = ~0UL;
15167
15168 return addr;
15169 diff -urNp linux-3.0.3/arch/x86/kernel/tboot.c linux-3.0.3/arch/x86/kernel/tboot.c
15170 --- linux-3.0.3/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15171 +++ linux-3.0.3/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15172 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15173
15174 void tboot_shutdown(u32 shutdown_type)
15175 {
15176 - void (*shutdown)(void);
15177 + void (* __noreturn shutdown)(void);
15178
15179 if (!tboot_enabled())
15180 return;
15181 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15182
15183 switch_to_tboot_pt();
15184
15185 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15186 + shutdown = (void *)tboot->shutdown_entry;
15187 shutdown();
15188
15189 /* should not reach here */
15190 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15191 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15192 }
15193
15194 -static atomic_t ap_wfs_count;
15195 +static atomic_unchecked_t ap_wfs_count;
15196
15197 static int tboot_wait_for_aps(int num_aps)
15198 {
15199 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15200 {
15201 switch (action) {
15202 case CPU_DYING:
15203 - atomic_inc(&ap_wfs_count);
15204 + atomic_inc_unchecked(&ap_wfs_count);
15205 if (num_online_cpus() == 1)
15206 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15207 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15208 return NOTIFY_BAD;
15209 break;
15210 }
15211 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15212
15213 tboot_create_trampoline();
15214
15215 - atomic_set(&ap_wfs_count, 0);
15216 + atomic_set_unchecked(&ap_wfs_count, 0);
15217 register_hotcpu_notifier(&tboot_cpu_notifier);
15218 return 0;
15219 }
15220 diff -urNp linux-3.0.3/arch/x86/kernel/time.c linux-3.0.3/arch/x86/kernel/time.c
15221 --- linux-3.0.3/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15222 +++ linux-3.0.3/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15223 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15224 {
15225 unsigned long pc = instruction_pointer(regs);
15226
15227 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15228 + if (!user_mode(regs) && in_lock_functions(pc)) {
15229 #ifdef CONFIG_FRAME_POINTER
15230 - return *(unsigned long *)(regs->bp + sizeof(long));
15231 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15232 #else
15233 unsigned long *sp =
15234 (unsigned long *)kernel_stack_pointer(regs);
15235 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15236 * or above a saved flags. Eflags has bits 22-31 zero,
15237 * kernel addresses don't.
15238 */
15239 +
15240 +#ifdef CONFIG_PAX_KERNEXEC
15241 + return ktla_ktva(sp[0]);
15242 +#else
15243 if (sp[0] >> 22)
15244 return sp[0];
15245 if (sp[1] >> 22)
15246 return sp[1];
15247 #endif
15248 +
15249 +#endif
15250 }
15251 return pc;
15252 }
15253 diff -urNp linux-3.0.3/arch/x86/kernel/tls.c linux-3.0.3/arch/x86/kernel/tls.c
15254 --- linux-3.0.3/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15255 +++ linux-3.0.3/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15256 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15257 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15258 return -EINVAL;
15259
15260 +#ifdef CONFIG_PAX_SEGMEXEC
15261 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15262 + return -EINVAL;
15263 +#endif
15264 +
15265 set_tls_desc(p, idx, &info, 1);
15266
15267 return 0;
15268 diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_32.S linux-3.0.3/arch/x86/kernel/trampoline_32.S
15269 --- linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15270 +++ linux-3.0.3/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15271 @@ -32,6 +32,12 @@
15272 #include <asm/segment.h>
15273 #include <asm/page_types.h>
15274
15275 +#ifdef CONFIG_PAX_KERNEXEC
15276 +#define ta(X) (X)
15277 +#else
15278 +#define ta(X) ((X) - __PAGE_OFFSET)
15279 +#endif
15280 +
15281 #ifdef CONFIG_SMP
15282
15283 .section ".x86_trampoline","a"
15284 @@ -62,7 +68,7 @@ r_base = .
15285 inc %ax # protected mode (PE) bit
15286 lmsw %ax # into protected mode
15287 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15288 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15289 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15290
15291 # These need to be in the same 64K segment as the above;
15292 # hence we don't use the boot_gdt_descr defined in head.S
15293 diff -urNp linux-3.0.3/arch/x86/kernel/trampoline_64.S linux-3.0.3/arch/x86/kernel/trampoline_64.S
15294 --- linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15295 +++ linux-3.0.3/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15296 @@ -90,7 +90,7 @@ startup_32:
15297 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15298 movl %eax, %ds
15299
15300 - movl $X86_CR4_PAE, %eax
15301 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15302 movl %eax, %cr4 # Enable PAE mode
15303
15304 # Setup trampoline 4 level pagetables
15305 @@ -138,7 +138,7 @@ tidt:
15306 # so the kernel can live anywhere
15307 .balign 4
15308 tgdt:
15309 - .short tgdt_end - tgdt # gdt limit
15310 + .short tgdt_end - tgdt - 1 # gdt limit
15311 .long tgdt - r_base
15312 .short 0
15313 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15314 diff -urNp linux-3.0.3/arch/x86/kernel/traps.c linux-3.0.3/arch/x86/kernel/traps.c
15315 --- linux-3.0.3/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15316 +++ linux-3.0.3/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15317 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15318
15319 /* Do we ignore FPU interrupts ? */
15320 char ignore_fpu_irq;
15321 -
15322 -/*
15323 - * The IDT has to be page-aligned to simplify the Pentium
15324 - * F0 0F bug workaround.
15325 - */
15326 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15327 #endif
15328
15329 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15330 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15331 }
15332
15333 static void __kprobes
15334 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15335 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15336 long error_code, siginfo_t *info)
15337 {
15338 struct task_struct *tsk = current;
15339
15340 #ifdef CONFIG_X86_32
15341 - if (regs->flags & X86_VM_MASK) {
15342 + if (v8086_mode(regs)) {
15343 /*
15344 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15345 * On nmi (interrupt 2), do_trap should not be called.
15346 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15347 }
15348 #endif
15349
15350 - if (!user_mode(regs))
15351 + if (!user_mode_novm(regs))
15352 goto kernel_trap;
15353
15354 #ifdef CONFIG_X86_32
15355 @@ -157,7 +151,7 @@ trap_signal:
15356 printk_ratelimit()) {
15357 printk(KERN_INFO
15358 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15359 - tsk->comm, tsk->pid, str,
15360 + tsk->comm, task_pid_nr(tsk), str,
15361 regs->ip, regs->sp, error_code);
15362 print_vma_addr(" in ", regs->ip);
15363 printk("\n");
15364 @@ -174,8 +168,20 @@ kernel_trap:
15365 if (!fixup_exception(regs)) {
15366 tsk->thread.error_code = error_code;
15367 tsk->thread.trap_no = trapnr;
15368 +
15369 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15370 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15371 + str = "PAX: suspicious stack segment fault";
15372 +#endif
15373 +
15374 die(str, regs, error_code);
15375 }
15376 +
15377 +#ifdef CONFIG_PAX_REFCOUNT
15378 + if (trapnr == 4)
15379 + pax_report_refcount_overflow(regs);
15380 +#endif
15381 +
15382 return;
15383
15384 #ifdef CONFIG_X86_32
15385 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15386 conditional_sti(regs);
15387
15388 #ifdef CONFIG_X86_32
15389 - if (regs->flags & X86_VM_MASK)
15390 + if (v8086_mode(regs))
15391 goto gp_in_vm86;
15392 #endif
15393
15394 tsk = current;
15395 - if (!user_mode(regs))
15396 + if (!user_mode_novm(regs))
15397 goto gp_in_kernel;
15398
15399 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15400 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15401 + struct mm_struct *mm = tsk->mm;
15402 + unsigned long limit;
15403 +
15404 + down_write(&mm->mmap_sem);
15405 + limit = mm->context.user_cs_limit;
15406 + if (limit < TASK_SIZE) {
15407 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15408 + up_write(&mm->mmap_sem);
15409 + return;
15410 + }
15411 + up_write(&mm->mmap_sem);
15412 + }
15413 +#endif
15414 +
15415 tsk->thread.error_code = error_code;
15416 tsk->thread.trap_no = 13;
15417
15418 @@ -304,6 +326,13 @@ gp_in_kernel:
15419 if (notify_die(DIE_GPF, "general protection fault", regs,
15420 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15421 return;
15422 +
15423 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15424 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15425 + die("PAX: suspicious general protection fault", regs, error_code);
15426 + else
15427 +#endif
15428 +
15429 die("general protection fault", regs, error_code);
15430 }
15431
15432 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15433 dotraplinkage notrace __kprobes void
15434 do_nmi(struct pt_regs *regs, long error_code)
15435 {
15436 +
15437 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15438 + if (!user_mode(regs)) {
15439 + unsigned long cs = regs->cs & 0xFFFF;
15440 + unsigned long ip = ktva_ktla(regs->ip);
15441 +
15442 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15443 + regs->ip = ip;
15444 + }
15445 +#endif
15446 +
15447 nmi_enter();
15448
15449 inc_irq_stat(__nmi_count);
15450 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15451 /* It's safe to allow irq's after DR6 has been saved */
15452 preempt_conditional_sti(regs);
15453
15454 - if (regs->flags & X86_VM_MASK) {
15455 + if (v8086_mode(regs)) {
15456 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15457 error_code, 1);
15458 preempt_conditional_cli(regs);
15459 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15460 * We already checked v86 mode above, so we can check for kernel mode
15461 * by just checking the CPL of CS.
15462 */
15463 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15464 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15465 tsk->thread.debugreg6 &= ~DR_STEP;
15466 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15467 regs->flags &= ~X86_EFLAGS_TF;
15468 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15469 return;
15470 conditional_sti(regs);
15471
15472 - if (!user_mode_vm(regs))
15473 + if (!user_mode(regs))
15474 {
15475 if (!fixup_exception(regs)) {
15476 task->thread.error_code = error_code;
15477 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15478 void __math_state_restore(void)
15479 {
15480 struct thread_info *thread = current_thread_info();
15481 - struct task_struct *tsk = thread->task;
15482 + struct task_struct *tsk = current;
15483
15484 /*
15485 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15486 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15487 */
15488 asmlinkage void math_state_restore(void)
15489 {
15490 - struct thread_info *thread = current_thread_info();
15491 - struct task_struct *tsk = thread->task;
15492 + struct task_struct *tsk = current;
15493
15494 if (!tsk_used_math(tsk)) {
15495 local_irq_enable();
15496 diff -urNp linux-3.0.3/arch/x86/kernel/verify_cpu.S linux-3.0.3/arch/x86/kernel/verify_cpu.S
15497 --- linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15498 +++ linux-3.0.3/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15499 @@ -20,6 +20,7 @@
15500 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15501 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15502 * arch/x86/kernel/head_32.S: processor startup
15503 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15504 *
15505 * verify_cpu, returns the status of longmode and SSE in register %eax.
15506 * 0: Success 1: Failure
15507 diff -urNp linux-3.0.3/arch/x86/kernel/vm86_32.c linux-3.0.3/arch/x86/kernel/vm86_32.c
15508 --- linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15509 +++ linux-3.0.3/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15510 @@ -41,6 +41,7 @@
15511 #include <linux/ptrace.h>
15512 #include <linux/audit.h>
15513 #include <linux/stddef.h>
15514 +#include <linux/grsecurity.h>
15515
15516 #include <asm/uaccess.h>
15517 #include <asm/io.h>
15518 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15519 do_exit(SIGSEGV);
15520 }
15521
15522 - tss = &per_cpu(init_tss, get_cpu());
15523 + tss = init_tss + get_cpu();
15524 current->thread.sp0 = current->thread.saved_sp0;
15525 current->thread.sysenter_cs = __KERNEL_CS;
15526 load_sp0(tss, &current->thread);
15527 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15528 struct task_struct *tsk;
15529 int tmp, ret = -EPERM;
15530
15531 +#ifdef CONFIG_GRKERNSEC_VM86
15532 + if (!capable(CAP_SYS_RAWIO)) {
15533 + gr_handle_vm86();
15534 + goto out;
15535 + }
15536 +#endif
15537 +
15538 tsk = current;
15539 if (tsk->thread.saved_sp0)
15540 goto out;
15541 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15542 int tmp, ret;
15543 struct vm86plus_struct __user *v86;
15544
15545 +#ifdef CONFIG_GRKERNSEC_VM86
15546 + if (!capable(CAP_SYS_RAWIO)) {
15547 + gr_handle_vm86();
15548 + ret = -EPERM;
15549 + goto out;
15550 + }
15551 +#endif
15552 +
15553 tsk = current;
15554 switch (cmd) {
15555 case VM86_REQUEST_IRQ:
15556 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15557 tsk->thread.saved_fs = info->regs32->fs;
15558 tsk->thread.saved_gs = get_user_gs(info->regs32);
15559
15560 - tss = &per_cpu(init_tss, get_cpu());
15561 + tss = init_tss + get_cpu();
15562 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15563 if (cpu_has_sep)
15564 tsk->thread.sysenter_cs = 0;
15565 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15566 goto cannot_handle;
15567 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15568 goto cannot_handle;
15569 - intr_ptr = (unsigned long __user *) (i << 2);
15570 + intr_ptr = (__force unsigned long __user *) (i << 2);
15571 if (get_user(segoffs, intr_ptr))
15572 goto cannot_handle;
15573 if ((segoffs >> 16) == BIOSSEG)
15574 diff -urNp linux-3.0.3/arch/x86/kernel/vmlinux.lds.S linux-3.0.3/arch/x86/kernel/vmlinux.lds.S
15575 --- linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15576 +++ linux-3.0.3/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15577 @@ -26,6 +26,13 @@
15578 #include <asm/page_types.h>
15579 #include <asm/cache.h>
15580 #include <asm/boot.h>
15581 +#include <asm/segment.h>
15582 +
15583 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15584 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15585 +#else
15586 +#define __KERNEL_TEXT_OFFSET 0
15587 +#endif
15588
15589 #undef i386 /* in case the preprocessor is a 32bit one */
15590
15591 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15592
15593 PHDRS {
15594 text PT_LOAD FLAGS(5); /* R_E */
15595 +#ifdef CONFIG_X86_32
15596 + module PT_LOAD FLAGS(5); /* R_E */
15597 +#endif
15598 +#ifdef CONFIG_XEN
15599 + rodata PT_LOAD FLAGS(5); /* R_E */
15600 +#else
15601 + rodata PT_LOAD FLAGS(4); /* R__ */
15602 +#endif
15603 data PT_LOAD FLAGS(6); /* RW_ */
15604 #ifdef CONFIG_X86_64
15605 user PT_LOAD FLAGS(5); /* R_E */
15606 +#endif
15607 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15608 #ifdef CONFIG_SMP
15609 percpu PT_LOAD FLAGS(6); /* RW_ */
15610 #endif
15611 + text.init PT_LOAD FLAGS(5); /* R_E */
15612 + text.exit PT_LOAD FLAGS(5); /* R_E */
15613 init PT_LOAD FLAGS(7); /* RWE */
15614 -#endif
15615 note PT_NOTE FLAGS(0); /* ___ */
15616 }
15617
15618 SECTIONS
15619 {
15620 #ifdef CONFIG_X86_32
15621 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15622 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15623 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15624 #else
15625 - . = __START_KERNEL;
15626 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15627 + . = __START_KERNEL;
15628 #endif
15629
15630 /* Text and read-only data */
15631 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15632 - _text = .;
15633 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15634 /* bootstrapping code */
15635 +#ifdef CONFIG_X86_32
15636 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15637 +#else
15638 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15639 +#endif
15640 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15641 + _text = .;
15642 HEAD_TEXT
15643 #ifdef CONFIG_X86_32
15644 . = ALIGN(PAGE_SIZE);
15645 @@ -109,13 +131,47 @@ SECTIONS
15646 IRQENTRY_TEXT
15647 *(.fixup)
15648 *(.gnu.warning)
15649 - /* End of text section */
15650 - _etext = .;
15651 } :text = 0x9090
15652
15653 - NOTES :text :note
15654 + . += __KERNEL_TEXT_OFFSET;
15655 +
15656 +#ifdef CONFIG_X86_32
15657 + . = ALIGN(PAGE_SIZE);
15658 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15659 +
15660 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15661 + MODULES_EXEC_VADDR = .;
15662 + BYTE(0)
15663 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15664 + . = ALIGN(HPAGE_SIZE);
15665 + MODULES_EXEC_END = . - 1;
15666 +#endif
15667 +
15668 + } :module
15669 +#endif
15670 +
15671 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15672 + /* End of text section */
15673 + _etext = . - __KERNEL_TEXT_OFFSET;
15674 + }
15675 +
15676 +#ifdef CONFIG_X86_32
15677 + . = ALIGN(PAGE_SIZE);
15678 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15679 + *(.idt)
15680 + . = ALIGN(PAGE_SIZE);
15681 + *(.empty_zero_page)
15682 + *(.initial_pg_fixmap)
15683 + *(.initial_pg_pmd)
15684 + *(.initial_page_table)
15685 + *(.swapper_pg_dir)
15686 + } :rodata
15687 +#endif
15688 +
15689 + . = ALIGN(PAGE_SIZE);
15690 + NOTES :rodata :note
15691
15692 - EXCEPTION_TABLE(16) :text = 0x9090
15693 + EXCEPTION_TABLE(16) :rodata
15694
15695 #if defined(CONFIG_DEBUG_RODATA)
15696 /* .text should occupy whole number of pages */
15697 @@ -127,16 +183,20 @@ SECTIONS
15698
15699 /* Data */
15700 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15701 +
15702 +#ifdef CONFIG_PAX_KERNEXEC
15703 + . = ALIGN(HPAGE_SIZE);
15704 +#else
15705 + . = ALIGN(PAGE_SIZE);
15706 +#endif
15707 +
15708 /* Start of data section */
15709 _sdata = .;
15710
15711 /* init_task */
15712 INIT_TASK_DATA(THREAD_SIZE)
15713
15714 -#ifdef CONFIG_X86_32
15715 - /* 32 bit has nosave before _edata */
15716 NOSAVE_DATA
15717 -#endif
15718
15719 PAGE_ALIGNED_DATA(PAGE_SIZE)
15720
15721 @@ -208,12 +268,19 @@ SECTIONS
15722 #endif /* CONFIG_X86_64 */
15723
15724 /* Init code and data - will be freed after init */
15725 - . = ALIGN(PAGE_SIZE);
15726 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15727 + BYTE(0)
15728 +
15729 +#ifdef CONFIG_PAX_KERNEXEC
15730 + . = ALIGN(HPAGE_SIZE);
15731 +#else
15732 + . = ALIGN(PAGE_SIZE);
15733 +#endif
15734 +
15735 __init_begin = .; /* paired with __init_end */
15736 - }
15737 + } :init.begin
15738
15739 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15740 +#ifdef CONFIG_SMP
15741 /*
15742 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15743 * output PHDR, so the next output section - .init.text - should
15744 @@ -222,12 +289,27 @@ SECTIONS
15745 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15746 #endif
15747
15748 - INIT_TEXT_SECTION(PAGE_SIZE)
15749 -#ifdef CONFIG_X86_64
15750 - :init
15751 -#endif
15752 + . = ALIGN(PAGE_SIZE);
15753 + init_begin = .;
15754 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15755 + VMLINUX_SYMBOL(_sinittext) = .;
15756 + INIT_TEXT
15757 + VMLINUX_SYMBOL(_einittext) = .;
15758 + . = ALIGN(PAGE_SIZE);
15759 + } :text.init
15760
15761 - INIT_DATA_SECTION(16)
15762 + /*
15763 + * .exit.text is discard at runtime, not link time, to deal with
15764 + * references from .altinstructions and .eh_frame
15765 + */
15766 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15767 + EXIT_TEXT
15768 + . = ALIGN(16);
15769 + } :text.exit
15770 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15771 +
15772 + . = ALIGN(PAGE_SIZE);
15773 + INIT_DATA_SECTION(16) :init
15774
15775 /*
15776 * Code and data for a variety of lowlevel trampolines, to be
15777 @@ -301,19 +383,12 @@ SECTIONS
15778 }
15779
15780 . = ALIGN(8);
15781 - /*
15782 - * .exit.text is discard at runtime, not link time, to deal with
15783 - * references from .altinstructions and .eh_frame
15784 - */
15785 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15786 - EXIT_TEXT
15787 - }
15788
15789 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15790 EXIT_DATA
15791 }
15792
15793 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15794 +#ifndef CONFIG_SMP
15795 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15796 #endif
15797
15798 @@ -332,16 +407,10 @@ SECTIONS
15799 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15800 __smp_locks = .;
15801 *(.smp_locks)
15802 - . = ALIGN(PAGE_SIZE);
15803 __smp_locks_end = .;
15804 + . = ALIGN(PAGE_SIZE);
15805 }
15806
15807 -#ifdef CONFIG_X86_64
15808 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15809 - NOSAVE_DATA
15810 - }
15811 -#endif
15812 -
15813 /* BSS */
15814 . = ALIGN(PAGE_SIZE);
15815 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15816 @@ -357,6 +426,7 @@ SECTIONS
15817 __brk_base = .;
15818 . += 64 * 1024; /* 64k alignment slop space */
15819 *(.brk_reservation) /* areas brk users have reserved */
15820 + . = ALIGN(HPAGE_SIZE);
15821 __brk_limit = .;
15822 }
15823
15824 @@ -383,13 +453,12 @@ SECTIONS
15825 * for the boot processor.
15826 */
15827 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15828 -INIT_PER_CPU(gdt_page);
15829 INIT_PER_CPU(irq_stack_union);
15830
15831 /*
15832 * Build-time check on the image size:
15833 */
15834 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15835 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15836 "kernel image bigger than KERNEL_IMAGE_SIZE");
15837
15838 #ifdef CONFIG_SMP
15839 diff -urNp linux-3.0.3/arch/x86/kernel/vsyscall_64.c linux-3.0.3/arch/x86/kernel/vsyscall_64.c
15840 --- linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15841 +++ linux-3.0.3/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15842 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15843 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15844 {
15845 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15846 - .sysctl_enabled = 1,
15847 + .sysctl_enabled = 0,
15848 };
15849
15850 void update_vsyscall_tz(void)
15851 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15852 static ctl_table kernel_table2[] = {
15853 { .procname = "vsyscall64",
15854 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15855 - .mode = 0644,
15856 + .mode = 0444,
15857 .proc_handler = proc_dointvec },
15858 {}
15859 };
15860 diff -urNp linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c
15861 --- linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15862 +++ linux-3.0.3/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15863 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15864 EXPORT_SYMBOL(copy_user_generic_string);
15865 EXPORT_SYMBOL(copy_user_generic_unrolled);
15866 EXPORT_SYMBOL(__copy_user_nocache);
15867 -EXPORT_SYMBOL(_copy_from_user);
15868 -EXPORT_SYMBOL(_copy_to_user);
15869
15870 EXPORT_SYMBOL(copy_page);
15871 EXPORT_SYMBOL(clear_page);
15872 diff -urNp linux-3.0.3/arch/x86/kernel/xsave.c linux-3.0.3/arch/x86/kernel/xsave.c
15873 --- linux-3.0.3/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15874 +++ linux-3.0.3/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15875 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15876 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15877 return -EINVAL;
15878
15879 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15880 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15881 fx_sw_user->extended_size -
15882 FP_XSTATE_MAGIC2_SIZE));
15883 if (err)
15884 @@ -267,7 +267,7 @@ fx_only:
15885 * the other extended state.
15886 */
15887 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15888 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15889 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15890 }
15891
15892 /*
15893 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15894 if (use_xsave())
15895 err = restore_user_xstate(buf);
15896 else
15897 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15898 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15899 buf);
15900 if (unlikely(err)) {
15901 /*
15902 diff -urNp linux-3.0.3/arch/x86/kvm/emulate.c linux-3.0.3/arch/x86/kvm/emulate.c
15903 --- linux-3.0.3/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15904 +++ linux-3.0.3/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15905 @@ -96,7 +96,7 @@
15906 #define Src2ImmByte (2<<29)
15907 #define Src2One (3<<29)
15908 #define Src2Imm (4<<29)
15909 -#define Src2Mask (7<<29)
15910 +#define Src2Mask (7U<<29)
15911
15912 #define X2(x...) x, x
15913 #define X3(x...) X2(x), x
15914 @@ -207,6 +207,7 @@ struct gprefix {
15915
15916 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15917 do { \
15918 + unsigned long _tmp; \
15919 __asm__ __volatile__ ( \
15920 _PRE_EFLAGS("0", "4", "2") \
15921 _op _suffix " %"_x"3,%1; " \
15922 @@ -220,8 +221,6 @@ struct gprefix {
15923 /* Raw emulation: instruction has two explicit operands. */
15924 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15925 do { \
15926 - unsigned long _tmp; \
15927 - \
15928 switch ((_dst).bytes) { \
15929 case 2: \
15930 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15931 @@ -237,7 +236,6 @@ struct gprefix {
15932
15933 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15934 do { \
15935 - unsigned long _tmp; \
15936 switch ((_dst).bytes) { \
15937 case 1: \
15938 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
15939 diff -urNp linux-3.0.3/arch/x86/kvm/lapic.c linux-3.0.3/arch/x86/kvm/lapic.c
15940 --- linux-3.0.3/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
15941 +++ linux-3.0.3/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
15942 @@ -53,7 +53,7 @@
15943 #define APIC_BUS_CYCLE_NS 1
15944
15945 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15946 -#define apic_debug(fmt, arg...)
15947 +#define apic_debug(fmt, arg...) do {} while (0)
15948
15949 #define APIC_LVT_NUM 6
15950 /* 14 is the version for Xeon and Pentium 8.4.8*/
15951 diff -urNp linux-3.0.3/arch/x86/kvm/mmu.c linux-3.0.3/arch/x86/kvm/mmu.c
15952 --- linux-3.0.3/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
15953 +++ linux-3.0.3/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
15954 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15955
15956 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
15957
15958 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
15959 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
15960
15961 /*
15962 * Assume that the pte write on a page table of the same type
15963 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
15964 }
15965
15966 spin_lock(&vcpu->kvm->mmu_lock);
15967 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15968 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
15969 gentry = 0;
15970 kvm_mmu_free_some_pages(vcpu);
15971 ++vcpu->kvm->stat.mmu_pte_write;
15972 diff -urNp linux-3.0.3/arch/x86/kvm/paging_tmpl.h linux-3.0.3/arch/x86/kvm/paging_tmpl.h
15973 --- linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
15974 +++ linux-3.0.3/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
15975 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
15976 unsigned long mmu_seq;
15977 bool map_writable;
15978
15979 + pax_track_stack();
15980 +
15981 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
15982
15983 r = mmu_topup_memory_caches(vcpu);
15984 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
15985 if (need_flush)
15986 kvm_flush_remote_tlbs(vcpu->kvm);
15987
15988 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
15989 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
15990
15991 spin_unlock(&vcpu->kvm->mmu_lock);
15992
15993 diff -urNp linux-3.0.3/arch/x86/kvm/svm.c linux-3.0.3/arch/x86/kvm/svm.c
15994 --- linux-3.0.3/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
15995 +++ linux-3.0.3/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
15996 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
15997 int cpu = raw_smp_processor_id();
15998
15999 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16000 +
16001 + pax_open_kernel();
16002 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16003 + pax_close_kernel();
16004 +
16005 load_TR_desc();
16006 }
16007
16008 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16009 #endif
16010 #endif
16011
16012 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16013 + __set_fs(current_thread_info()->addr_limit);
16014 +#endif
16015 +
16016 reload_tss(vcpu);
16017
16018 local_irq_disable();
16019 diff -urNp linux-3.0.3/arch/x86/kvm/vmx.c linux-3.0.3/arch/x86/kvm/vmx.c
16020 --- linux-3.0.3/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16021 +++ linux-3.0.3/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16022 @@ -797,7 +797,11 @@ static void reload_tss(void)
16023 struct desc_struct *descs;
16024
16025 descs = (void *)gdt->address;
16026 +
16027 + pax_open_kernel();
16028 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16029 + pax_close_kernel();
16030 +
16031 load_TR_desc();
16032 }
16033
16034 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16035 if (!cpu_has_vmx_flexpriority())
16036 flexpriority_enabled = 0;
16037
16038 - if (!cpu_has_vmx_tpr_shadow())
16039 - kvm_x86_ops->update_cr8_intercept = NULL;
16040 + if (!cpu_has_vmx_tpr_shadow()) {
16041 + pax_open_kernel();
16042 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16043 + pax_close_kernel();
16044 + }
16045
16046 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16047 kvm_disable_largepages();
16048 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16049 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16050
16051 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16052 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16053 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16054 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16055 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16056 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16057 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16058 "jmp .Lkvm_vmx_return \n\t"
16059 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16060 ".Lkvm_vmx_return: "
16061 +
16062 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16063 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16064 + ".Lkvm_vmx_return2: "
16065 +#endif
16066 +
16067 /* Save guest registers, load host registers, keep flags */
16068 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16069 "pop %0 \n\t"
16070 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16071 #endif
16072 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16073 [wordsize]"i"(sizeof(ulong))
16074 +
16075 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16076 + ,[cs]"i"(__KERNEL_CS)
16077 +#endif
16078 +
16079 : "cc", "memory"
16080 , R"ax", R"bx", R"di", R"si"
16081 #ifdef CONFIG_X86_64
16082 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16083
16084 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16085
16086 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16087 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16088 +
16089 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16090 + loadsegment(fs, __KERNEL_PERCPU);
16091 +#endif
16092 +
16093 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16094 + __set_fs(current_thread_info()->addr_limit);
16095 +#endif
16096 +
16097 vmx->launched = 1;
16098
16099 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16100 diff -urNp linux-3.0.3/arch/x86/kvm/x86.c linux-3.0.3/arch/x86/kvm/x86.c
16101 --- linux-3.0.3/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16102 +++ linux-3.0.3/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16103 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16104 if (n < msr_list.nmsrs)
16105 goto out;
16106 r = -EFAULT;
16107 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16108 + goto out;
16109 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16110 num_msrs_to_save * sizeof(u32)))
16111 goto out;
16112 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16113 struct kvm_cpuid2 *cpuid,
16114 struct kvm_cpuid_entry2 __user *entries)
16115 {
16116 - int r;
16117 + int r, i;
16118
16119 r = -E2BIG;
16120 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16121 goto out;
16122 r = -EFAULT;
16123 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16124 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16125 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16126 goto out;
16127 + for (i = 0; i < cpuid->nent; ++i) {
16128 + struct kvm_cpuid_entry2 cpuid_entry;
16129 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16130 + goto out;
16131 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16132 + }
16133 vcpu->arch.cpuid_nent = cpuid->nent;
16134 kvm_apic_set_version(vcpu);
16135 kvm_x86_ops->cpuid_update(vcpu);
16136 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16137 struct kvm_cpuid2 *cpuid,
16138 struct kvm_cpuid_entry2 __user *entries)
16139 {
16140 - int r;
16141 + int r, i;
16142
16143 r = -E2BIG;
16144 if (cpuid->nent < vcpu->arch.cpuid_nent)
16145 goto out;
16146 r = -EFAULT;
16147 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16148 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16149 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16150 goto out;
16151 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16152 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16153 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16154 + goto out;
16155 + }
16156 return 0;
16157
16158 out:
16159 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16160 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16161 struct kvm_interrupt *irq)
16162 {
16163 - if (irq->irq < 0 || irq->irq >= 256)
16164 + if (irq->irq >= 256)
16165 return -EINVAL;
16166 if (irqchip_in_kernel(vcpu->kvm))
16167 return -ENXIO;
16168 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16169 }
16170 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16171
16172 -int kvm_arch_init(void *opaque)
16173 +int kvm_arch_init(const void *opaque)
16174 {
16175 int r;
16176 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16177 diff -urNp linux-3.0.3/arch/x86/lguest/boot.c linux-3.0.3/arch/x86/lguest/boot.c
16178 --- linux-3.0.3/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16179 +++ linux-3.0.3/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16180 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16181 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16182 * Launcher to reboot us.
16183 */
16184 -static void lguest_restart(char *reason)
16185 +static __noreturn void lguest_restart(char *reason)
16186 {
16187 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16188 + BUG();
16189 }
16190
16191 /*G:050
16192 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_32.c linux-3.0.3/arch/x86/lib/atomic64_32.c
16193 --- linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16194 +++ linux-3.0.3/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16195 @@ -8,18 +8,30 @@
16196
16197 long long atomic64_read_cx8(long long, const atomic64_t *v);
16198 EXPORT_SYMBOL(atomic64_read_cx8);
16199 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16200 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16201 long long atomic64_set_cx8(long long, const atomic64_t *v);
16202 EXPORT_SYMBOL(atomic64_set_cx8);
16203 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16204 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16205 long long atomic64_xchg_cx8(long long, unsigned high);
16206 EXPORT_SYMBOL(atomic64_xchg_cx8);
16207 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16208 EXPORT_SYMBOL(atomic64_add_return_cx8);
16209 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16210 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16211 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16212 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16213 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16214 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16215 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16216 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16217 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16218 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16219 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16220 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16221 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16222 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16223 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16224 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16225 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16226 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16227 #ifndef CONFIG_X86_CMPXCHG64
16228 long long atomic64_read_386(long long, const atomic64_t *v);
16229 EXPORT_SYMBOL(atomic64_read_386);
16230 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16231 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16232 long long atomic64_set_386(long long, const atomic64_t *v);
16233 EXPORT_SYMBOL(atomic64_set_386);
16234 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16235 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16236 long long atomic64_xchg_386(long long, unsigned high);
16237 EXPORT_SYMBOL(atomic64_xchg_386);
16238 long long atomic64_add_return_386(long long a, atomic64_t *v);
16239 EXPORT_SYMBOL(atomic64_add_return_386);
16240 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16241 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16242 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16243 EXPORT_SYMBOL(atomic64_sub_return_386);
16244 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16245 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16246 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16247 EXPORT_SYMBOL(atomic64_inc_return_386);
16248 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16249 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16250 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16251 EXPORT_SYMBOL(atomic64_dec_return_386);
16252 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16253 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16254 long long atomic64_add_386(long long a, atomic64_t *v);
16255 EXPORT_SYMBOL(atomic64_add_386);
16256 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16257 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16258 long long atomic64_sub_386(long long a, atomic64_t *v);
16259 EXPORT_SYMBOL(atomic64_sub_386);
16260 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16261 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16262 long long atomic64_inc_386(long long a, atomic64_t *v);
16263 EXPORT_SYMBOL(atomic64_inc_386);
16264 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16265 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16266 long long atomic64_dec_386(long long a, atomic64_t *v);
16267 EXPORT_SYMBOL(atomic64_dec_386);
16268 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16269 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16270 long long atomic64_dec_if_positive_386(atomic64_t *v);
16271 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16272 int atomic64_inc_not_zero_386(atomic64_t *v);
16273 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_386_32.S linux-3.0.3/arch/x86/lib/atomic64_386_32.S
16274 --- linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16275 +++ linux-3.0.3/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16276 @@ -48,6 +48,10 @@ BEGIN(read)
16277 movl (v), %eax
16278 movl 4(v), %edx
16279 RET_ENDP
16280 +BEGIN(read_unchecked)
16281 + movl (v), %eax
16282 + movl 4(v), %edx
16283 +RET_ENDP
16284 #undef v
16285
16286 #define v %esi
16287 @@ -55,6 +59,10 @@ BEGIN(set)
16288 movl %ebx, (v)
16289 movl %ecx, 4(v)
16290 RET_ENDP
16291 +BEGIN(set_unchecked)
16292 + movl %ebx, (v)
16293 + movl %ecx, 4(v)
16294 +RET_ENDP
16295 #undef v
16296
16297 #define v %esi
16298 @@ -70,6 +78,20 @@ RET_ENDP
16299 BEGIN(add)
16300 addl %eax, (v)
16301 adcl %edx, 4(v)
16302 +
16303 +#ifdef CONFIG_PAX_REFCOUNT
16304 + jno 0f
16305 + subl %eax, (v)
16306 + sbbl %edx, 4(v)
16307 + int $4
16308 +0:
16309 + _ASM_EXTABLE(0b, 0b)
16310 +#endif
16311 +
16312 +RET_ENDP
16313 +BEGIN(add_unchecked)
16314 + addl %eax, (v)
16315 + adcl %edx, 4(v)
16316 RET_ENDP
16317 #undef v
16318
16319 @@ -77,6 +99,24 @@ RET_ENDP
16320 BEGIN(add_return)
16321 addl (v), %eax
16322 adcl 4(v), %edx
16323 +
16324 +#ifdef CONFIG_PAX_REFCOUNT
16325 + into
16326 +1234:
16327 + _ASM_EXTABLE(1234b, 2f)
16328 +#endif
16329 +
16330 + movl %eax, (v)
16331 + movl %edx, 4(v)
16332 +
16333 +#ifdef CONFIG_PAX_REFCOUNT
16334 +2:
16335 +#endif
16336 +
16337 +RET_ENDP
16338 +BEGIN(add_return_unchecked)
16339 + addl (v), %eax
16340 + adcl 4(v), %edx
16341 movl %eax, (v)
16342 movl %edx, 4(v)
16343 RET_ENDP
16344 @@ -86,6 +126,20 @@ RET_ENDP
16345 BEGIN(sub)
16346 subl %eax, (v)
16347 sbbl %edx, 4(v)
16348 +
16349 +#ifdef CONFIG_PAX_REFCOUNT
16350 + jno 0f
16351 + addl %eax, (v)
16352 + adcl %edx, 4(v)
16353 + int $4
16354 +0:
16355 + _ASM_EXTABLE(0b, 0b)
16356 +#endif
16357 +
16358 +RET_ENDP
16359 +BEGIN(sub_unchecked)
16360 + subl %eax, (v)
16361 + sbbl %edx, 4(v)
16362 RET_ENDP
16363 #undef v
16364
16365 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16366 sbbl $0, %edx
16367 addl (v), %eax
16368 adcl 4(v), %edx
16369 +
16370 +#ifdef CONFIG_PAX_REFCOUNT
16371 + into
16372 +1234:
16373 + _ASM_EXTABLE(1234b, 2f)
16374 +#endif
16375 +
16376 + movl %eax, (v)
16377 + movl %edx, 4(v)
16378 +
16379 +#ifdef CONFIG_PAX_REFCOUNT
16380 +2:
16381 +#endif
16382 +
16383 +RET_ENDP
16384 +BEGIN(sub_return_unchecked)
16385 + negl %edx
16386 + negl %eax
16387 + sbbl $0, %edx
16388 + addl (v), %eax
16389 + adcl 4(v), %edx
16390 movl %eax, (v)
16391 movl %edx, 4(v)
16392 RET_ENDP
16393 @@ -105,6 +180,20 @@ RET_ENDP
16394 BEGIN(inc)
16395 addl $1, (v)
16396 adcl $0, 4(v)
16397 +
16398 +#ifdef CONFIG_PAX_REFCOUNT
16399 + jno 0f
16400 + subl $1, (v)
16401 + sbbl $0, 4(v)
16402 + int $4
16403 +0:
16404 + _ASM_EXTABLE(0b, 0b)
16405 +#endif
16406 +
16407 +RET_ENDP
16408 +BEGIN(inc_unchecked)
16409 + addl $1, (v)
16410 + adcl $0, 4(v)
16411 RET_ENDP
16412 #undef v
16413
16414 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16415 movl 4(v), %edx
16416 addl $1, %eax
16417 adcl $0, %edx
16418 +
16419 +#ifdef CONFIG_PAX_REFCOUNT
16420 + into
16421 +1234:
16422 + _ASM_EXTABLE(1234b, 2f)
16423 +#endif
16424 +
16425 + movl %eax, (v)
16426 + movl %edx, 4(v)
16427 +
16428 +#ifdef CONFIG_PAX_REFCOUNT
16429 +2:
16430 +#endif
16431 +
16432 +RET_ENDP
16433 +BEGIN(inc_return_unchecked)
16434 + movl (v), %eax
16435 + movl 4(v), %edx
16436 + addl $1, %eax
16437 + adcl $0, %edx
16438 movl %eax, (v)
16439 movl %edx, 4(v)
16440 RET_ENDP
16441 @@ -123,6 +232,20 @@ RET_ENDP
16442 BEGIN(dec)
16443 subl $1, (v)
16444 sbbl $0, 4(v)
16445 +
16446 +#ifdef CONFIG_PAX_REFCOUNT
16447 + jno 0f
16448 + addl $1, (v)
16449 + adcl $0, 4(v)
16450 + int $4
16451 +0:
16452 + _ASM_EXTABLE(0b, 0b)
16453 +#endif
16454 +
16455 +RET_ENDP
16456 +BEGIN(dec_unchecked)
16457 + subl $1, (v)
16458 + sbbl $0, 4(v)
16459 RET_ENDP
16460 #undef v
16461
16462 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16463 movl 4(v), %edx
16464 subl $1, %eax
16465 sbbl $0, %edx
16466 +
16467 +#ifdef CONFIG_PAX_REFCOUNT
16468 + into
16469 +1234:
16470 + _ASM_EXTABLE(1234b, 2f)
16471 +#endif
16472 +
16473 + movl %eax, (v)
16474 + movl %edx, 4(v)
16475 +
16476 +#ifdef CONFIG_PAX_REFCOUNT
16477 +2:
16478 +#endif
16479 +
16480 +RET_ENDP
16481 +BEGIN(dec_return_unchecked)
16482 + movl (v), %eax
16483 + movl 4(v), %edx
16484 + subl $1, %eax
16485 + sbbl $0, %edx
16486 movl %eax, (v)
16487 movl %edx, 4(v)
16488 RET_ENDP
16489 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16490 adcl %edx, %edi
16491 addl (v), %eax
16492 adcl 4(v), %edx
16493 +
16494 +#ifdef CONFIG_PAX_REFCOUNT
16495 + into
16496 +1234:
16497 + _ASM_EXTABLE(1234b, 2f)
16498 +#endif
16499 +
16500 cmpl %eax, %esi
16501 je 3f
16502 1:
16503 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16504 1:
16505 addl $1, %eax
16506 adcl $0, %edx
16507 +
16508 +#ifdef CONFIG_PAX_REFCOUNT
16509 + into
16510 +1234:
16511 + _ASM_EXTABLE(1234b, 2f)
16512 +#endif
16513 +
16514 movl %eax, (v)
16515 movl %edx, 4(v)
16516 movl $1, %eax
16517 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16518 movl 4(v), %edx
16519 subl $1, %eax
16520 sbbl $0, %edx
16521 +
16522 +#ifdef CONFIG_PAX_REFCOUNT
16523 + into
16524 +1234:
16525 + _ASM_EXTABLE(1234b, 1f)
16526 +#endif
16527 +
16528 js 1f
16529 movl %eax, (v)
16530 movl %edx, 4(v)
16531 diff -urNp linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S
16532 --- linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16533 +++ linux-3.0.3/arch/x86/lib/atomic64_cx8_32.S 2011-08-23 21:47:55.000000000 -0400
16534 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16535 CFI_ENDPROC
16536 ENDPROC(atomic64_read_cx8)
16537
16538 +ENTRY(atomic64_read_unchecked_cx8)
16539 + CFI_STARTPROC
16540 +
16541 + read64 %ecx
16542 + ret
16543 + CFI_ENDPROC
16544 +ENDPROC(atomic64_read_unchecked_cx8)
16545 +
16546 ENTRY(atomic64_set_cx8)
16547 CFI_STARTPROC
16548
16549 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16550 CFI_ENDPROC
16551 ENDPROC(atomic64_set_cx8)
16552
16553 +ENTRY(atomic64_set_unchecked_cx8)
16554 + CFI_STARTPROC
16555 +
16556 +1:
16557 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16558 + * are atomic on 586 and newer */
16559 + cmpxchg8b (%esi)
16560 + jne 1b
16561 +
16562 + ret
16563 + CFI_ENDPROC
16564 +ENDPROC(atomic64_set_unchecked_cx8)
16565 +
16566 ENTRY(atomic64_xchg_cx8)
16567 CFI_STARTPROC
16568
16569 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16570 CFI_ENDPROC
16571 ENDPROC(atomic64_xchg_cx8)
16572
16573 -.macro addsub_return func ins insc
16574 -ENTRY(atomic64_\func\()_return_cx8)
16575 +.macro addsub_return func ins insc unchecked=""
16576 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16577 CFI_STARTPROC
16578 SAVE ebp
16579 SAVE ebx
16580 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16581 movl %edx, %ecx
16582 \ins\()l %esi, %ebx
16583 \insc\()l %edi, %ecx
16584 +
16585 +.ifb \unchecked
16586 +#ifdef CONFIG_PAX_REFCOUNT
16587 + into
16588 +2:
16589 + _ASM_EXTABLE(2b, 3f)
16590 +#endif
16591 +.endif
16592 +
16593 LOCK_PREFIX
16594 cmpxchg8b (%ebp)
16595 jne 1b
16596 -
16597 -10:
16598 movl %ebx, %eax
16599 movl %ecx, %edx
16600 +
16601 +.ifb \unchecked
16602 +#ifdef CONFIG_PAX_REFCOUNT
16603 +3:
16604 +#endif
16605 +.endif
16606 +
16607 RESTORE edi
16608 RESTORE esi
16609 RESTORE ebx
16610 RESTORE ebp
16611 ret
16612 CFI_ENDPROC
16613 -ENDPROC(atomic64_\func\()_return_cx8)
16614 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16615 .endm
16616
16617 addsub_return add add adc
16618 addsub_return sub sub sbb
16619 +addsub_return add add adc _unchecked
16620 +addsub_return sub sub sbb _unchecked
16621
16622 -.macro incdec_return func ins insc
16623 -ENTRY(atomic64_\func\()_return_cx8)
16624 +.macro incdec_return func ins insc unchecked
16625 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16626 CFI_STARTPROC
16627 SAVE ebx
16628
16629 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16630 movl %edx, %ecx
16631 \ins\()l $1, %ebx
16632 \insc\()l $0, %ecx
16633 +
16634 +.ifb \unchecked
16635 +#ifdef CONFIG_PAX_REFCOUNT
16636 + into
16637 +2:
16638 + _ASM_EXTABLE(2b, 3f)
16639 +#endif
16640 +.endif
16641 +
16642 LOCK_PREFIX
16643 cmpxchg8b (%esi)
16644 jne 1b
16645
16646 -10:
16647 movl %ebx, %eax
16648 movl %ecx, %edx
16649 +
16650 +.ifb \unchecked
16651 +#ifdef CONFIG_PAX_REFCOUNT
16652 +3:
16653 +#endif
16654 +.endif
16655 +
16656 RESTORE ebx
16657 ret
16658 CFI_ENDPROC
16659 -ENDPROC(atomic64_\func\()_return_cx8)
16660 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16661 .endm
16662
16663 incdec_return inc add adc
16664 incdec_return dec sub sbb
16665 +incdec_return inc add adc _unchecked
16666 +incdec_return dec sub sbb _unchecked
16667
16668 ENTRY(atomic64_dec_if_positive_cx8)
16669 CFI_STARTPROC
16670 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16671 movl %edx, %ecx
16672 subl $1, %ebx
16673 sbb $0, %ecx
16674 +
16675 +#ifdef CONFIG_PAX_REFCOUNT
16676 + into
16677 +1234:
16678 + _ASM_EXTABLE(1234b, 2f)
16679 +#endif
16680 +
16681 js 2f
16682 LOCK_PREFIX
16683 cmpxchg8b (%esi)
16684 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16685 movl %edx, %ecx
16686 addl %esi, %ebx
16687 adcl %edi, %ecx
16688 +
16689 +#ifdef CONFIG_PAX_REFCOUNT
16690 + into
16691 +1234:
16692 + _ASM_EXTABLE(1234b, 3f)
16693 +#endif
16694 +
16695 LOCK_PREFIX
16696 cmpxchg8b (%ebp)
16697 jne 1b
16698 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16699 movl %edx, %ecx
16700 addl $1, %ebx
16701 adcl $0, %ecx
16702 +
16703 +#ifdef CONFIG_PAX_REFCOUNT
16704 + into
16705 +1234:
16706 + _ASM_EXTABLE(1234b, 3f)
16707 +#endif
16708 +
16709 LOCK_PREFIX
16710 cmpxchg8b (%esi)
16711 jne 1b
16712 diff -urNp linux-3.0.3/arch/x86/lib/checksum_32.S linux-3.0.3/arch/x86/lib/checksum_32.S
16713 --- linux-3.0.3/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16714 +++ linux-3.0.3/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16715 @@ -28,7 +28,8 @@
16716 #include <linux/linkage.h>
16717 #include <asm/dwarf2.h>
16718 #include <asm/errno.h>
16719 -
16720 +#include <asm/segment.h>
16721 +
16722 /*
16723 * computes a partial checksum, e.g. for TCP/UDP fragments
16724 */
16725 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16726
16727 #define ARGBASE 16
16728 #define FP 12
16729 -
16730 -ENTRY(csum_partial_copy_generic)
16731 +
16732 +ENTRY(csum_partial_copy_generic_to_user)
16733 CFI_STARTPROC
16734 +
16735 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16736 + pushl_cfi %gs
16737 + popl_cfi %es
16738 + jmp csum_partial_copy_generic
16739 +#endif
16740 +
16741 +ENTRY(csum_partial_copy_generic_from_user)
16742 +
16743 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16744 + pushl_cfi %gs
16745 + popl_cfi %ds
16746 +#endif
16747 +
16748 +ENTRY(csum_partial_copy_generic)
16749 subl $4,%esp
16750 CFI_ADJUST_CFA_OFFSET 4
16751 pushl_cfi %edi
16752 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16753 jmp 4f
16754 SRC(1: movw (%esi), %bx )
16755 addl $2, %esi
16756 -DST( movw %bx, (%edi) )
16757 +DST( movw %bx, %es:(%edi) )
16758 addl $2, %edi
16759 addw %bx, %ax
16760 adcl $0, %eax
16761 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16762 SRC(1: movl (%esi), %ebx )
16763 SRC( movl 4(%esi), %edx )
16764 adcl %ebx, %eax
16765 -DST( movl %ebx, (%edi) )
16766 +DST( movl %ebx, %es:(%edi) )
16767 adcl %edx, %eax
16768 -DST( movl %edx, 4(%edi) )
16769 +DST( movl %edx, %es:4(%edi) )
16770
16771 SRC( movl 8(%esi), %ebx )
16772 SRC( movl 12(%esi), %edx )
16773 adcl %ebx, %eax
16774 -DST( movl %ebx, 8(%edi) )
16775 +DST( movl %ebx, %es:8(%edi) )
16776 adcl %edx, %eax
16777 -DST( movl %edx, 12(%edi) )
16778 +DST( movl %edx, %es:12(%edi) )
16779
16780 SRC( movl 16(%esi), %ebx )
16781 SRC( movl 20(%esi), %edx )
16782 adcl %ebx, %eax
16783 -DST( movl %ebx, 16(%edi) )
16784 +DST( movl %ebx, %es:16(%edi) )
16785 adcl %edx, %eax
16786 -DST( movl %edx, 20(%edi) )
16787 +DST( movl %edx, %es:20(%edi) )
16788
16789 SRC( movl 24(%esi), %ebx )
16790 SRC( movl 28(%esi), %edx )
16791 adcl %ebx, %eax
16792 -DST( movl %ebx, 24(%edi) )
16793 +DST( movl %ebx, %es:24(%edi) )
16794 adcl %edx, %eax
16795 -DST( movl %edx, 28(%edi) )
16796 +DST( movl %edx, %es:28(%edi) )
16797
16798 lea 32(%esi), %esi
16799 lea 32(%edi), %edi
16800 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16801 shrl $2, %edx # This clears CF
16802 SRC(3: movl (%esi), %ebx )
16803 adcl %ebx, %eax
16804 -DST( movl %ebx, (%edi) )
16805 +DST( movl %ebx, %es:(%edi) )
16806 lea 4(%esi), %esi
16807 lea 4(%edi), %edi
16808 dec %edx
16809 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16810 jb 5f
16811 SRC( movw (%esi), %cx )
16812 leal 2(%esi), %esi
16813 -DST( movw %cx, (%edi) )
16814 +DST( movw %cx, %es:(%edi) )
16815 leal 2(%edi), %edi
16816 je 6f
16817 shll $16,%ecx
16818 SRC(5: movb (%esi), %cl )
16819 -DST( movb %cl, (%edi) )
16820 +DST( movb %cl, %es:(%edi) )
16821 6: addl %ecx, %eax
16822 adcl $0, %eax
16823 7:
16824 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16825
16826 6001:
16827 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16828 - movl $-EFAULT, (%ebx)
16829 + movl $-EFAULT, %ss:(%ebx)
16830
16831 # zero the complete destination - computing the rest
16832 # is too much work
16833 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16834
16835 6002:
16836 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16837 - movl $-EFAULT,(%ebx)
16838 + movl $-EFAULT,%ss:(%ebx)
16839 jmp 5000b
16840
16841 .previous
16842
16843 + pushl_cfi %ss
16844 + popl_cfi %ds
16845 + pushl_cfi %ss
16846 + popl_cfi %es
16847 popl_cfi %ebx
16848 CFI_RESTORE ebx
16849 popl_cfi %esi
16850 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16851 popl_cfi %ecx # equivalent to addl $4,%esp
16852 ret
16853 CFI_ENDPROC
16854 -ENDPROC(csum_partial_copy_generic)
16855 +ENDPROC(csum_partial_copy_generic_to_user)
16856
16857 #else
16858
16859 /* Version for PentiumII/PPro */
16860
16861 #define ROUND1(x) \
16862 + nop; nop; nop; \
16863 SRC(movl x(%esi), %ebx ) ; \
16864 addl %ebx, %eax ; \
16865 - DST(movl %ebx, x(%edi) ) ;
16866 + DST(movl %ebx, %es:x(%edi)) ;
16867
16868 #define ROUND(x) \
16869 + nop; nop; nop; \
16870 SRC(movl x(%esi), %ebx ) ; \
16871 adcl %ebx, %eax ; \
16872 - DST(movl %ebx, x(%edi) ) ;
16873 + DST(movl %ebx, %es:x(%edi)) ;
16874
16875 #define ARGBASE 12
16876 -
16877 -ENTRY(csum_partial_copy_generic)
16878 +
16879 +ENTRY(csum_partial_copy_generic_to_user)
16880 CFI_STARTPROC
16881 +
16882 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16883 + pushl_cfi %gs
16884 + popl_cfi %es
16885 + jmp csum_partial_copy_generic
16886 +#endif
16887 +
16888 +ENTRY(csum_partial_copy_generic_from_user)
16889 +
16890 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16891 + pushl_cfi %gs
16892 + popl_cfi %ds
16893 +#endif
16894 +
16895 +ENTRY(csum_partial_copy_generic)
16896 pushl_cfi %ebx
16897 CFI_REL_OFFSET ebx, 0
16898 pushl_cfi %edi
16899 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16900 subl %ebx, %edi
16901 lea -1(%esi),%edx
16902 andl $-32,%edx
16903 - lea 3f(%ebx,%ebx), %ebx
16904 + lea 3f(%ebx,%ebx,2), %ebx
16905 testl %esi, %esi
16906 jmp *%ebx
16907 1: addl $64,%esi
16908 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16909 jb 5f
16910 SRC( movw (%esi), %dx )
16911 leal 2(%esi), %esi
16912 -DST( movw %dx, (%edi) )
16913 +DST( movw %dx, %es:(%edi) )
16914 leal 2(%edi), %edi
16915 je 6f
16916 shll $16,%edx
16917 5:
16918 SRC( movb (%esi), %dl )
16919 -DST( movb %dl, (%edi) )
16920 +DST( movb %dl, %es:(%edi) )
16921 6: addl %edx, %eax
16922 adcl $0, %eax
16923 7:
16924 .section .fixup, "ax"
16925 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16926 - movl $-EFAULT, (%ebx)
16927 + movl $-EFAULT, %ss:(%ebx)
16928 # zero the complete destination (computing the rest is too much work)
16929 movl ARGBASE+8(%esp),%edi # dst
16930 movl ARGBASE+12(%esp),%ecx # len
16931 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16932 rep; stosb
16933 jmp 7b
16934 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16935 - movl $-EFAULT, (%ebx)
16936 + movl $-EFAULT, %ss:(%ebx)
16937 jmp 7b
16938 .previous
16939
16940 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16941 + pushl_cfi %ss
16942 + popl_cfi %ds
16943 + pushl_cfi %ss
16944 + popl_cfi %es
16945 +#endif
16946 +
16947 popl_cfi %esi
16948 CFI_RESTORE esi
16949 popl_cfi %edi
16950 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
16951 CFI_RESTORE ebx
16952 ret
16953 CFI_ENDPROC
16954 -ENDPROC(csum_partial_copy_generic)
16955 +ENDPROC(csum_partial_copy_generic_to_user)
16956
16957 #undef ROUND
16958 #undef ROUND1
16959 diff -urNp linux-3.0.3/arch/x86/lib/clear_page_64.S linux-3.0.3/arch/x86/lib/clear_page_64.S
16960 --- linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
16961 +++ linux-3.0.3/arch/x86/lib/clear_page_64.S 2011-08-23 21:47:55.000000000 -0400
16962 @@ -58,7 +58,7 @@ ENDPROC(clear_page)
16963
16964 #include <asm/cpufeature.h>
16965
16966 - .section .altinstr_replacement,"ax"
16967 + .section .altinstr_replacement,"a"
16968 1: .byte 0xeb /* jmp <disp8> */
16969 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
16970 2: .byte 0xeb /* jmp <disp8> */
16971 diff -urNp linux-3.0.3/arch/x86/lib/copy_page_64.S linux-3.0.3/arch/x86/lib/copy_page_64.S
16972 --- linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
16973 +++ linux-3.0.3/arch/x86/lib/copy_page_64.S 2011-08-23 21:47:55.000000000 -0400
16974 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
16975
16976 #include <asm/cpufeature.h>
16977
16978 - .section .altinstr_replacement,"ax"
16979 + .section .altinstr_replacement,"a"
16980 1: .byte 0xeb /* jmp <disp8> */
16981 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
16982 2:
16983 diff -urNp linux-3.0.3/arch/x86/lib/copy_user_64.S linux-3.0.3/arch/x86/lib/copy_user_64.S
16984 --- linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
16985 +++ linux-3.0.3/arch/x86/lib/copy_user_64.S 2011-08-23 21:47:55.000000000 -0400
16986 @@ -16,6 +16,7 @@
16987 #include <asm/thread_info.h>
16988 #include <asm/cpufeature.h>
16989 #include <asm/alternative-asm.h>
16990 +#include <asm/pgtable.h>
16991
16992 /*
16993 * By placing feature2 after feature1 in altinstructions section, we logically
16994 @@ -29,7 +30,7 @@
16995 .byte 0xe9 /* 32bit jump */
16996 .long \orig-1f /* by default jump to orig */
16997 1:
16998 - .section .altinstr_replacement,"ax"
16999 + .section .altinstr_replacement,"a"
17000 2: .byte 0xe9 /* near jump with 32bit immediate */
17001 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17002 3: .byte 0xe9 /* near jump with 32bit immediate */
17003 @@ -71,41 +72,13 @@
17004 #endif
17005 .endm
17006
17007 -/* Standard copy_to_user with segment limit checking */
17008 -ENTRY(_copy_to_user)
17009 - CFI_STARTPROC
17010 - GET_THREAD_INFO(%rax)
17011 - movq %rdi,%rcx
17012 - addq %rdx,%rcx
17013 - jc bad_to_user
17014 - cmpq TI_addr_limit(%rax),%rcx
17015 - ja bad_to_user
17016 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17017 - copy_user_generic_unrolled,copy_user_generic_string, \
17018 - copy_user_enhanced_fast_string
17019 - CFI_ENDPROC
17020 -ENDPROC(_copy_to_user)
17021 -
17022 -/* Standard copy_from_user with segment limit checking */
17023 -ENTRY(_copy_from_user)
17024 - CFI_STARTPROC
17025 - GET_THREAD_INFO(%rax)
17026 - movq %rsi,%rcx
17027 - addq %rdx,%rcx
17028 - jc bad_from_user
17029 - cmpq TI_addr_limit(%rax),%rcx
17030 - ja bad_from_user
17031 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17032 - copy_user_generic_unrolled,copy_user_generic_string, \
17033 - copy_user_enhanced_fast_string
17034 - CFI_ENDPROC
17035 -ENDPROC(_copy_from_user)
17036 -
17037 .section .fixup,"ax"
17038 /* must zero dest */
17039 ENTRY(bad_from_user)
17040 bad_from_user:
17041 CFI_STARTPROC
17042 + testl %edx,%edx
17043 + js bad_to_user
17044 movl %edx,%ecx
17045 xorl %eax,%eax
17046 rep
17047 diff -urNp linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S
17048 --- linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17049 +++ linux-3.0.3/arch/x86/lib/copy_user_nocache_64.S 2011-08-23 21:47:55.000000000 -0400
17050 @@ -14,6 +14,7 @@
17051 #include <asm/current.h>
17052 #include <asm/asm-offsets.h>
17053 #include <asm/thread_info.h>
17054 +#include <asm/pgtable.h>
17055
17056 .macro ALIGN_DESTINATION
17057 #ifdef FIX_ALIGNMENT
17058 @@ -50,6 +51,15 @@
17059 */
17060 ENTRY(__copy_user_nocache)
17061 CFI_STARTPROC
17062 +
17063 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17064 + mov $PAX_USER_SHADOW_BASE,%rcx
17065 + cmp %rcx,%rsi
17066 + jae 1f
17067 + add %rcx,%rsi
17068 +1:
17069 +#endif
17070 +
17071 cmpl $8,%edx
17072 jb 20f /* less then 8 bytes, go to byte copy loop */
17073 ALIGN_DESTINATION
17074 diff -urNp linux-3.0.3/arch/x86/lib/csum-wrappers_64.c linux-3.0.3/arch/x86/lib/csum-wrappers_64.c
17075 --- linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17076 +++ linux-3.0.3/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17077 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17078 len -= 2;
17079 }
17080 }
17081 +
17082 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17083 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17084 + src += PAX_USER_SHADOW_BASE;
17085 +#endif
17086 +
17087 isum = csum_partial_copy_generic((__force const void *)src,
17088 dst, len, isum, errp, NULL);
17089 if (unlikely(*errp))
17090 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17091 }
17092
17093 *errp = 0;
17094 +
17095 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17096 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17097 + dst += PAX_USER_SHADOW_BASE;
17098 +#endif
17099 +
17100 return csum_partial_copy_generic(src, (void __force *)dst,
17101 len, isum, NULL, errp);
17102 }
17103 diff -urNp linux-3.0.3/arch/x86/lib/getuser.S linux-3.0.3/arch/x86/lib/getuser.S
17104 --- linux-3.0.3/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17105 +++ linux-3.0.3/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17106 @@ -33,14 +33,35 @@
17107 #include <asm/asm-offsets.h>
17108 #include <asm/thread_info.h>
17109 #include <asm/asm.h>
17110 +#include <asm/segment.h>
17111 +#include <asm/pgtable.h>
17112 +
17113 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17114 +#define __copyuser_seg gs;
17115 +#else
17116 +#define __copyuser_seg
17117 +#endif
17118
17119 .text
17120 ENTRY(__get_user_1)
17121 CFI_STARTPROC
17122 +
17123 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17124 GET_THREAD_INFO(%_ASM_DX)
17125 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17126 jae bad_get_user
17127 -1: movzb (%_ASM_AX),%edx
17128 +
17129 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17130 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17131 + cmp %_ASM_DX,%_ASM_AX
17132 + jae 1234f
17133 + add %_ASM_DX,%_ASM_AX
17134 +1234:
17135 +#endif
17136 +
17137 +#endif
17138 +
17139 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17140 xor %eax,%eax
17141 ret
17142 CFI_ENDPROC
17143 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17144 ENTRY(__get_user_2)
17145 CFI_STARTPROC
17146 add $1,%_ASM_AX
17147 +
17148 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17149 jc bad_get_user
17150 GET_THREAD_INFO(%_ASM_DX)
17151 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17152 jae bad_get_user
17153 -2: movzwl -1(%_ASM_AX),%edx
17154 +
17155 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17156 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17157 + cmp %_ASM_DX,%_ASM_AX
17158 + jae 1234f
17159 + add %_ASM_DX,%_ASM_AX
17160 +1234:
17161 +#endif
17162 +
17163 +#endif
17164 +
17165 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17166 xor %eax,%eax
17167 ret
17168 CFI_ENDPROC
17169 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17170 ENTRY(__get_user_4)
17171 CFI_STARTPROC
17172 add $3,%_ASM_AX
17173 +
17174 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17175 jc bad_get_user
17176 GET_THREAD_INFO(%_ASM_DX)
17177 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17178 jae bad_get_user
17179 -3: mov -3(%_ASM_AX),%edx
17180 +
17181 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17182 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17183 + cmp %_ASM_DX,%_ASM_AX
17184 + jae 1234f
17185 + add %_ASM_DX,%_ASM_AX
17186 +1234:
17187 +#endif
17188 +
17189 +#endif
17190 +
17191 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17192 xor %eax,%eax
17193 ret
17194 CFI_ENDPROC
17195 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17196 GET_THREAD_INFO(%_ASM_DX)
17197 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17198 jae bad_get_user
17199 +
17200 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17201 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17202 + cmp %_ASM_DX,%_ASM_AX
17203 + jae 1234f
17204 + add %_ASM_DX,%_ASM_AX
17205 +1234:
17206 +#endif
17207 +
17208 4: movq -7(%_ASM_AX),%_ASM_DX
17209 xor %eax,%eax
17210 ret
17211 diff -urNp linux-3.0.3/arch/x86/lib/insn.c linux-3.0.3/arch/x86/lib/insn.c
17212 --- linux-3.0.3/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17213 +++ linux-3.0.3/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17214 @@ -21,6 +21,11 @@
17215 #include <linux/string.h>
17216 #include <asm/inat.h>
17217 #include <asm/insn.h>
17218 +#ifdef __KERNEL__
17219 +#include <asm/pgtable_types.h>
17220 +#else
17221 +#define ktla_ktva(addr) addr
17222 +#endif
17223
17224 #define get_next(t, insn) \
17225 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17226 @@ -40,8 +45,8 @@
17227 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17228 {
17229 memset(insn, 0, sizeof(*insn));
17230 - insn->kaddr = kaddr;
17231 - insn->next_byte = kaddr;
17232 + insn->kaddr = ktla_ktva(kaddr);
17233 + insn->next_byte = ktla_ktva(kaddr);
17234 insn->x86_64 = x86_64 ? 1 : 0;
17235 insn->opnd_bytes = 4;
17236 if (x86_64)
17237 diff -urNp linux-3.0.3/arch/x86/lib/mmx_32.c linux-3.0.3/arch/x86/lib/mmx_32.c
17238 --- linux-3.0.3/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17239 +++ linux-3.0.3/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17240 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17241 {
17242 void *p;
17243 int i;
17244 + unsigned long cr0;
17245
17246 if (unlikely(in_interrupt()))
17247 return __memcpy(to, from, len);
17248 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17249 kernel_fpu_begin();
17250
17251 __asm__ __volatile__ (
17252 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17253 - " prefetch 64(%0)\n"
17254 - " prefetch 128(%0)\n"
17255 - " prefetch 192(%0)\n"
17256 - " prefetch 256(%0)\n"
17257 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17258 + " prefetch 64(%1)\n"
17259 + " prefetch 128(%1)\n"
17260 + " prefetch 192(%1)\n"
17261 + " prefetch 256(%1)\n"
17262 "2: \n"
17263 ".section .fixup, \"ax\"\n"
17264 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17265 + "3: \n"
17266 +
17267 +#ifdef CONFIG_PAX_KERNEXEC
17268 + " movl %%cr0, %0\n"
17269 + " movl %0, %%eax\n"
17270 + " andl $0xFFFEFFFF, %%eax\n"
17271 + " movl %%eax, %%cr0\n"
17272 +#endif
17273 +
17274 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17275 +
17276 +#ifdef CONFIG_PAX_KERNEXEC
17277 + " movl %0, %%cr0\n"
17278 +#endif
17279 +
17280 " jmp 2b\n"
17281 ".previous\n"
17282 _ASM_EXTABLE(1b, 3b)
17283 - : : "r" (from));
17284 + : "=&r" (cr0) : "r" (from) : "ax");
17285
17286 for ( ; i > 5; i--) {
17287 __asm__ __volatile__ (
17288 - "1: prefetch 320(%0)\n"
17289 - "2: movq (%0), %%mm0\n"
17290 - " movq 8(%0), %%mm1\n"
17291 - " movq 16(%0), %%mm2\n"
17292 - " movq 24(%0), %%mm3\n"
17293 - " movq %%mm0, (%1)\n"
17294 - " movq %%mm1, 8(%1)\n"
17295 - " movq %%mm2, 16(%1)\n"
17296 - " movq %%mm3, 24(%1)\n"
17297 - " movq 32(%0), %%mm0\n"
17298 - " movq 40(%0), %%mm1\n"
17299 - " movq 48(%0), %%mm2\n"
17300 - " movq 56(%0), %%mm3\n"
17301 - " movq %%mm0, 32(%1)\n"
17302 - " movq %%mm1, 40(%1)\n"
17303 - " movq %%mm2, 48(%1)\n"
17304 - " movq %%mm3, 56(%1)\n"
17305 + "1: prefetch 320(%1)\n"
17306 + "2: movq (%1), %%mm0\n"
17307 + " movq 8(%1), %%mm1\n"
17308 + " movq 16(%1), %%mm2\n"
17309 + " movq 24(%1), %%mm3\n"
17310 + " movq %%mm0, (%2)\n"
17311 + " movq %%mm1, 8(%2)\n"
17312 + " movq %%mm2, 16(%2)\n"
17313 + " movq %%mm3, 24(%2)\n"
17314 + " movq 32(%1), %%mm0\n"
17315 + " movq 40(%1), %%mm1\n"
17316 + " movq 48(%1), %%mm2\n"
17317 + " movq 56(%1), %%mm3\n"
17318 + " movq %%mm0, 32(%2)\n"
17319 + " movq %%mm1, 40(%2)\n"
17320 + " movq %%mm2, 48(%2)\n"
17321 + " movq %%mm3, 56(%2)\n"
17322 ".section .fixup, \"ax\"\n"
17323 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17324 + "3:\n"
17325 +
17326 +#ifdef CONFIG_PAX_KERNEXEC
17327 + " movl %%cr0, %0\n"
17328 + " movl %0, %%eax\n"
17329 + " andl $0xFFFEFFFF, %%eax\n"
17330 + " movl %%eax, %%cr0\n"
17331 +#endif
17332 +
17333 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17334 +
17335 +#ifdef CONFIG_PAX_KERNEXEC
17336 + " movl %0, %%cr0\n"
17337 +#endif
17338 +
17339 " jmp 2b\n"
17340 ".previous\n"
17341 _ASM_EXTABLE(1b, 3b)
17342 - : : "r" (from), "r" (to) : "memory");
17343 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17344
17345 from += 64;
17346 to += 64;
17347 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17348 static void fast_copy_page(void *to, void *from)
17349 {
17350 int i;
17351 + unsigned long cr0;
17352
17353 kernel_fpu_begin();
17354
17355 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17356 * but that is for later. -AV
17357 */
17358 __asm__ __volatile__(
17359 - "1: prefetch (%0)\n"
17360 - " prefetch 64(%0)\n"
17361 - " prefetch 128(%0)\n"
17362 - " prefetch 192(%0)\n"
17363 - " prefetch 256(%0)\n"
17364 + "1: prefetch (%1)\n"
17365 + " prefetch 64(%1)\n"
17366 + " prefetch 128(%1)\n"
17367 + " prefetch 192(%1)\n"
17368 + " prefetch 256(%1)\n"
17369 "2: \n"
17370 ".section .fixup, \"ax\"\n"
17371 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17372 + "3: \n"
17373 +
17374 +#ifdef CONFIG_PAX_KERNEXEC
17375 + " movl %%cr0, %0\n"
17376 + " movl %0, %%eax\n"
17377 + " andl $0xFFFEFFFF, %%eax\n"
17378 + " movl %%eax, %%cr0\n"
17379 +#endif
17380 +
17381 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17382 +
17383 +#ifdef CONFIG_PAX_KERNEXEC
17384 + " movl %0, %%cr0\n"
17385 +#endif
17386 +
17387 " jmp 2b\n"
17388 ".previous\n"
17389 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17390 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17391
17392 for (i = 0; i < (4096-320)/64; i++) {
17393 __asm__ __volatile__ (
17394 - "1: prefetch 320(%0)\n"
17395 - "2: movq (%0), %%mm0\n"
17396 - " movntq %%mm0, (%1)\n"
17397 - " movq 8(%0), %%mm1\n"
17398 - " movntq %%mm1, 8(%1)\n"
17399 - " movq 16(%0), %%mm2\n"
17400 - " movntq %%mm2, 16(%1)\n"
17401 - " movq 24(%0), %%mm3\n"
17402 - " movntq %%mm3, 24(%1)\n"
17403 - " movq 32(%0), %%mm4\n"
17404 - " movntq %%mm4, 32(%1)\n"
17405 - " movq 40(%0), %%mm5\n"
17406 - " movntq %%mm5, 40(%1)\n"
17407 - " movq 48(%0), %%mm6\n"
17408 - " movntq %%mm6, 48(%1)\n"
17409 - " movq 56(%0), %%mm7\n"
17410 - " movntq %%mm7, 56(%1)\n"
17411 + "1: prefetch 320(%1)\n"
17412 + "2: movq (%1), %%mm0\n"
17413 + " movntq %%mm0, (%2)\n"
17414 + " movq 8(%1), %%mm1\n"
17415 + " movntq %%mm1, 8(%2)\n"
17416 + " movq 16(%1), %%mm2\n"
17417 + " movntq %%mm2, 16(%2)\n"
17418 + " movq 24(%1), %%mm3\n"
17419 + " movntq %%mm3, 24(%2)\n"
17420 + " movq 32(%1), %%mm4\n"
17421 + " movntq %%mm4, 32(%2)\n"
17422 + " movq 40(%1), %%mm5\n"
17423 + " movntq %%mm5, 40(%2)\n"
17424 + " movq 48(%1), %%mm6\n"
17425 + " movntq %%mm6, 48(%2)\n"
17426 + " movq 56(%1), %%mm7\n"
17427 + " movntq %%mm7, 56(%2)\n"
17428 ".section .fixup, \"ax\"\n"
17429 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17430 + "3:\n"
17431 +
17432 +#ifdef CONFIG_PAX_KERNEXEC
17433 + " movl %%cr0, %0\n"
17434 + " movl %0, %%eax\n"
17435 + " andl $0xFFFEFFFF, %%eax\n"
17436 + " movl %%eax, %%cr0\n"
17437 +#endif
17438 +
17439 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17440 +
17441 +#ifdef CONFIG_PAX_KERNEXEC
17442 + " movl %0, %%cr0\n"
17443 +#endif
17444 +
17445 " jmp 2b\n"
17446 ".previous\n"
17447 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17448 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17449
17450 from += 64;
17451 to += 64;
17452 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17453 static void fast_copy_page(void *to, void *from)
17454 {
17455 int i;
17456 + unsigned long cr0;
17457
17458 kernel_fpu_begin();
17459
17460 __asm__ __volatile__ (
17461 - "1: prefetch (%0)\n"
17462 - " prefetch 64(%0)\n"
17463 - " prefetch 128(%0)\n"
17464 - " prefetch 192(%0)\n"
17465 - " prefetch 256(%0)\n"
17466 + "1: prefetch (%1)\n"
17467 + " prefetch 64(%1)\n"
17468 + " prefetch 128(%1)\n"
17469 + " prefetch 192(%1)\n"
17470 + " prefetch 256(%1)\n"
17471 "2: \n"
17472 ".section .fixup, \"ax\"\n"
17473 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17474 + "3: \n"
17475 +
17476 +#ifdef CONFIG_PAX_KERNEXEC
17477 + " movl %%cr0, %0\n"
17478 + " movl %0, %%eax\n"
17479 + " andl $0xFFFEFFFF, %%eax\n"
17480 + " movl %%eax, %%cr0\n"
17481 +#endif
17482 +
17483 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17484 +
17485 +#ifdef CONFIG_PAX_KERNEXEC
17486 + " movl %0, %%cr0\n"
17487 +#endif
17488 +
17489 " jmp 2b\n"
17490 ".previous\n"
17491 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17492 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17493
17494 for (i = 0; i < 4096/64; i++) {
17495 __asm__ __volatile__ (
17496 - "1: prefetch 320(%0)\n"
17497 - "2: movq (%0), %%mm0\n"
17498 - " movq 8(%0), %%mm1\n"
17499 - " movq 16(%0), %%mm2\n"
17500 - " movq 24(%0), %%mm3\n"
17501 - " movq %%mm0, (%1)\n"
17502 - " movq %%mm1, 8(%1)\n"
17503 - " movq %%mm2, 16(%1)\n"
17504 - " movq %%mm3, 24(%1)\n"
17505 - " movq 32(%0), %%mm0\n"
17506 - " movq 40(%0), %%mm1\n"
17507 - " movq 48(%0), %%mm2\n"
17508 - " movq 56(%0), %%mm3\n"
17509 - " movq %%mm0, 32(%1)\n"
17510 - " movq %%mm1, 40(%1)\n"
17511 - " movq %%mm2, 48(%1)\n"
17512 - " movq %%mm3, 56(%1)\n"
17513 + "1: prefetch 320(%1)\n"
17514 + "2: movq (%1), %%mm0\n"
17515 + " movq 8(%1), %%mm1\n"
17516 + " movq 16(%1), %%mm2\n"
17517 + " movq 24(%1), %%mm3\n"
17518 + " movq %%mm0, (%2)\n"
17519 + " movq %%mm1, 8(%2)\n"
17520 + " movq %%mm2, 16(%2)\n"
17521 + " movq %%mm3, 24(%2)\n"
17522 + " movq 32(%1), %%mm0\n"
17523 + " movq 40(%1), %%mm1\n"
17524 + " movq 48(%1), %%mm2\n"
17525 + " movq 56(%1), %%mm3\n"
17526 + " movq %%mm0, 32(%2)\n"
17527 + " movq %%mm1, 40(%2)\n"
17528 + " movq %%mm2, 48(%2)\n"
17529 + " movq %%mm3, 56(%2)\n"
17530 ".section .fixup, \"ax\"\n"
17531 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17532 + "3:\n"
17533 +
17534 +#ifdef CONFIG_PAX_KERNEXEC
17535 + " movl %%cr0, %0\n"
17536 + " movl %0, %%eax\n"
17537 + " andl $0xFFFEFFFF, %%eax\n"
17538 + " movl %%eax, %%cr0\n"
17539 +#endif
17540 +
17541 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17542 +
17543 +#ifdef CONFIG_PAX_KERNEXEC
17544 + " movl %0, %%cr0\n"
17545 +#endif
17546 +
17547 " jmp 2b\n"
17548 ".previous\n"
17549 _ASM_EXTABLE(1b, 3b)
17550 - : : "r" (from), "r" (to) : "memory");
17551 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17552
17553 from += 64;
17554 to += 64;
17555 diff -urNp linux-3.0.3/arch/x86/lib/putuser.S linux-3.0.3/arch/x86/lib/putuser.S
17556 --- linux-3.0.3/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17557 +++ linux-3.0.3/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17558 @@ -15,7 +15,8 @@
17559 #include <asm/thread_info.h>
17560 #include <asm/errno.h>
17561 #include <asm/asm.h>
17562 -
17563 +#include <asm/segment.h>
17564 +#include <asm/pgtable.h>
17565
17566 /*
17567 * __put_user_X
17568 @@ -29,52 +30,119 @@
17569 * as they get called from within inline assembly.
17570 */
17571
17572 -#define ENTER CFI_STARTPROC ; \
17573 - GET_THREAD_INFO(%_ASM_BX)
17574 +#define ENTER CFI_STARTPROC
17575 #define EXIT ret ; \
17576 CFI_ENDPROC
17577
17578 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17579 +#define _DEST %_ASM_CX,%_ASM_BX
17580 +#else
17581 +#define _DEST %_ASM_CX
17582 +#endif
17583 +
17584 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17585 +#define __copyuser_seg gs;
17586 +#else
17587 +#define __copyuser_seg
17588 +#endif
17589 +
17590 .text
17591 ENTRY(__put_user_1)
17592 ENTER
17593 +
17594 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17595 + GET_THREAD_INFO(%_ASM_BX)
17596 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17597 jae bad_put_user
17598 -1: movb %al,(%_ASM_CX)
17599 +
17600 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17601 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17602 + cmp %_ASM_BX,%_ASM_CX
17603 + jb 1234f
17604 + xor %ebx,%ebx
17605 +1234:
17606 +#endif
17607 +
17608 +#endif
17609 +
17610 +1: __copyuser_seg movb %al,(_DEST)
17611 xor %eax,%eax
17612 EXIT
17613 ENDPROC(__put_user_1)
17614
17615 ENTRY(__put_user_2)
17616 ENTER
17617 +
17618 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17619 + GET_THREAD_INFO(%_ASM_BX)
17620 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17621 sub $1,%_ASM_BX
17622 cmp %_ASM_BX,%_ASM_CX
17623 jae bad_put_user
17624 -2: movw %ax,(%_ASM_CX)
17625 +
17626 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17627 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17628 + cmp %_ASM_BX,%_ASM_CX
17629 + jb 1234f
17630 + xor %ebx,%ebx
17631 +1234:
17632 +#endif
17633 +
17634 +#endif
17635 +
17636 +2: __copyuser_seg movw %ax,(_DEST)
17637 xor %eax,%eax
17638 EXIT
17639 ENDPROC(__put_user_2)
17640
17641 ENTRY(__put_user_4)
17642 ENTER
17643 +
17644 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17645 + GET_THREAD_INFO(%_ASM_BX)
17646 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17647 sub $3,%_ASM_BX
17648 cmp %_ASM_BX,%_ASM_CX
17649 jae bad_put_user
17650 -3: movl %eax,(%_ASM_CX)
17651 +
17652 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17653 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17654 + cmp %_ASM_BX,%_ASM_CX
17655 + jb 1234f
17656 + xor %ebx,%ebx
17657 +1234:
17658 +#endif
17659 +
17660 +#endif
17661 +
17662 +3: __copyuser_seg movl %eax,(_DEST)
17663 xor %eax,%eax
17664 EXIT
17665 ENDPROC(__put_user_4)
17666
17667 ENTRY(__put_user_8)
17668 ENTER
17669 +
17670 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17671 + GET_THREAD_INFO(%_ASM_BX)
17672 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17673 sub $7,%_ASM_BX
17674 cmp %_ASM_BX,%_ASM_CX
17675 jae bad_put_user
17676 -4: mov %_ASM_AX,(%_ASM_CX)
17677 +
17678 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17679 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17680 + cmp %_ASM_BX,%_ASM_CX
17681 + jb 1234f
17682 + xor %ebx,%ebx
17683 +1234:
17684 +#endif
17685 +
17686 +#endif
17687 +
17688 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17689 #ifdef CONFIG_X86_32
17690 -5: movl %edx,4(%_ASM_CX)
17691 +5: __copyuser_seg movl %edx,4(_DEST)
17692 #endif
17693 xor %eax,%eax
17694 EXIT
17695 diff -urNp linux-3.0.3/arch/x86/lib/usercopy_32.c linux-3.0.3/arch/x86/lib/usercopy_32.c
17696 --- linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
17697 +++ linux-3.0.3/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
17698 @@ -43,7 +43,7 @@ do { \
17699 __asm__ __volatile__( \
17700 " testl %1,%1\n" \
17701 " jz 2f\n" \
17702 - "0: lodsb\n" \
17703 + "0: "__copyuser_seg"lodsb\n" \
17704 " stosb\n" \
17705 " testb %%al,%%al\n" \
17706 " jz 1f\n" \
17707 @@ -128,10 +128,12 @@ do { \
17708 int __d0; \
17709 might_fault(); \
17710 __asm__ __volatile__( \
17711 + __COPYUSER_SET_ES \
17712 "0: rep; stosl\n" \
17713 " movl %2,%0\n" \
17714 "1: rep; stosb\n" \
17715 "2:\n" \
17716 + __COPYUSER_RESTORE_ES \
17717 ".section .fixup,\"ax\"\n" \
17718 "3: lea 0(%2,%0,4),%0\n" \
17719 " jmp 2b\n" \
17720 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17721 might_fault();
17722
17723 __asm__ __volatile__(
17724 + __COPYUSER_SET_ES
17725 " testl %0, %0\n"
17726 " jz 3f\n"
17727 " andl %0,%%ecx\n"
17728 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17729 " subl %%ecx,%0\n"
17730 " addl %0,%%eax\n"
17731 "1:\n"
17732 + __COPYUSER_RESTORE_ES
17733 ".section .fixup,\"ax\"\n"
17734 "2: xorl %%eax,%%eax\n"
17735 " jmp 1b\n"
17736 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17737
17738 #ifdef CONFIG_X86_INTEL_USERCOPY
17739 static unsigned long
17740 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17741 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17742 {
17743 int d0, d1;
17744 __asm__ __volatile__(
17745 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17746 " .align 2,0x90\n"
17747 "3: movl 0(%4), %%eax\n"
17748 "4: movl 4(%4), %%edx\n"
17749 - "5: movl %%eax, 0(%3)\n"
17750 - "6: movl %%edx, 4(%3)\n"
17751 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17752 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17753 "7: movl 8(%4), %%eax\n"
17754 "8: movl 12(%4),%%edx\n"
17755 - "9: movl %%eax, 8(%3)\n"
17756 - "10: movl %%edx, 12(%3)\n"
17757 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17758 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17759 "11: movl 16(%4), %%eax\n"
17760 "12: movl 20(%4), %%edx\n"
17761 - "13: movl %%eax, 16(%3)\n"
17762 - "14: movl %%edx, 20(%3)\n"
17763 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17764 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17765 "15: movl 24(%4), %%eax\n"
17766 "16: movl 28(%4), %%edx\n"
17767 - "17: movl %%eax, 24(%3)\n"
17768 - "18: movl %%edx, 28(%3)\n"
17769 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17770 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17771 "19: movl 32(%4), %%eax\n"
17772 "20: movl 36(%4), %%edx\n"
17773 - "21: movl %%eax, 32(%3)\n"
17774 - "22: movl %%edx, 36(%3)\n"
17775 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17776 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17777 "23: movl 40(%4), %%eax\n"
17778 "24: movl 44(%4), %%edx\n"
17779 - "25: movl %%eax, 40(%3)\n"
17780 - "26: movl %%edx, 44(%3)\n"
17781 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17782 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17783 "27: movl 48(%4), %%eax\n"
17784 "28: movl 52(%4), %%edx\n"
17785 - "29: movl %%eax, 48(%3)\n"
17786 - "30: movl %%edx, 52(%3)\n"
17787 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17788 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17789 "31: movl 56(%4), %%eax\n"
17790 "32: movl 60(%4), %%edx\n"
17791 - "33: movl %%eax, 56(%3)\n"
17792 - "34: movl %%edx, 60(%3)\n"
17793 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17794 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17795 " addl $-64, %0\n"
17796 " addl $64, %4\n"
17797 " addl $64, %3\n"
17798 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17799 " shrl $2, %0\n"
17800 " andl $3, %%eax\n"
17801 " cld\n"
17802 + __COPYUSER_SET_ES
17803 "99: rep; movsl\n"
17804 "36: movl %%eax, %0\n"
17805 "37: rep; movsb\n"
17806 "100:\n"
17807 + __COPYUSER_RESTORE_ES
17808 + ".section .fixup,\"ax\"\n"
17809 + "101: lea 0(%%eax,%0,4),%0\n"
17810 + " jmp 100b\n"
17811 + ".previous\n"
17812 + ".section __ex_table,\"a\"\n"
17813 + " .align 4\n"
17814 + " .long 1b,100b\n"
17815 + " .long 2b,100b\n"
17816 + " .long 3b,100b\n"
17817 + " .long 4b,100b\n"
17818 + " .long 5b,100b\n"
17819 + " .long 6b,100b\n"
17820 + " .long 7b,100b\n"
17821 + " .long 8b,100b\n"
17822 + " .long 9b,100b\n"
17823 + " .long 10b,100b\n"
17824 + " .long 11b,100b\n"
17825 + " .long 12b,100b\n"
17826 + " .long 13b,100b\n"
17827 + " .long 14b,100b\n"
17828 + " .long 15b,100b\n"
17829 + " .long 16b,100b\n"
17830 + " .long 17b,100b\n"
17831 + " .long 18b,100b\n"
17832 + " .long 19b,100b\n"
17833 + " .long 20b,100b\n"
17834 + " .long 21b,100b\n"
17835 + " .long 22b,100b\n"
17836 + " .long 23b,100b\n"
17837 + " .long 24b,100b\n"
17838 + " .long 25b,100b\n"
17839 + " .long 26b,100b\n"
17840 + " .long 27b,100b\n"
17841 + " .long 28b,100b\n"
17842 + " .long 29b,100b\n"
17843 + " .long 30b,100b\n"
17844 + " .long 31b,100b\n"
17845 + " .long 32b,100b\n"
17846 + " .long 33b,100b\n"
17847 + " .long 34b,100b\n"
17848 + " .long 35b,100b\n"
17849 + " .long 36b,100b\n"
17850 + " .long 37b,100b\n"
17851 + " .long 99b,101b\n"
17852 + ".previous"
17853 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17854 + : "1"(to), "2"(from), "0"(size)
17855 + : "eax", "edx", "memory");
17856 + return size;
17857 +}
17858 +
17859 +static unsigned long
17860 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17861 +{
17862 + int d0, d1;
17863 + __asm__ __volatile__(
17864 + " .align 2,0x90\n"
17865 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17866 + " cmpl $67, %0\n"
17867 + " jbe 3f\n"
17868 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17869 + " .align 2,0x90\n"
17870 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17871 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17872 + "5: movl %%eax, 0(%3)\n"
17873 + "6: movl %%edx, 4(%3)\n"
17874 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17875 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17876 + "9: movl %%eax, 8(%3)\n"
17877 + "10: movl %%edx, 12(%3)\n"
17878 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17879 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17880 + "13: movl %%eax, 16(%3)\n"
17881 + "14: movl %%edx, 20(%3)\n"
17882 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17883 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17884 + "17: movl %%eax, 24(%3)\n"
17885 + "18: movl %%edx, 28(%3)\n"
17886 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17887 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17888 + "21: movl %%eax, 32(%3)\n"
17889 + "22: movl %%edx, 36(%3)\n"
17890 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17891 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17892 + "25: movl %%eax, 40(%3)\n"
17893 + "26: movl %%edx, 44(%3)\n"
17894 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17895 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17896 + "29: movl %%eax, 48(%3)\n"
17897 + "30: movl %%edx, 52(%3)\n"
17898 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17899 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17900 + "33: movl %%eax, 56(%3)\n"
17901 + "34: movl %%edx, 60(%3)\n"
17902 + " addl $-64, %0\n"
17903 + " addl $64, %4\n"
17904 + " addl $64, %3\n"
17905 + " cmpl $63, %0\n"
17906 + " ja 1b\n"
17907 + "35: movl %0, %%eax\n"
17908 + " shrl $2, %0\n"
17909 + " andl $3, %%eax\n"
17910 + " cld\n"
17911 + "99: rep; "__copyuser_seg" movsl\n"
17912 + "36: movl %%eax, %0\n"
17913 + "37: rep; "__copyuser_seg" movsb\n"
17914 + "100:\n"
17915 ".section .fixup,\"ax\"\n"
17916 "101: lea 0(%%eax,%0,4),%0\n"
17917 " jmp 100b\n"
17918 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17919 int d0, d1;
17920 __asm__ __volatile__(
17921 " .align 2,0x90\n"
17922 - "0: movl 32(%4), %%eax\n"
17923 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17924 " cmpl $67, %0\n"
17925 " jbe 2f\n"
17926 - "1: movl 64(%4), %%eax\n"
17927 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17928 " .align 2,0x90\n"
17929 - "2: movl 0(%4), %%eax\n"
17930 - "21: movl 4(%4), %%edx\n"
17931 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17932 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17933 " movl %%eax, 0(%3)\n"
17934 " movl %%edx, 4(%3)\n"
17935 - "3: movl 8(%4), %%eax\n"
17936 - "31: movl 12(%4),%%edx\n"
17937 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
17938 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
17939 " movl %%eax, 8(%3)\n"
17940 " movl %%edx, 12(%3)\n"
17941 - "4: movl 16(%4), %%eax\n"
17942 - "41: movl 20(%4), %%edx\n"
17943 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
17944 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
17945 " movl %%eax, 16(%3)\n"
17946 " movl %%edx, 20(%3)\n"
17947 - "10: movl 24(%4), %%eax\n"
17948 - "51: movl 28(%4), %%edx\n"
17949 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
17950 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
17951 " movl %%eax, 24(%3)\n"
17952 " movl %%edx, 28(%3)\n"
17953 - "11: movl 32(%4), %%eax\n"
17954 - "61: movl 36(%4), %%edx\n"
17955 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
17956 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
17957 " movl %%eax, 32(%3)\n"
17958 " movl %%edx, 36(%3)\n"
17959 - "12: movl 40(%4), %%eax\n"
17960 - "71: movl 44(%4), %%edx\n"
17961 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
17962 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
17963 " movl %%eax, 40(%3)\n"
17964 " movl %%edx, 44(%3)\n"
17965 - "13: movl 48(%4), %%eax\n"
17966 - "81: movl 52(%4), %%edx\n"
17967 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
17968 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
17969 " movl %%eax, 48(%3)\n"
17970 " movl %%edx, 52(%3)\n"
17971 - "14: movl 56(%4), %%eax\n"
17972 - "91: movl 60(%4), %%edx\n"
17973 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
17974 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
17975 " movl %%eax, 56(%3)\n"
17976 " movl %%edx, 60(%3)\n"
17977 " addl $-64, %0\n"
17978 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
17979 " shrl $2, %0\n"
17980 " andl $3, %%eax\n"
17981 " cld\n"
17982 - "6: rep; movsl\n"
17983 + "6: rep; "__copyuser_seg" movsl\n"
17984 " movl %%eax,%0\n"
17985 - "7: rep; movsb\n"
17986 + "7: rep; "__copyuser_seg" movsb\n"
17987 "8:\n"
17988 ".section .fixup,\"ax\"\n"
17989 "9: lea 0(%%eax,%0,4),%0\n"
17990 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
17991
17992 __asm__ __volatile__(
17993 " .align 2,0x90\n"
17994 - "0: movl 32(%4), %%eax\n"
17995 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17996 " cmpl $67, %0\n"
17997 " jbe 2f\n"
17998 - "1: movl 64(%4), %%eax\n"
17999 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18000 " .align 2,0x90\n"
18001 - "2: movl 0(%4), %%eax\n"
18002 - "21: movl 4(%4), %%edx\n"
18003 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18004 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18005 " movnti %%eax, 0(%3)\n"
18006 " movnti %%edx, 4(%3)\n"
18007 - "3: movl 8(%4), %%eax\n"
18008 - "31: movl 12(%4),%%edx\n"
18009 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18010 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18011 " movnti %%eax, 8(%3)\n"
18012 " movnti %%edx, 12(%3)\n"
18013 - "4: movl 16(%4), %%eax\n"
18014 - "41: movl 20(%4), %%edx\n"
18015 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18016 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18017 " movnti %%eax, 16(%3)\n"
18018 " movnti %%edx, 20(%3)\n"
18019 - "10: movl 24(%4), %%eax\n"
18020 - "51: movl 28(%4), %%edx\n"
18021 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18022 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18023 " movnti %%eax, 24(%3)\n"
18024 " movnti %%edx, 28(%3)\n"
18025 - "11: movl 32(%4), %%eax\n"
18026 - "61: movl 36(%4), %%edx\n"
18027 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18028 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18029 " movnti %%eax, 32(%3)\n"
18030 " movnti %%edx, 36(%3)\n"
18031 - "12: movl 40(%4), %%eax\n"
18032 - "71: movl 44(%4), %%edx\n"
18033 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18034 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18035 " movnti %%eax, 40(%3)\n"
18036 " movnti %%edx, 44(%3)\n"
18037 - "13: movl 48(%4), %%eax\n"
18038 - "81: movl 52(%4), %%edx\n"
18039 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18040 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18041 " movnti %%eax, 48(%3)\n"
18042 " movnti %%edx, 52(%3)\n"
18043 - "14: movl 56(%4), %%eax\n"
18044 - "91: movl 60(%4), %%edx\n"
18045 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18046 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18047 " movnti %%eax, 56(%3)\n"
18048 " movnti %%edx, 60(%3)\n"
18049 " addl $-64, %0\n"
18050 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18051 " shrl $2, %0\n"
18052 " andl $3, %%eax\n"
18053 " cld\n"
18054 - "6: rep; movsl\n"
18055 + "6: rep; "__copyuser_seg" movsl\n"
18056 " movl %%eax,%0\n"
18057 - "7: rep; movsb\n"
18058 + "7: rep; "__copyuser_seg" movsb\n"
18059 "8:\n"
18060 ".section .fixup,\"ax\"\n"
18061 "9: lea 0(%%eax,%0,4),%0\n"
18062 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18063
18064 __asm__ __volatile__(
18065 " .align 2,0x90\n"
18066 - "0: movl 32(%4), %%eax\n"
18067 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18068 " cmpl $67, %0\n"
18069 " jbe 2f\n"
18070 - "1: movl 64(%4), %%eax\n"
18071 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18072 " .align 2,0x90\n"
18073 - "2: movl 0(%4), %%eax\n"
18074 - "21: movl 4(%4), %%edx\n"
18075 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18076 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18077 " movnti %%eax, 0(%3)\n"
18078 " movnti %%edx, 4(%3)\n"
18079 - "3: movl 8(%4), %%eax\n"
18080 - "31: movl 12(%4),%%edx\n"
18081 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18082 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18083 " movnti %%eax, 8(%3)\n"
18084 " movnti %%edx, 12(%3)\n"
18085 - "4: movl 16(%4), %%eax\n"
18086 - "41: movl 20(%4), %%edx\n"
18087 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18088 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18089 " movnti %%eax, 16(%3)\n"
18090 " movnti %%edx, 20(%3)\n"
18091 - "10: movl 24(%4), %%eax\n"
18092 - "51: movl 28(%4), %%edx\n"
18093 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18094 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18095 " movnti %%eax, 24(%3)\n"
18096 " movnti %%edx, 28(%3)\n"
18097 - "11: movl 32(%4), %%eax\n"
18098 - "61: movl 36(%4), %%edx\n"
18099 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18100 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18101 " movnti %%eax, 32(%3)\n"
18102 " movnti %%edx, 36(%3)\n"
18103 - "12: movl 40(%4), %%eax\n"
18104 - "71: movl 44(%4), %%edx\n"
18105 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18106 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18107 " movnti %%eax, 40(%3)\n"
18108 " movnti %%edx, 44(%3)\n"
18109 - "13: movl 48(%4), %%eax\n"
18110 - "81: movl 52(%4), %%edx\n"
18111 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18112 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18113 " movnti %%eax, 48(%3)\n"
18114 " movnti %%edx, 52(%3)\n"
18115 - "14: movl 56(%4), %%eax\n"
18116 - "91: movl 60(%4), %%edx\n"
18117 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18118 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18119 " movnti %%eax, 56(%3)\n"
18120 " movnti %%edx, 60(%3)\n"
18121 " addl $-64, %0\n"
18122 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18123 " shrl $2, %0\n"
18124 " andl $3, %%eax\n"
18125 " cld\n"
18126 - "6: rep; movsl\n"
18127 + "6: rep; "__copyuser_seg" movsl\n"
18128 " movl %%eax,%0\n"
18129 - "7: rep; movsb\n"
18130 + "7: rep; "__copyuser_seg" movsb\n"
18131 "8:\n"
18132 ".section .fixup,\"ax\"\n"
18133 "9: lea 0(%%eax,%0,4),%0\n"
18134 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18135 */
18136 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18137 unsigned long size);
18138 -unsigned long __copy_user_intel(void __user *to, const void *from,
18139 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18140 + unsigned long size);
18141 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18142 unsigned long size);
18143 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18144 const void __user *from, unsigned long size);
18145 #endif /* CONFIG_X86_INTEL_USERCOPY */
18146
18147 /* Generic arbitrary sized copy. */
18148 -#define __copy_user(to, from, size) \
18149 +#define __copy_user(to, from, size, prefix, set, restore) \
18150 do { \
18151 int __d0, __d1, __d2; \
18152 __asm__ __volatile__( \
18153 + set \
18154 " cmp $7,%0\n" \
18155 " jbe 1f\n" \
18156 " movl %1,%0\n" \
18157 " negl %0\n" \
18158 " andl $7,%0\n" \
18159 " subl %0,%3\n" \
18160 - "4: rep; movsb\n" \
18161 + "4: rep; "prefix"movsb\n" \
18162 " movl %3,%0\n" \
18163 " shrl $2,%0\n" \
18164 " andl $3,%3\n" \
18165 " .align 2,0x90\n" \
18166 - "0: rep; movsl\n" \
18167 + "0: rep; "prefix"movsl\n" \
18168 " movl %3,%0\n" \
18169 - "1: rep; movsb\n" \
18170 + "1: rep; "prefix"movsb\n" \
18171 "2:\n" \
18172 + restore \
18173 ".section .fixup,\"ax\"\n" \
18174 "5: addl %3,%0\n" \
18175 " jmp 2b\n" \
18176 @@ -682,14 +799,14 @@ do { \
18177 " negl %0\n" \
18178 " andl $7,%0\n" \
18179 " subl %0,%3\n" \
18180 - "4: rep; movsb\n" \
18181 + "4: rep; "__copyuser_seg"movsb\n" \
18182 " movl %3,%0\n" \
18183 " shrl $2,%0\n" \
18184 " andl $3,%3\n" \
18185 " .align 2,0x90\n" \
18186 - "0: rep; movsl\n" \
18187 + "0: rep; "__copyuser_seg"movsl\n" \
18188 " movl %3,%0\n" \
18189 - "1: rep; movsb\n" \
18190 + "1: rep; "__copyuser_seg"movsb\n" \
18191 "2:\n" \
18192 ".section .fixup,\"ax\"\n" \
18193 "5: addl %3,%0\n" \
18194 @@ -775,9 +892,9 @@ survive:
18195 }
18196 #endif
18197 if (movsl_is_ok(to, from, n))
18198 - __copy_user(to, from, n);
18199 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18200 else
18201 - n = __copy_user_intel(to, from, n);
18202 + n = __generic_copy_to_user_intel(to, from, n);
18203 return n;
18204 }
18205 EXPORT_SYMBOL(__copy_to_user_ll);
18206 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18207 unsigned long n)
18208 {
18209 if (movsl_is_ok(to, from, n))
18210 - __copy_user(to, from, n);
18211 + __copy_user(to, from, n, __copyuser_seg, "", "");
18212 else
18213 - n = __copy_user_intel((void __user *)to,
18214 - (const void *)from, n);
18215 + n = __generic_copy_from_user_intel(to, from, n);
18216 return n;
18217 }
18218 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18219 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18220 if (n > 64 && cpu_has_xmm2)
18221 n = __copy_user_intel_nocache(to, from, n);
18222 else
18223 - __copy_user(to, from, n);
18224 + __copy_user(to, from, n, __copyuser_seg, "", "");
18225 #else
18226 - __copy_user(to, from, n);
18227 + __copy_user(to, from, n, __copyuser_seg, "", "");
18228 #endif
18229 return n;
18230 }
18231 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18232
18233 -/**
18234 - * copy_to_user: - Copy a block of data into user space.
18235 - * @to: Destination address, in user space.
18236 - * @from: Source address, in kernel space.
18237 - * @n: Number of bytes to copy.
18238 - *
18239 - * Context: User context only. This function may sleep.
18240 - *
18241 - * Copy data from kernel space to user space.
18242 - *
18243 - * Returns number of bytes that could not be copied.
18244 - * On success, this will be zero.
18245 - */
18246 -unsigned long
18247 -copy_to_user(void __user *to, const void *from, unsigned long n)
18248 +void copy_from_user_overflow(void)
18249 {
18250 - if (access_ok(VERIFY_WRITE, to, n))
18251 - n = __copy_to_user(to, from, n);
18252 - return n;
18253 + WARN(1, "Buffer overflow detected!\n");
18254 }
18255 -EXPORT_SYMBOL(copy_to_user);
18256 +EXPORT_SYMBOL(copy_from_user_overflow);
18257
18258 -/**
18259 - * copy_from_user: - Copy a block of data from user space.
18260 - * @to: Destination address, in kernel space.
18261 - * @from: Source address, in user space.
18262 - * @n: Number of bytes to copy.
18263 - *
18264 - * Context: User context only. This function may sleep.
18265 - *
18266 - * Copy data from user space to kernel space.
18267 - *
18268 - * Returns number of bytes that could not be copied.
18269 - * On success, this will be zero.
18270 - *
18271 - * If some data could not be copied, this function will pad the copied
18272 - * data to the requested size using zero bytes.
18273 - */
18274 -unsigned long
18275 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18276 +void copy_to_user_overflow(void)
18277 {
18278 - if (access_ok(VERIFY_READ, from, n))
18279 - n = __copy_from_user(to, from, n);
18280 - else
18281 - memset(to, 0, n);
18282 - return n;
18283 + WARN(1, "Buffer overflow detected!\n");
18284 }
18285 -EXPORT_SYMBOL(_copy_from_user);
18286 +EXPORT_SYMBOL(copy_to_user_overflow);
18287
18288 -void copy_from_user_overflow(void)
18289 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18290 +void __set_fs(mm_segment_t x)
18291 {
18292 - WARN(1, "Buffer overflow detected!\n");
18293 + switch (x.seg) {
18294 + case 0:
18295 + loadsegment(gs, 0);
18296 + break;
18297 + case TASK_SIZE_MAX:
18298 + loadsegment(gs, __USER_DS);
18299 + break;
18300 + case -1UL:
18301 + loadsegment(gs, __KERNEL_DS);
18302 + break;
18303 + default:
18304 + BUG();
18305 + }
18306 + return;
18307 }
18308 -EXPORT_SYMBOL(copy_from_user_overflow);
18309 +EXPORT_SYMBOL(__set_fs);
18310 +
18311 +void set_fs(mm_segment_t x)
18312 +{
18313 + current_thread_info()->addr_limit = x;
18314 + __set_fs(x);
18315 +}
18316 +EXPORT_SYMBOL(set_fs);
18317 +#endif
18318 diff -urNp linux-3.0.3/arch/x86/lib/usercopy_64.c linux-3.0.3/arch/x86/lib/usercopy_64.c
18319 --- linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18320 +++ linux-3.0.3/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18321 @@ -42,6 +42,12 @@ long
18322 __strncpy_from_user(char *dst, const char __user *src, long count)
18323 {
18324 long res;
18325 +
18326 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18327 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18328 + src += PAX_USER_SHADOW_BASE;
18329 +#endif
18330 +
18331 __do_strncpy_from_user(dst, src, count, res);
18332 return res;
18333 }
18334 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18335 {
18336 long __d0;
18337 might_fault();
18338 +
18339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18340 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18341 + addr += PAX_USER_SHADOW_BASE;
18342 +#endif
18343 +
18344 /* no memory constraint because it doesn't change any memory gcc knows
18345 about */
18346 asm volatile(
18347 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18348
18349 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18350 {
18351 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18352 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18353 +
18354 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18355 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18356 + to += PAX_USER_SHADOW_BASE;
18357 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18358 + from += PAX_USER_SHADOW_BASE;
18359 +#endif
18360 +
18361 return copy_user_generic((__force void *)to, (__force void *)from, len);
18362 - }
18363 - return len;
18364 + }
18365 + return len;
18366 }
18367 EXPORT_SYMBOL(copy_in_user);
18368
18369 diff -urNp linux-3.0.3/arch/x86/Makefile linux-3.0.3/arch/x86/Makefile
18370 --- linux-3.0.3/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18371 +++ linux-3.0.3/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18372 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18373 else
18374 BITS := 64
18375 UTS_MACHINE := x86_64
18376 + biarch := $(call cc-option,-m64)
18377 CHECKFLAGS += -D__x86_64__ -m64
18378
18379 KBUILD_AFLAGS += -m64
18380 @@ -195,3 +196,12 @@ define archhelp
18381 echo ' FDARGS="..." arguments for the booted kernel'
18382 echo ' FDINITRD=file initrd for the booted kernel'
18383 endef
18384 +
18385 +define OLD_LD
18386 +
18387 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18388 +*** Please upgrade your binutils to 2.18 or newer
18389 +endef
18390 +
18391 +archprepare:
18392 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18393 diff -urNp linux-3.0.3/arch/x86/mm/extable.c linux-3.0.3/arch/x86/mm/extable.c
18394 --- linux-3.0.3/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18395 +++ linux-3.0.3/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18396 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18397 const struct exception_table_entry *fixup;
18398
18399 #ifdef CONFIG_PNPBIOS
18400 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18401 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18402 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18403 extern u32 pnp_bios_is_utter_crap;
18404 pnp_bios_is_utter_crap = 1;
18405 diff -urNp linux-3.0.3/arch/x86/mm/fault.c linux-3.0.3/arch/x86/mm/fault.c
18406 --- linux-3.0.3/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18407 +++ linux-3.0.3/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18408 @@ -13,10 +13,18 @@
18409 #include <linux/perf_event.h> /* perf_sw_event */
18410 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18411 #include <linux/prefetch.h> /* prefetchw */
18412 +#include <linux/unistd.h>
18413 +#include <linux/compiler.h>
18414
18415 #include <asm/traps.h> /* dotraplinkage, ... */
18416 #include <asm/pgalloc.h> /* pgd_*(), ... */
18417 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18418 +#include <asm/vsyscall.h>
18419 +#include <asm/tlbflush.h>
18420 +
18421 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18422 +#include <asm/stacktrace.h>
18423 +#endif
18424
18425 /*
18426 * Page fault error code bits:
18427 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18428 int ret = 0;
18429
18430 /* kprobe_running() needs smp_processor_id() */
18431 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18432 + if (kprobes_built_in() && !user_mode(regs)) {
18433 preempt_disable();
18434 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18435 ret = 1;
18436 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18437 return !instr_lo || (instr_lo>>1) == 1;
18438 case 0x00:
18439 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18440 - if (probe_kernel_address(instr, opcode))
18441 + if (user_mode(regs)) {
18442 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18443 + return 0;
18444 + } else if (probe_kernel_address(instr, opcode))
18445 return 0;
18446
18447 *prefetch = (instr_lo == 0xF) &&
18448 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18449 while (instr < max_instr) {
18450 unsigned char opcode;
18451
18452 - if (probe_kernel_address(instr, opcode))
18453 + if (user_mode(regs)) {
18454 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18455 + break;
18456 + } else if (probe_kernel_address(instr, opcode))
18457 break;
18458
18459 instr++;
18460 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18461 force_sig_info(si_signo, &info, tsk);
18462 }
18463
18464 +#ifdef CONFIG_PAX_EMUTRAMP
18465 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18466 +#endif
18467 +
18468 +#ifdef CONFIG_PAX_PAGEEXEC
18469 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18470 +{
18471 + pgd_t *pgd;
18472 + pud_t *pud;
18473 + pmd_t *pmd;
18474 +
18475 + pgd = pgd_offset(mm, address);
18476 + if (!pgd_present(*pgd))
18477 + return NULL;
18478 + pud = pud_offset(pgd, address);
18479 + if (!pud_present(*pud))
18480 + return NULL;
18481 + pmd = pmd_offset(pud, address);
18482 + if (!pmd_present(*pmd))
18483 + return NULL;
18484 + return pmd;
18485 +}
18486 +#endif
18487 +
18488 DEFINE_SPINLOCK(pgd_lock);
18489 LIST_HEAD(pgd_list);
18490
18491 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18492 for (address = VMALLOC_START & PMD_MASK;
18493 address >= TASK_SIZE && address < FIXADDR_TOP;
18494 address += PMD_SIZE) {
18495 +
18496 +#ifdef CONFIG_PAX_PER_CPU_PGD
18497 + unsigned long cpu;
18498 +#else
18499 struct page *page;
18500 +#endif
18501
18502 spin_lock(&pgd_lock);
18503 +
18504 +#ifdef CONFIG_PAX_PER_CPU_PGD
18505 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18506 + pgd_t *pgd = get_cpu_pgd(cpu);
18507 + pmd_t *ret;
18508 +#else
18509 list_for_each_entry(page, &pgd_list, lru) {
18510 + pgd_t *pgd = page_address(page);
18511 spinlock_t *pgt_lock;
18512 pmd_t *ret;
18513
18514 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18515 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18516
18517 spin_lock(pgt_lock);
18518 - ret = vmalloc_sync_one(page_address(page), address);
18519 +#endif
18520 +
18521 + ret = vmalloc_sync_one(pgd, address);
18522 +
18523 +#ifndef CONFIG_PAX_PER_CPU_PGD
18524 spin_unlock(pgt_lock);
18525 +#endif
18526
18527 if (!ret)
18528 break;
18529 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18530 * an interrupt in the middle of a task switch..
18531 */
18532 pgd_paddr = read_cr3();
18533 +
18534 +#ifdef CONFIG_PAX_PER_CPU_PGD
18535 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18536 +#endif
18537 +
18538 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18539 if (!pmd_k)
18540 return -1;
18541 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
18542 * happen within a race in page table update. In the later
18543 * case just flush:
18544 */
18545 +
18546 +#ifdef CONFIG_PAX_PER_CPU_PGD
18547 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18548 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18549 +#else
18550 pgd = pgd_offset(current->active_mm, address);
18551 +#endif
18552 +
18553 pgd_ref = pgd_offset_k(address);
18554 if (pgd_none(*pgd_ref))
18555 return -1;
18556 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
18557 static int is_errata100(struct pt_regs *regs, unsigned long address)
18558 {
18559 #ifdef CONFIG_X86_64
18560 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18561 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18562 return 1;
18563 #endif
18564 return 0;
18565 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
18566 }
18567
18568 static const char nx_warning[] = KERN_CRIT
18569 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18570 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18571
18572 static void
18573 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18574 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
18575 if (!oops_may_print())
18576 return;
18577
18578 - if (error_code & PF_INSTR) {
18579 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18580 unsigned int level;
18581
18582 pte_t *pte = lookup_address(address, &level);
18583
18584 if (pte && pte_present(*pte) && !pte_exec(*pte))
18585 - printk(nx_warning, current_uid());
18586 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18587 + }
18588 +
18589 +#ifdef CONFIG_PAX_KERNEXEC
18590 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18591 + if (current->signal->curr_ip)
18592 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18593 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18594 + else
18595 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18596 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18597 }
18598 +#endif
18599
18600 printk(KERN_ALERT "BUG: unable to handle kernel ");
18601 if (address < PAGE_SIZE)
18602 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
18603 unsigned long address, int si_code)
18604 {
18605 struct task_struct *tsk = current;
18606 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18607 + struct mm_struct *mm = tsk->mm;
18608 +#endif
18609 +
18610 +#ifdef CONFIG_X86_64
18611 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18612 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
18613 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
18614 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
18615 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
18616 + return;
18617 + }
18618 + }
18619 +#endif
18620 +
18621 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18622 + if (mm && (error_code & PF_USER)) {
18623 + unsigned long ip = regs->ip;
18624 +
18625 + if (v8086_mode(regs))
18626 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18627 +
18628 + /*
18629 + * It's possible to have interrupts off here:
18630 + */
18631 + local_irq_enable();
18632 +
18633 +#ifdef CONFIG_PAX_PAGEEXEC
18634 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18635 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18636 +
18637 +#ifdef CONFIG_PAX_EMUTRAMP
18638 + switch (pax_handle_fetch_fault(regs)) {
18639 + case 2:
18640 + return;
18641 + }
18642 +#endif
18643 +
18644 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18645 + do_group_exit(SIGKILL);
18646 + }
18647 +#endif
18648 +
18649 +#ifdef CONFIG_PAX_SEGMEXEC
18650 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18651 +
18652 +#ifdef CONFIG_PAX_EMUTRAMP
18653 + switch (pax_handle_fetch_fault(regs)) {
18654 + case 2:
18655 + return;
18656 + }
18657 +#endif
18658 +
18659 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18660 + do_group_exit(SIGKILL);
18661 + }
18662 +#endif
18663 +
18664 + }
18665 +#endif
18666
18667 /* User mode accesses just cause a SIGSEGV */
18668 if (error_code & PF_USER) {
18669 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
18670 return 1;
18671 }
18672
18673 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18674 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18675 +{
18676 + pte_t *pte;
18677 + pmd_t *pmd;
18678 + spinlock_t *ptl;
18679 + unsigned char pte_mask;
18680 +
18681 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18682 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18683 + return 0;
18684 +
18685 + /* PaX: it's our fault, let's handle it if we can */
18686 +
18687 + /* PaX: take a look at read faults before acquiring any locks */
18688 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18689 + /* instruction fetch attempt from a protected page in user mode */
18690 + up_read(&mm->mmap_sem);
18691 +
18692 +#ifdef CONFIG_PAX_EMUTRAMP
18693 + switch (pax_handle_fetch_fault(regs)) {
18694 + case 2:
18695 + return 1;
18696 + }
18697 +#endif
18698 +
18699 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18700 + do_group_exit(SIGKILL);
18701 + }
18702 +
18703 + pmd = pax_get_pmd(mm, address);
18704 + if (unlikely(!pmd))
18705 + return 0;
18706 +
18707 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18708 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18709 + pte_unmap_unlock(pte, ptl);
18710 + return 0;
18711 + }
18712 +
18713 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18714 + /* write attempt to a protected page in user mode */
18715 + pte_unmap_unlock(pte, ptl);
18716 + return 0;
18717 + }
18718 +
18719 +#ifdef CONFIG_SMP
18720 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18721 +#else
18722 + if (likely(address > get_limit(regs->cs)))
18723 +#endif
18724 + {
18725 + set_pte(pte, pte_mkread(*pte));
18726 + __flush_tlb_one(address);
18727 + pte_unmap_unlock(pte, ptl);
18728 + up_read(&mm->mmap_sem);
18729 + return 1;
18730 + }
18731 +
18732 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18733 +
18734 + /*
18735 + * PaX: fill DTLB with user rights and retry
18736 + */
18737 + __asm__ __volatile__ (
18738 + "orb %2,(%1)\n"
18739 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18740 +/*
18741 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18742 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18743 + * page fault when examined during a TLB load attempt. this is true not only
18744 + * for PTEs holding a non-present entry but also present entries that will
18745 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18746 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18747 + * for our target pages since their PTEs are simply not in the TLBs at all.
18748 +
18749 + * the best thing in omitting it is that we gain around 15-20% speed in the
18750 + * fast path of the page fault handler and can get rid of tracing since we
18751 + * can no longer flush unintended entries.
18752 + */
18753 + "invlpg (%0)\n"
18754 +#endif
18755 + __copyuser_seg"testb $0,(%0)\n"
18756 + "xorb %3,(%1)\n"
18757 + :
18758 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18759 + : "memory", "cc");
18760 + pte_unmap_unlock(pte, ptl);
18761 + up_read(&mm->mmap_sem);
18762 + return 1;
18763 +}
18764 +#endif
18765 +
18766 /*
18767 * Handle a spurious fault caused by a stale TLB entry.
18768 *
18769 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
18770 static inline int
18771 access_error(unsigned long error_code, struct vm_area_struct *vma)
18772 {
18773 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18774 + return 1;
18775 +
18776 if (error_code & PF_WRITE) {
18777 /* write, present and write, not present: */
18778 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18779 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
18780 {
18781 struct vm_area_struct *vma;
18782 struct task_struct *tsk;
18783 - unsigned long address;
18784 struct mm_struct *mm;
18785 int fault;
18786 int write = error_code & PF_WRITE;
18787 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
18788 (write ? FAULT_FLAG_WRITE : 0);
18789
18790 + /* Get the faulting address: */
18791 + unsigned long address = read_cr2();
18792 +
18793 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18794 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18795 + if (!search_exception_tables(regs->ip)) {
18796 + bad_area_nosemaphore(regs, error_code, address);
18797 + return;
18798 + }
18799 + if (address < PAX_USER_SHADOW_BASE) {
18800 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18801 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18802 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18803 + } else
18804 + address -= PAX_USER_SHADOW_BASE;
18805 + }
18806 +#endif
18807 +
18808 tsk = current;
18809 mm = tsk->mm;
18810
18811 - /* Get the faulting address: */
18812 - address = read_cr2();
18813 -
18814 /*
18815 * Detect and handle instructions that would cause a page fault for
18816 * both a tracked kernel page and a userspace page.
18817 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
18818 * User-mode registers count as a user access even for any
18819 * potential system fault or CPU buglet:
18820 */
18821 - if (user_mode_vm(regs)) {
18822 + if (user_mode(regs)) {
18823 local_irq_enable();
18824 error_code |= PF_USER;
18825 } else {
18826 @@ -1103,6 +1351,11 @@ retry:
18827 might_sleep();
18828 }
18829
18830 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18831 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18832 + return;
18833 +#endif
18834 +
18835 vma = find_vma(mm, address);
18836 if (unlikely(!vma)) {
18837 bad_area(regs, error_code, address);
18838 @@ -1114,18 +1367,24 @@ retry:
18839 bad_area(regs, error_code, address);
18840 return;
18841 }
18842 - if (error_code & PF_USER) {
18843 - /*
18844 - * Accessing the stack below %sp is always a bug.
18845 - * The large cushion allows instructions like enter
18846 - * and pusha to work. ("enter $65535, $31" pushes
18847 - * 32 pointers and then decrements %sp by 65535.)
18848 - */
18849 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18850 - bad_area(regs, error_code, address);
18851 - return;
18852 - }
18853 + /*
18854 + * Accessing the stack below %sp is always a bug.
18855 + * The large cushion allows instructions like enter
18856 + * and pusha to work. ("enter $65535, $31" pushes
18857 + * 32 pointers and then decrements %sp by 65535.)
18858 + */
18859 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18860 + bad_area(regs, error_code, address);
18861 + return;
18862 }
18863 +
18864 +#ifdef CONFIG_PAX_SEGMEXEC
18865 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18866 + bad_area(regs, error_code, address);
18867 + return;
18868 + }
18869 +#endif
18870 +
18871 if (unlikely(expand_stack(vma, address))) {
18872 bad_area(regs, error_code, address);
18873 return;
18874 @@ -1180,3 +1439,199 @@ good_area:
18875
18876 up_read(&mm->mmap_sem);
18877 }
18878 +
18879 +#ifdef CONFIG_PAX_EMUTRAMP
18880 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
18881 +{
18882 + int err;
18883 +
18884 + do { /* PaX: gcc trampoline emulation #1 */
18885 + unsigned char mov1, mov2;
18886 + unsigned short jmp;
18887 + unsigned int addr1, addr2;
18888 +
18889 +#ifdef CONFIG_X86_64
18890 + if ((regs->ip + 11) >> 32)
18891 + break;
18892 +#endif
18893 +
18894 + err = get_user(mov1, (unsigned char __user *)regs->ip);
18895 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18896 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
18897 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18898 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
18899 +
18900 + if (err)
18901 + break;
18902 +
18903 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
18904 + regs->cx = addr1;
18905 + regs->ax = addr2;
18906 + regs->ip = addr2;
18907 + return 2;
18908 + }
18909 + } while (0);
18910 +
18911 + do { /* PaX: gcc trampoline emulation #2 */
18912 + unsigned char mov, jmp;
18913 + unsigned int addr1, addr2;
18914 +
18915 +#ifdef CONFIG_X86_64
18916 + if ((regs->ip + 9) >> 32)
18917 + break;
18918 +#endif
18919 +
18920 + err = get_user(mov, (unsigned char __user *)regs->ip);
18921 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
18922 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
18923 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
18924 +
18925 + if (err)
18926 + break;
18927 +
18928 + if (mov == 0xB9 && jmp == 0xE9) {
18929 + regs->cx = addr1;
18930 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
18931 + return 2;
18932 + }
18933 + } while (0);
18934 +
18935 + return 1; /* PaX in action */
18936 +}
18937 +
18938 +#ifdef CONFIG_X86_64
18939 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
18940 +{
18941 + int err;
18942 +
18943 + do { /* PaX: gcc trampoline emulation #1 */
18944 + unsigned short mov1, mov2, jmp1;
18945 + unsigned char jmp2;
18946 + unsigned int addr1;
18947 + unsigned long addr2;
18948 +
18949 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18950 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
18951 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18952 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18953 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18954 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18955 +
18956 + if (err)
18957 + break;
18958 +
18959 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18960 + regs->r11 = addr1;
18961 + regs->r10 = addr2;
18962 + regs->ip = addr1;
18963 + return 2;
18964 + }
18965 + } while (0);
18966 +
18967 + do { /* PaX: gcc trampoline emulation #2 */
18968 + unsigned short mov1, mov2, jmp1;
18969 + unsigned char jmp2;
18970 + unsigned long addr1, addr2;
18971 +
18972 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18973 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18974 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18975 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18976 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18977 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18978 +
18979 + if (err)
18980 + break;
18981 +
18982 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18983 + regs->r11 = addr1;
18984 + regs->r10 = addr2;
18985 + regs->ip = addr1;
18986 + return 2;
18987 + }
18988 + } while (0);
18989 +
18990 + return 1; /* PaX in action */
18991 +}
18992 +#endif
18993 +
18994 +/*
18995 + * PaX: decide what to do with offenders (regs->ip = fault address)
18996 + *
18997 + * returns 1 when task should be killed
18998 + * 2 when gcc trampoline was detected
18999 + */
19000 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19001 +{
19002 + if (v8086_mode(regs))
19003 + return 1;
19004 +
19005 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19006 + return 1;
19007 +
19008 +#ifdef CONFIG_X86_32
19009 + return pax_handle_fetch_fault_32(regs);
19010 +#else
19011 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19012 + return pax_handle_fetch_fault_32(regs);
19013 + else
19014 + return pax_handle_fetch_fault_64(regs);
19015 +#endif
19016 +}
19017 +#endif
19018 +
19019 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19020 +void pax_report_insns(void *pc, void *sp)
19021 +{
19022 + long i;
19023 +
19024 + printk(KERN_ERR "PAX: bytes at PC: ");
19025 + for (i = 0; i < 20; i++) {
19026 + unsigned char c;
19027 + if (get_user(c, (__force unsigned char __user *)pc+i))
19028 + printk(KERN_CONT "?? ");
19029 + else
19030 + printk(KERN_CONT "%02x ", c);
19031 + }
19032 + printk("\n");
19033 +
19034 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19035 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19036 + unsigned long c;
19037 + if (get_user(c, (__force unsigned long __user *)sp+i))
19038 +#ifdef CONFIG_X86_32
19039 + printk(KERN_CONT "???????? ");
19040 +#else
19041 + printk(KERN_CONT "???????????????? ");
19042 +#endif
19043 + else
19044 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19045 + }
19046 + printk("\n");
19047 +}
19048 +#endif
19049 +
19050 +/**
19051 + * probe_kernel_write(): safely attempt to write to a location
19052 + * @dst: address to write to
19053 + * @src: pointer to the data that shall be written
19054 + * @size: size of the data chunk
19055 + *
19056 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19057 + * happens, handle that and return -EFAULT.
19058 + */
19059 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19060 +{
19061 + long ret;
19062 + mm_segment_t old_fs = get_fs();
19063 +
19064 + set_fs(KERNEL_DS);
19065 + pagefault_disable();
19066 + pax_open_kernel();
19067 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19068 + pax_close_kernel();
19069 + pagefault_enable();
19070 + set_fs(old_fs);
19071 +
19072 + return ret ? -EFAULT : 0;
19073 +}
19074 diff -urNp linux-3.0.3/arch/x86/mm/gup.c linux-3.0.3/arch/x86/mm/gup.c
19075 --- linux-3.0.3/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19076 +++ linux-3.0.3/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19077 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19078 addr = start;
19079 len = (unsigned long) nr_pages << PAGE_SHIFT;
19080 end = start + len;
19081 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19082 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19083 (void __user *)start, len)))
19084 return 0;
19085
19086 diff -urNp linux-3.0.3/arch/x86/mm/highmem_32.c linux-3.0.3/arch/x86/mm/highmem_32.c
19087 --- linux-3.0.3/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19088 +++ linux-3.0.3/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19089 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19090 idx = type + KM_TYPE_NR*smp_processor_id();
19091 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19092 BUG_ON(!pte_none(*(kmap_pte-idx)));
19093 +
19094 + pax_open_kernel();
19095 set_pte(kmap_pte-idx, mk_pte(page, prot));
19096 + pax_close_kernel();
19097
19098 return (void *)vaddr;
19099 }
19100 diff -urNp linux-3.0.3/arch/x86/mm/hugetlbpage.c linux-3.0.3/arch/x86/mm/hugetlbpage.c
19101 --- linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19102 +++ linux-3.0.3/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19103 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19104 struct hstate *h = hstate_file(file);
19105 struct mm_struct *mm = current->mm;
19106 struct vm_area_struct *vma;
19107 - unsigned long start_addr;
19108 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19109 +
19110 +#ifdef CONFIG_PAX_SEGMEXEC
19111 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19112 + pax_task_size = SEGMEXEC_TASK_SIZE;
19113 +#endif
19114 +
19115 + pax_task_size -= PAGE_SIZE;
19116
19117 if (len > mm->cached_hole_size) {
19118 - start_addr = mm->free_area_cache;
19119 + start_addr = mm->free_area_cache;
19120 } else {
19121 - start_addr = TASK_UNMAPPED_BASE;
19122 - mm->cached_hole_size = 0;
19123 + start_addr = mm->mmap_base;
19124 + mm->cached_hole_size = 0;
19125 }
19126
19127 full_search:
19128 @@ -280,26 +287,27 @@ full_search:
19129
19130 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19131 /* At this point: (!vma || addr < vma->vm_end). */
19132 - if (TASK_SIZE - len < addr) {
19133 + if (pax_task_size - len < addr) {
19134 /*
19135 * Start a new search - just in case we missed
19136 * some holes.
19137 */
19138 - if (start_addr != TASK_UNMAPPED_BASE) {
19139 - start_addr = TASK_UNMAPPED_BASE;
19140 + if (start_addr != mm->mmap_base) {
19141 + start_addr = mm->mmap_base;
19142 mm->cached_hole_size = 0;
19143 goto full_search;
19144 }
19145 return -ENOMEM;
19146 }
19147 - if (!vma || addr + len <= vma->vm_start) {
19148 - mm->free_area_cache = addr + len;
19149 - return addr;
19150 - }
19151 + if (check_heap_stack_gap(vma, addr, len))
19152 + break;
19153 if (addr + mm->cached_hole_size < vma->vm_start)
19154 mm->cached_hole_size = vma->vm_start - addr;
19155 addr = ALIGN(vma->vm_end, huge_page_size(h));
19156 }
19157 +
19158 + mm->free_area_cache = addr + len;
19159 + return addr;
19160 }
19161
19162 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19163 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19164 {
19165 struct hstate *h = hstate_file(file);
19166 struct mm_struct *mm = current->mm;
19167 - struct vm_area_struct *vma, *prev_vma;
19168 - unsigned long base = mm->mmap_base, addr = addr0;
19169 + struct vm_area_struct *vma;
19170 + unsigned long base = mm->mmap_base, addr;
19171 unsigned long largest_hole = mm->cached_hole_size;
19172 - int first_time = 1;
19173
19174 /* don't allow allocations above current base */
19175 if (mm->free_area_cache > base)
19176 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19177 largest_hole = 0;
19178 mm->free_area_cache = base;
19179 }
19180 -try_again:
19181 +
19182 /* make sure it can fit in the remaining address space */
19183 if (mm->free_area_cache < len)
19184 goto fail;
19185
19186 /* either no address requested or can't fit in requested address hole */
19187 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19188 + addr = (mm->free_area_cache - len);
19189 do {
19190 + addr &= huge_page_mask(h);
19191 + vma = find_vma(mm, addr);
19192 /*
19193 * Lookup failure means no vma is above this address,
19194 * i.e. return with success:
19195 - */
19196 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19197 - return addr;
19198 -
19199 - /*
19200 * new region fits between prev_vma->vm_end and
19201 * vma->vm_start, use it:
19202 */
19203 - if (addr + len <= vma->vm_start &&
19204 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19205 + if (check_heap_stack_gap(vma, addr, len)) {
19206 /* remember the address as a hint for next time */
19207 - mm->cached_hole_size = largest_hole;
19208 - return (mm->free_area_cache = addr);
19209 - } else {
19210 - /* pull free_area_cache down to the first hole */
19211 - if (mm->free_area_cache == vma->vm_end) {
19212 - mm->free_area_cache = vma->vm_start;
19213 - mm->cached_hole_size = largest_hole;
19214 - }
19215 + mm->cached_hole_size = largest_hole;
19216 + return (mm->free_area_cache = addr);
19217 + }
19218 + /* pull free_area_cache down to the first hole */
19219 + if (mm->free_area_cache == vma->vm_end) {
19220 + mm->free_area_cache = vma->vm_start;
19221 + mm->cached_hole_size = largest_hole;
19222 }
19223
19224 /* remember the largest hole we saw so far */
19225 if (addr + largest_hole < vma->vm_start)
19226 - largest_hole = vma->vm_start - addr;
19227 + largest_hole = vma->vm_start - addr;
19228
19229 /* try just below the current vma->vm_start */
19230 - addr = (vma->vm_start - len) & huge_page_mask(h);
19231 - } while (len <= vma->vm_start);
19232 + addr = skip_heap_stack_gap(vma, len);
19233 + } while (!IS_ERR_VALUE(addr));
19234
19235 fail:
19236 /*
19237 - * if hint left us with no space for the requested
19238 - * mapping then try again:
19239 - */
19240 - if (first_time) {
19241 - mm->free_area_cache = base;
19242 - largest_hole = 0;
19243 - first_time = 0;
19244 - goto try_again;
19245 - }
19246 - /*
19247 * A failed mmap() very likely causes application failure,
19248 * so fall back to the bottom-up function here. This scenario
19249 * can happen with large stack limits and large mmap()
19250 * allocations.
19251 */
19252 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19253 +
19254 +#ifdef CONFIG_PAX_SEGMEXEC
19255 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19256 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19257 + else
19258 +#endif
19259 +
19260 + mm->mmap_base = TASK_UNMAPPED_BASE;
19261 +
19262 +#ifdef CONFIG_PAX_RANDMMAP
19263 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19264 + mm->mmap_base += mm->delta_mmap;
19265 +#endif
19266 +
19267 + mm->free_area_cache = mm->mmap_base;
19268 mm->cached_hole_size = ~0UL;
19269 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19270 len, pgoff, flags);
19271 @@ -386,6 +392,7 @@ fail:
19272 /*
19273 * Restore the topdown base:
19274 */
19275 + mm->mmap_base = base;
19276 mm->free_area_cache = base;
19277 mm->cached_hole_size = ~0UL;
19278
19279 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19280 struct hstate *h = hstate_file(file);
19281 struct mm_struct *mm = current->mm;
19282 struct vm_area_struct *vma;
19283 + unsigned long pax_task_size = TASK_SIZE;
19284
19285 if (len & ~huge_page_mask(h))
19286 return -EINVAL;
19287 - if (len > TASK_SIZE)
19288 +
19289 +#ifdef CONFIG_PAX_SEGMEXEC
19290 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19291 + pax_task_size = SEGMEXEC_TASK_SIZE;
19292 +#endif
19293 +
19294 + pax_task_size -= PAGE_SIZE;
19295 +
19296 + if (len > pax_task_size)
19297 return -ENOMEM;
19298
19299 if (flags & MAP_FIXED) {
19300 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19301 if (addr) {
19302 addr = ALIGN(addr, huge_page_size(h));
19303 vma = find_vma(mm, addr);
19304 - if (TASK_SIZE - len >= addr &&
19305 - (!vma || addr + len <= vma->vm_start))
19306 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19307 return addr;
19308 }
19309 if (mm->get_unmapped_area == arch_get_unmapped_area)
19310 diff -urNp linux-3.0.3/arch/x86/mm/init_32.c linux-3.0.3/arch/x86/mm/init_32.c
19311 --- linux-3.0.3/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19312 +++ linux-3.0.3/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19313 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19314 }
19315
19316 /*
19317 - * Creates a middle page table and puts a pointer to it in the
19318 - * given global directory entry. This only returns the gd entry
19319 - * in non-PAE compilation mode, since the middle layer is folded.
19320 - */
19321 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19322 -{
19323 - pud_t *pud;
19324 - pmd_t *pmd_table;
19325 -
19326 -#ifdef CONFIG_X86_PAE
19327 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19328 - if (after_bootmem)
19329 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19330 - else
19331 - pmd_table = (pmd_t *)alloc_low_page();
19332 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19333 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19334 - pud = pud_offset(pgd, 0);
19335 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19336 -
19337 - return pmd_table;
19338 - }
19339 -#endif
19340 - pud = pud_offset(pgd, 0);
19341 - pmd_table = pmd_offset(pud, 0);
19342 -
19343 - return pmd_table;
19344 -}
19345 -
19346 -/*
19347 * Create a page table and place a pointer to it in a middle page
19348 * directory entry:
19349 */
19350 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19351 page_table = (pte_t *)alloc_low_page();
19352
19353 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19354 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19355 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19356 +#else
19357 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19358 +#endif
19359 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19360 }
19361
19362 return pte_offset_kernel(pmd, 0);
19363 }
19364
19365 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19366 +{
19367 + pud_t *pud;
19368 + pmd_t *pmd_table;
19369 +
19370 + pud = pud_offset(pgd, 0);
19371 + pmd_table = pmd_offset(pud, 0);
19372 +
19373 + return pmd_table;
19374 +}
19375 +
19376 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19377 {
19378 int pgd_idx = pgd_index(vaddr);
19379 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19380 int pgd_idx, pmd_idx;
19381 unsigned long vaddr;
19382 pgd_t *pgd;
19383 + pud_t *pud;
19384 pmd_t *pmd;
19385 pte_t *pte = NULL;
19386
19387 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19388 pgd = pgd_base + pgd_idx;
19389
19390 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19391 - pmd = one_md_table_init(pgd);
19392 - pmd = pmd + pmd_index(vaddr);
19393 + pud = pud_offset(pgd, vaddr);
19394 + pmd = pmd_offset(pud, vaddr);
19395 +
19396 +#ifdef CONFIG_X86_PAE
19397 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19398 +#endif
19399 +
19400 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19401 pmd++, pmd_idx++) {
19402 pte = page_table_kmap_check(one_page_table_init(pmd),
19403 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19404 }
19405 }
19406
19407 -static inline int is_kernel_text(unsigned long addr)
19408 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19409 {
19410 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19411 - return 1;
19412 - return 0;
19413 + if ((start > ktla_ktva((unsigned long)_etext) ||
19414 + end <= ktla_ktva((unsigned long)_stext)) &&
19415 + (start > ktla_ktva((unsigned long)_einittext) ||
19416 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19417 +
19418 +#ifdef CONFIG_ACPI_SLEEP
19419 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19420 +#endif
19421 +
19422 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19423 + return 0;
19424 + return 1;
19425 }
19426
19427 /*
19428 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19429 unsigned long last_map_addr = end;
19430 unsigned long start_pfn, end_pfn;
19431 pgd_t *pgd_base = swapper_pg_dir;
19432 - int pgd_idx, pmd_idx, pte_ofs;
19433 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19434 unsigned long pfn;
19435 pgd_t *pgd;
19436 + pud_t *pud;
19437 pmd_t *pmd;
19438 pte_t *pte;
19439 unsigned pages_2m, pages_4k;
19440 @@ -281,8 +282,13 @@ repeat:
19441 pfn = start_pfn;
19442 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19443 pgd = pgd_base + pgd_idx;
19444 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19445 - pmd = one_md_table_init(pgd);
19446 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19447 + pud = pud_offset(pgd, 0);
19448 + pmd = pmd_offset(pud, 0);
19449 +
19450 +#ifdef CONFIG_X86_PAE
19451 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19452 +#endif
19453
19454 if (pfn >= end_pfn)
19455 continue;
19456 @@ -294,14 +300,13 @@ repeat:
19457 #endif
19458 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19459 pmd++, pmd_idx++) {
19460 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19461 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19462
19463 /*
19464 * Map with big pages if possible, otherwise
19465 * create normal page tables:
19466 */
19467 if (use_pse) {
19468 - unsigned int addr2;
19469 pgprot_t prot = PAGE_KERNEL_LARGE;
19470 /*
19471 * first pass will use the same initial
19472 @@ -311,11 +316,7 @@ repeat:
19473 __pgprot(PTE_IDENT_ATTR |
19474 _PAGE_PSE);
19475
19476 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19477 - PAGE_OFFSET + PAGE_SIZE-1;
19478 -
19479 - if (is_kernel_text(addr) ||
19480 - is_kernel_text(addr2))
19481 + if (is_kernel_text(address, address + PMD_SIZE))
19482 prot = PAGE_KERNEL_LARGE_EXEC;
19483
19484 pages_2m++;
19485 @@ -332,7 +333,7 @@ repeat:
19486 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19487 pte += pte_ofs;
19488 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19489 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19490 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19491 pgprot_t prot = PAGE_KERNEL;
19492 /*
19493 * first pass will use the same initial
19494 @@ -340,7 +341,7 @@ repeat:
19495 */
19496 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19497
19498 - if (is_kernel_text(addr))
19499 + if (is_kernel_text(address, address + PAGE_SIZE))
19500 prot = PAGE_KERNEL_EXEC;
19501
19502 pages_4k++;
19503 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19504
19505 pud = pud_offset(pgd, va);
19506 pmd = pmd_offset(pud, va);
19507 - if (!pmd_present(*pmd))
19508 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19509 break;
19510
19511 pte = pte_offset_kernel(pmd, va);
19512 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19513
19514 static void __init pagetable_init(void)
19515 {
19516 - pgd_t *pgd_base = swapper_pg_dir;
19517 -
19518 - permanent_kmaps_init(pgd_base);
19519 + permanent_kmaps_init(swapper_pg_dir);
19520 }
19521
19522 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19523 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19524 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19525
19526 /* user-defined highmem size */
19527 @@ -757,6 +756,12 @@ void __init mem_init(void)
19528
19529 pci_iommu_alloc();
19530
19531 +#ifdef CONFIG_PAX_PER_CPU_PGD
19532 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19533 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19534 + KERNEL_PGD_PTRS);
19535 +#endif
19536 +
19537 #ifdef CONFIG_FLATMEM
19538 BUG_ON(!mem_map);
19539 #endif
19540 @@ -774,7 +779,7 @@ void __init mem_init(void)
19541 set_highmem_pages_init();
19542
19543 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19544 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19545 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19546 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19547
19548 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19549 @@ -815,10 +820,10 @@ void __init mem_init(void)
19550 ((unsigned long)&__init_end -
19551 (unsigned long)&__init_begin) >> 10,
19552
19553 - (unsigned long)&_etext, (unsigned long)&_edata,
19554 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19555 + (unsigned long)&_sdata, (unsigned long)&_edata,
19556 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19557
19558 - (unsigned long)&_text, (unsigned long)&_etext,
19559 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19560 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19561
19562 /*
19563 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
19564 if (!kernel_set_to_readonly)
19565 return;
19566
19567 + start = ktla_ktva(start);
19568 pr_debug("Set kernel text: %lx - %lx for read write\n",
19569 start, start+size);
19570
19571 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
19572 if (!kernel_set_to_readonly)
19573 return;
19574
19575 + start = ktla_ktva(start);
19576 pr_debug("Set kernel text: %lx - %lx for read only\n",
19577 start, start+size);
19578
19579 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
19580 unsigned long start = PFN_ALIGN(_text);
19581 unsigned long size = PFN_ALIGN(_etext) - start;
19582
19583 + start = ktla_ktva(start);
19584 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19585 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19586 size >> 10);
19587 diff -urNp linux-3.0.3/arch/x86/mm/init_64.c linux-3.0.3/arch/x86/mm/init_64.c
19588 --- linux-3.0.3/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
19589 +++ linux-3.0.3/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
19590 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
19591 * around without checking the pgd every time.
19592 */
19593
19594 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19595 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19596 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19597
19598 int force_personality32;
19599 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
19600
19601 for (address = start; address <= end; address += PGDIR_SIZE) {
19602 const pgd_t *pgd_ref = pgd_offset_k(address);
19603 +
19604 +#ifdef CONFIG_PAX_PER_CPU_PGD
19605 + unsigned long cpu;
19606 +#else
19607 struct page *page;
19608 +#endif
19609
19610 if (pgd_none(*pgd_ref))
19611 continue;
19612
19613 spin_lock(&pgd_lock);
19614 +
19615 +#ifdef CONFIG_PAX_PER_CPU_PGD
19616 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19617 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19618 +#else
19619 list_for_each_entry(page, &pgd_list, lru) {
19620 pgd_t *pgd;
19621 spinlock_t *pgt_lock;
19622 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
19623 /* the pgt_lock only for Xen */
19624 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19625 spin_lock(pgt_lock);
19626 +#endif
19627
19628 if (pgd_none(*pgd))
19629 set_pgd(pgd, *pgd_ref);
19630 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
19631 BUG_ON(pgd_page_vaddr(*pgd)
19632 != pgd_page_vaddr(*pgd_ref));
19633
19634 +#ifndef CONFIG_PAX_PER_CPU_PGD
19635 spin_unlock(pgt_lock);
19636 +#endif
19637 +
19638 }
19639 spin_unlock(&pgd_lock);
19640 }
19641 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19642 pmd = fill_pmd(pud, vaddr);
19643 pte = fill_pte(pmd, vaddr);
19644
19645 + pax_open_kernel();
19646 set_pte(pte, new_pte);
19647 + pax_close_kernel();
19648
19649 /*
19650 * It's enough to flush this one mapping.
19651 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
19652 pgd = pgd_offset_k((unsigned long)__va(phys));
19653 if (pgd_none(*pgd)) {
19654 pud = (pud_t *) spp_getpage();
19655 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19656 - _PAGE_USER));
19657 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19658 }
19659 pud = pud_offset(pgd, (unsigned long)__va(phys));
19660 if (pud_none(*pud)) {
19661 pmd = (pmd_t *) spp_getpage();
19662 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19663 - _PAGE_USER));
19664 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19665 }
19666 pmd = pmd_offset(pud, phys);
19667 BUG_ON(!pmd_none(*pmd));
19668 @@ -693,6 +707,12 @@ void __init mem_init(void)
19669
19670 pci_iommu_alloc();
19671
19672 +#ifdef CONFIG_PAX_PER_CPU_PGD
19673 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19674 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19675 + KERNEL_PGD_PTRS);
19676 +#endif
19677 +
19678 /* clear_bss() already clear the empty_zero_page */
19679
19680 reservedpages = 0;
19681 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
19682 static struct vm_area_struct gate_vma = {
19683 .vm_start = VSYSCALL_START,
19684 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19685 - .vm_page_prot = PAGE_READONLY_EXEC,
19686 - .vm_flags = VM_READ | VM_EXEC
19687 + .vm_page_prot = PAGE_READONLY,
19688 + .vm_flags = VM_READ
19689 };
19690
19691 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19692 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
19693
19694 const char *arch_vma_name(struct vm_area_struct *vma)
19695 {
19696 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19697 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19698 return "[vdso]";
19699 if (vma == &gate_vma)
19700 return "[vsyscall]";
19701 diff -urNp linux-3.0.3/arch/x86/mm/init.c linux-3.0.3/arch/x86/mm/init.c
19702 --- linux-3.0.3/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
19703 +++ linux-3.0.3/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
19704 @@ -31,7 +31,7 @@ int direct_gbpages
19705 static void __init find_early_table_space(unsigned long end, int use_pse,
19706 int use_gbpages)
19707 {
19708 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19709 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19710 phys_addr_t base;
19711
19712 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19713 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
19714 */
19715 int devmem_is_allowed(unsigned long pagenr)
19716 {
19717 - if (pagenr <= 256)
19718 +#ifdef CONFIG_GRKERNSEC_KMEM
19719 + /* allow BDA */
19720 + if (!pagenr)
19721 + return 1;
19722 + /* allow EBDA */
19723 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19724 + return 1;
19725 +#else
19726 + if (!pagenr)
19727 + return 1;
19728 +#ifdef CONFIG_VM86
19729 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19730 + return 1;
19731 +#endif
19732 +#endif
19733 +
19734 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19735 return 1;
19736 +#ifdef CONFIG_GRKERNSEC_KMEM
19737 + /* throw out everything else below 1MB */
19738 + if (pagenr <= 256)
19739 + return 0;
19740 +#endif
19741 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19742 return 0;
19743 if (!page_is_ram(pagenr))
19744 return 1;
19745 +
19746 return 0;
19747 }
19748
19749 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
19750
19751 void free_initmem(void)
19752 {
19753 +
19754 +#ifdef CONFIG_PAX_KERNEXEC
19755 +#ifdef CONFIG_X86_32
19756 + /* PaX: limit KERNEL_CS to actual size */
19757 + unsigned long addr, limit;
19758 + struct desc_struct d;
19759 + int cpu;
19760 +
19761 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19762 + limit = (limit - 1UL) >> PAGE_SHIFT;
19763 +
19764 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19765 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19766 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19767 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19768 + }
19769 +
19770 + /* PaX: make KERNEL_CS read-only */
19771 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19772 + if (!paravirt_enabled())
19773 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19774 +/*
19775 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19776 + pgd = pgd_offset_k(addr);
19777 + pud = pud_offset(pgd, addr);
19778 + pmd = pmd_offset(pud, addr);
19779 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19780 + }
19781 +*/
19782 +#ifdef CONFIG_X86_PAE
19783 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19784 +/*
19785 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19786 + pgd = pgd_offset_k(addr);
19787 + pud = pud_offset(pgd, addr);
19788 + pmd = pmd_offset(pud, addr);
19789 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19790 + }
19791 +*/
19792 +#endif
19793 +
19794 +#ifdef CONFIG_MODULES
19795 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19796 +#endif
19797 +
19798 +#else
19799 + pgd_t *pgd;
19800 + pud_t *pud;
19801 + pmd_t *pmd;
19802 + unsigned long addr, end;
19803 +
19804 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19805 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19806 + pgd = pgd_offset_k(addr);
19807 + pud = pud_offset(pgd, addr);
19808 + pmd = pmd_offset(pud, addr);
19809 + if (!pmd_present(*pmd))
19810 + continue;
19811 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19812 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19813 + else
19814 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19815 + }
19816 +
19817 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19818 + end = addr + KERNEL_IMAGE_SIZE;
19819 + for (; addr < end; addr += PMD_SIZE) {
19820 + pgd = pgd_offset_k(addr);
19821 + pud = pud_offset(pgd, addr);
19822 + pmd = pmd_offset(pud, addr);
19823 + if (!pmd_present(*pmd))
19824 + continue;
19825 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19826 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19827 + }
19828 +#endif
19829 +
19830 + flush_tlb_all();
19831 +#endif
19832 +
19833 free_init_pages("unused kernel memory",
19834 (unsigned long)(&__init_begin),
19835 (unsigned long)(&__init_end));
19836 diff -urNp linux-3.0.3/arch/x86/mm/iomap_32.c linux-3.0.3/arch/x86/mm/iomap_32.c
19837 --- linux-3.0.3/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
19838 +++ linux-3.0.3/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
19839 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19840 type = kmap_atomic_idx_push();
19841 idx = type + KM_TYPE_NR * smp_processor_id();
19842 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19843 +
19844 + pax_open_kernel();
19845 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19846 + pax_close_kernel();
19847 +
19848 arch_flush_lazy_mmu_mode();
19849
19850 return (void *)vaddr;
19851 diff -urNp linux-3.0.3/arch/x86/mm/ioremap.c linux-3.0.3/arch/x86/mm/ioremap.c
19852 --- linux-3.0.3/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
19853 +++ linux-3.0.3/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
19854 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
19855 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19856 int is_ram = page_is_ram(pfn);
19857
19858 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19859 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19860 return NULL;
19861 WARN_ON_ONCE(is_ram);
19862 }
19863 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19864 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19865
19866 static __initdata int after_paging_init;
19867 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19868 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19869
19870 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
19871 {
19872 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
19873 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
19874
19875 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
19876 - memset(bm_pte, 0, sizeof(bm_pte));
19877 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
19878 + pmd_populate_user(&init_mm, pmd, bm_pte);
19879
19880 /*
19881 * The boot-ioremap range spans multiple pmds, for which
19882 diff -urNp linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c
19883 --- linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
19884 +++ linux-3.0.3/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
19885 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
19886 * memory (e.g. tracked pages)? For now, we need this to avoid
19887 * invoking kmemcheck for PnP BIOS calls.
19888 */
19889 - if (regs->flags & X86_VM_MASK)
19890 + if (v8086_mode(regs))
19891 return false;
19892 - if (regs->cs != __KERNEL_CS)
19893 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
19894 return false;
19895
19896 pte = kmemcheck_pte_lookup(address);
19897 diff -urNp linux-3.0.3/arch/x86/mm/mmap.c linux-3.0.3/arch/x86/mm/mmap.c
19898 --- linux-3.0.3/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
19899 +++ linux-3.0.3/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
19900 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
19901 * Leave an at least ~128 MB hole with possible stack randomization.
19902 */
19903 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
19904 -#define MAX_GAP (TASK_SIZE/6*5)
19905 +#define MAX_GAP (pax_task_size/6*5)
19906
19907 /*
19908 * True on X86_32 or when emulating IA32 on X86_64
19909 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
19910 return rnd << PAGE_SHIFT;
19911 }
19912
19913 -static unsigned long mmap_base(void)
19914 +static unsigned long mmap_base(struct mm_struct *mm)
19915 {
19916 unsigned long gap = rlimit(RLIMIT_STACK);
19917 + unsigned long pax_task_size = TASK_SIZE;
19918 +
19919 +#ifdef CONFIG_PAX_SEGMEXEC
19920 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19921 + pax_task_size = SEGMEXEC_TASK_SIZE;
19922 +#endif
19923
19924 if (gap < MIN_GAP)
19925 gap = MIN_GAP;
19926 else if (gap > MAX_GAP)
19927 gap = MAX_GAP;
19928
19929 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
19930 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
19931 }
19932
19933 /*
19934 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
19935 * does, but not when emulating X86_32
19936 */
19937 -static unsigned long mmap_legacy_base(void)
19938 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
19939 {
19940 - if (mmap_is_ia32())
19941 + if (mmap_is_ia32()) {
19942 +
19943 +#ifdef CONFIG_PAX_SEGMEXEC
19944 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19945 + return SEGMEXEC_TASK_UNMAPPED_BASE;
19946 + else
19947 +#endif
19948 +
19949 return TASK_UNMAPPED_BASE;
19950 - else
19951 + } else
19952 return TASK_UNMAPPED_BASE + mmap_rnd();
19953 }
19954
19955 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
19956 void arch_pick_mmap_layout(struct mm_struct *mm)
19957 {
19958 if (mmap_is_legacy()) {
19959 - mm->mmap_base = mmap_legacy_base();
19960 + mm->mmap_base = mmap_legacy_base(mm);
19961 +
19962 +#ifdef CONFIG_PAX_RANDMMAP
19963 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19964 + mm->mmap_base += mm->delta_mmap;
19965 +#endif
19966 +
19967 mm->get_unmapped_area = arch_get_unmapped_area;
19968 mm->unmap_area = arch_unmap_area;
19969 } else {
19970 - mm->mmap_base = mmap_base();
19971 + mm->mmap_base = mmap_base(mm);
19972 +
19973 +#ifdef CONFIG_PAX_RANDMMAP
19974 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19975 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19976 +#endif
19977 +
19978 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19979 mm->unmap_area = arch_unmap_area_topdown;
19980 }
19981 diff -urNp linux-3.0.3/arch/x86/mm/mmio-mod.c linux-3.0.3/arch/x86/mm/mmio-mod.c
19982 --- linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
19983 +++ linux-3.0.3/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
19984 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
19985 break;
19986 default:
19987 {
19988 - unsigned char *ip = (unsigned char *)instptr;
19989 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
19990 my_trace->opcode = MMIO_UNKNOWN_OP;
19991 my_trace->width = 0;
19992 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
19993 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
19994 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
19995 void __iomem *addr)
19996 {
19997 - static atomic_t next_id;
19998 + static atomic_unchecked_t next_id;
19999 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20000 /* These are page-unaligned. */
20001 struct mmiotrace_map map = {
20002 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20003 .private = trace
20004 },
20005 .phys = offset,
20006 - .id = atomic_inc_return(&next_id)
20007 + .id = atomic_inc_return_unchecked(&next_id)
20008 };
20009 map.map_id = trace->id;
20010
20011 diff -urNp linux-3.0.3/arch/x86/mm/pageattr.c linux-3.0.3/arch/x86/mm/pageattr.c
20012 --- linux-3.0.3/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20013 +++ linux-3.0.3/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20014 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20015 */
20016 #ifdef CONFIG_PCI_BIOS
20017 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20018 - pgprot_val(forbidden) |= _PAGE_NX;
20019 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20020 #endif
20021
20022 /*
20023 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20024 * Does not cover __inittext since that is gone later on. On
20025 * 64bit we do not enforce !NX on the low mapping
20026 */
20027 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20028 - pgprot_val(forbidden) |= _PAGE_NX;
20029 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20030 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20031
20032 +#ifdef CONFIG_DEBUG_RODATA
20033 /*
20034 * The .rodata section needs to be read-only. Using the pfn
20035 * catches all aliases.
20036 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20037 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20038 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20039 pgprot_val(forbidden) |= _PAGE_RW;
20040 +#endif
20041
20042 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20043 /*
20044 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20045 }
20046 #endif
20047
20048 +#ifdef CONFIG_PAX_KERNEXEC
20049 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20050 + pgprot_val(forbidden) |= _PAGE_RW;
20051 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20052 + }
20053 +#endif
20054 +
20055 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20056
20057 return prot;
20058 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20059 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20060 {
20061 /* change init_mm */
20062 + pax_open_kernel();
20063 set_pte_atomic(kpte, pte);
20064 +
20065 #ifdef CONFIG_X86_32
20066 if (!SHARED_KERNEL_PMD) {
20067 +
20068 +#ifdef CONFIG_PAX_PER_CPU_PGD
20069 + unsigned long cpu;
20070 +#else
20071 struct page *page;
20072 +#endif
20073
20074 +#ifdef CONFIG_PAX_PER_CPU_PGD
20075 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20076 + pgd_t *pgd = get_cpu_pgd(cpu);
20077 +#else
20078 list_for_each_entry(page, &pgd_list, lru) {
20079 - pgd_t *pgd;
20080 + pgd_t *pgd = (pgd_t *)page_address(page);
20081 +#endif
20082 +
20083 pud_t *pud;
20084 pmd_t *pmd;
20085
20086 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20087 + pgd += pgd_index(address);
20088 pud = pud_offset(pgd, address);
20089 pmd = pmd_offset(pud, address);
20090 set_pte_atomic((pte_t *)pmd, pte);
20091 }
20092 }
20093 #endif
20094 + pax_close_kernel();
20095 }
20096
20097 static int
20098 diff -urNp linux-3.0.3/arch/x86/mm/pageattr-test.c linux-3.0.3/arch/x86/mm/pageattr-test.c
20099 --- linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20100 +++ linux-3.0.3/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20101 @@ -36,7 +36,7 @@ enum {
20102
20103 static int pte_testbit(pte_t pte)
20104 {
20105 - return pte_flags(pte) & _PAGE_UNUSED1;
20106 + return pte_flags(pte) & _PAGE_CPA_TEST;
20107 }
20108
20109 struct split_state {
20110 diff -urNp linux-3.0.3/arch/x86/mm/pat.c linux-3.0.3/arch/x86/mm/pat.c
20111 --- linux-3.0.3/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20112 +++ linux-3.0.3/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20113 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20114
20115 if (!entry) {
20116 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20117 - current->comm, current->pid, start, end);
20118 + current->comm, task_pid_nr(current), start, end);
20119 return -EINVAL;
20120 }
20121
20122 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20123 while (cursor < to) {
20124 if (!devmem_is_allowed(pfn)) {
20125 printk(KERN_INFO
20126 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20127 - current->comm, from, to);
20128 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20129 + current->comm, from, to, cursor);
20130 return 0;
20131 }
20132 cursor += PAGE_SIZE;
20133 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20134 printk(KERN_INFO
20135 "%s:%d ioremap_change_attr failed %s "
20136 "for %Lx-%Lx\n",
20137 - current->comm, current->pid,
20138 + current->comm, task_pid_nr(current),
20139 cattr_name(flags),
20140 base, (unsigned long long)(base + size));
20141 return -EINVAL;
20142 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20143 if (want_flags != flags) {
20144 printk(KERN_WARNING
20145 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20146 - current->comm, current->pid,
20147 + current->comm, task_pid_nr(current),
20148 cattr_name(want_flags),
20149 (unsigned long long)paddr,
20150 (unsigned long long)(paddr + size),
20151 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20152 free_memtype(paddr, paddr + size);
20153 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20154 " for %Lx-%Lx, got %s\n",
20155 - current->comm, current->pid,
20156 + current->comm, task_pid_nr(current),
20157 cattr_name(want_flags),
20158 (unsigned long long)paddr,
20159 (unsigned long long)(paddr + size),
20160 diff -urNp linux-3.0.3/arch/x86/mm/pf_in.c linux-3.0.3/arch/x86/mm/pf_in.c
20161 --- linux-3.0.3/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20162 +++ linux-3.0.3/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20163 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20164 int i;
20165 enum reason_type rv = OTHERS;
20166
20167 - p = (unsigned char *)ins_addr;
20168 + p = (unsigned char *)ktla_ktva(ins_addr);
20169 p += skip_prefix(p, &prf);
20170 p += get_opcode(p, &opcode);
20171
20172 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20173 struct prefix_bits prf;
20174 int i;
20175
20176 - p = (unsigned char *)ins_addr;
20177 + p = (unsigned char *)ktla_ktva(ins_addr);
20178 p += skip_prefix(p, &prf);
20179 p += get_opcode(p, &opcode);
20180
20181 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20182 struct prefix_bits prf;
20183 int i;
20184
20185 - p = (unsigned char *)ins_addr;
20186 + p = (unsigned char *)ktla_ktva(ins_addr);
20187 p += skip_prefix(p, &prf);
20188 p += get_opcode(p, &opcode);
20189
20190 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20191 struct prefix_bits prf;
20192 int i;
20193
20194 - p = (unsigned char *)ins_addr;
20195 + p = (unsigned char *)ktla_ktva(ins_addr);
20196 p += skip_prefix(p, &prf);
20197 p += get_opcode(p, &opcode);
20198 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20199 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20200 struct prefix_bits prf;
20201 int i;
20202
20203 - p = (unsigned char *)ins_addr;
20204 + p = (unsigned char *)ktla_ktva(ins_addr);
20205 p += skip_prefix(p, &prf);
20206 p += get_opcode(p, &opcode);
20207 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20208 diff -urNp linux-3.0.3/arch/x86/mm/pgtable_32.c linux-3.0.3/arch/x86/mm/pgtable_32.c
20209 --- linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20210 +++ linux-3.0.3/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20211 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20212 return;
20213 }
20214 pte = pte_offset_kernel(pmd, vaddr);
20215 +
20216 + pax_open_kernel();
20217 if (pte_val(pteval))
20218 set_pte_at(&init_mm, vaddr, pte, pteval);
20219 else
20220 pte_clear(&init_mm, vaddr, pte);
20221 + pax_close_kernel();
20222
20223 /*
20224 * It's enough to flush this one mapping.
20225 diff -urNp linux-3.0.3/arch/x86/mm/pgtable.c linux-3.0.3/arch/x86/mm/pgtable.c
20226 --- linux-3.0.3/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20227 +++ linux-3.0.3/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20228 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20229 list_del(&page->lru);
20230 }
20231
20232 -#define UNSHARED_PTRS_PER_PGD \
20233 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20234 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20235 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20236
20237 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20238 +{
20239 + while (count--)
20240 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20241 +}
20242 +#endif
20243 +
20244 +#ifdef CONFIG_PAX_PER_CPU_PGD
20245 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20246 +{
20247 + while (count--)
20248 +
20249 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20250 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20251 +#else
20252 + *dst++ = *src++;
20253 +#endif
20254
20255 +}
20256 +#endif
20257 +
20258 +#ifdef CONFIG_X86_64
20259 +#define pxd_t pud_t
20260 +#define pyd_t pgd_t
20261 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20262 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20263 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20264 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20265 +#define PYD_SIZE PGDIR_SIZE
20266 +#else
20267 +#define pxd_t pmd_t
20268 +#define pyd_t pud_t
20269 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20270 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20271 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20272 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20273 +#define PYD_SIZE PUD_SIZE
20274 +#endif
20275 +
20276 +#ifdef CONFIG_PAX_PER_CPU_PGD
20277 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20278 +static inline void pgd_dtor(pgd_t *pgd) {}
20279 +#else
20280 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20281 {
20282 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20283 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20284 pgd_list_del(pgd);
20285 spin_unlock(&pgd_lock);
20286 }
20287 +#endif
20288
20289 /*
20290 * List of all pgd's needed for non-PAE so it can invalidate entries
20291 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20292 * -- wli
20293 */
20294
20295 -#ifdef CONFIG_X86_PAE
20296 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20297 /*
20298 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20299 * updating the top-level pagetable entries to guarantee the
20300 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20301 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20302 * and initialize the kernel pmds here.
20303 */
20304 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20305 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20306
20307 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20308 {
20309 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20310 */
20311 flush_tlb_mm(mm);
20312 }
20313 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20314 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20315 #else /* !CONFIG_X86_PAE */
20316
20317 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20318 -#define PREALLOCATED_PMDS 0
20319 +#define PREALLOCATED_PXDS 0
20320
20321 #endif /* CONFIG_X86_PAE */
20322
20323 -static void free_pmds(pmd_t *pmds[])
20324 +static void free_pxds(pxd_t *pxds[])
20325 {
20326 int i;
20327
20328 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20329 - if (pmds[i])
20330 - free_page((unsigned long)pmds[i]);
20331 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20332 + if (pxds[i])
20333 + free_page((unsigned long)pxds[i]);
20334 }
20335
20336 -static int preallocate_pmds(pmd_t *pmds[])
20337 +static int preallocate_pxds(pxd_t *pxds[])
20338 {
20339 int i;
20340 bool failed = false;
20341
20342 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20343 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20344 - if (pmd == NULL)
20345 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20346 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20347 + if (pxd == NULL)
20348 failed = true;
20349 - pmds[i] = pmd;
20350 + pxds[i] = pxd;
20351 }
20352
20353 if (failed) {
20354 - free_pmds(pmds);
20355 + free_pxds(pxds);
20356 return -ENOMEM;
20357 }
20358
20359 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20360 * preallocate which never got a corresponding vma will need to be
20361 * freed manually.
20362 */
20363 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20364 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20365 {
20366 int i;
20367
20368 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20369 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20370 pgd_t pgd = pgdp[i];
20371
20372 if (pgd_val(pgd) != 0) {
20373 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20374 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20375
20376 - pgdp[i] = native_make_pgd(0);
20377 + set_pgd(pgdp + i, native_make_pgd(0));
20378
20379 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20380 - pmd_free(mm, pmd);
20381 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20382 + pxd_free(mm, pxd);
20383 }
20384 }
20385 }
20386
20387 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20388 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20389 {
20390 - pud_t *pud;
20391 + pyd_t *pyd;
20392 unsigned long addr;
20393 int i;
20394
20395 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20396 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20397 return;
20398
20399 - pud = pud_offset(pgd, 0);
20400 +#ifdef CONFIG_X86_64
20401 + pyd = pyd_offset(mm, 0L);
20402 +#else
20403 + pyd = pyd_offset(pgd, 0L);
20404 +#endif
20405
20406 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20407 - i++, pud++, addr += PUD_SIZE) {
20408 - pmd_t *pmd = pmds[i];
20409 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20410 + i++, pyd++, addr += PYD_SIZE) {
20411 + pxd_t *pxd = pxds[i];
20412
20413 if (i >= KERNEL_PGD_BOUNDARY)
20414 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20415 - sizeof(pmd_t) * PTRS_PER_PMD);
20416 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20417 + sizeof(pxd_t) * PTRS_PER_PMD);
20418
20419 - pud_populate(mm, pud, pmd);
20420 + pyd_populate(mm, pyd, pxd);
20421 }
20422 }
20423
20424 pgd_t *pgd_alloc(struct mm_struct *mm)
20425 {
20426 pgd_t *pgd;
20427 - pmd_t *pmds[PREALLOCATED_PMDS];
20428 + pxd_t *pxds[PREALLOCATED_PXDS];
20429
20430 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20431
20432 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20433
20434 mm->pgd = pgd;
20435
20436 - if (preallocate_pmds(pmds) != 0)
20437 + if (preallocate_pxds(pxds) != 0)
20438 goto out_free_pgd;
20439
20440 if (paravirt_pgd_alloc(mm) != 0)
20441 - goto out_free_pmds;
20442 + goto out_free_pxds;
20443
20444 /*
20445 * Make sure that pre-populating the pmds is atomic with
20446 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20447 spin_lock(&pgd_lock);
20448
20449 pgd_ctor(mm, pgd);
20450 - pgd_prepopulate_pmd(mm, pgd, pmds);
20451 + pgd_prepopulate_pxd(mm, pgd, pxds);
20452
20453 spin_unlock(&pgd_lock);
20454
20455 return pgd;
20456
20457 -out_free_pmds:
20458 - free_pmds(pmds);
20459 +out_free_pxds:
20460 + free_pxds(pxds);
20461 out_free_pgd:
20462 free_page((unsigned long)pgd);
20463 out:
20464 @@ -295,7 +344,7 @@ out:
20465
20466 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20467 {
20468 - pgd_mop_up_pmds(mm, pgd);
20469 + pgd_mop_up_pxds(mm, pgd);
20470 pgd_dtor(pgd);
20471 paravirt_pgd_free(mm, pgd);
20472 free_page((unsigned long)pgd);
20473 diff -urNp linux-3.0.3/arch/x86/mm/setup_nx.c linux-3.0.3/arch/x86/mm/setup_nx.c
20474 --- linux-3.0.3/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20475 +++ linux-3.0.3/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20476 @@ -5,8 +5,10 @@
20477 #include <asm/pgtable.h>
20478 #include <asm/proto.h>
20479
20480 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20481 static int disable_nx __cpuinitdata;
20482
20483 +#ifndef CONFIG_PAX_PAGEEXEC
20484 /*
20485 * noexec = on|off
20486 *
20487 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20488 return 0;
20489 }
20490 early_param("noexec", noexec_setup);
20491 +#endif
20492 +
20493 +#endif
20494
20495 void __cpuinit x86_configure_nx(void)
20496 {
20497 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20498 if (cpu_has_nx && !disable_nx)
20499 __supported_pte_mask |= _PAGE_NX;
20500 else
20501 +#endif
20502 __supported_pte_mask &= ~_PAGE_NX;
20503 }
20504
20505 diff -urNp linux-3.0.3/arch/x86/mm/tlb.c linux-3.0.3/arch/x86/mm/tlb.c
20506 --- linux-3.0.3/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20507 +++ linux-3.0.3/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20508 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20509 BUG();
20510 cpumask_clear_cpu(cpu,
20511 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20512 +
20513 +#ifndef CONFIG_PAX_PER_CPU_PGD
20514 load_cr3(swapper_pg_dir);
20515 +#endif
20516 +
20517 }
20518 EXPORT_SYMBOL_GPL(leave_mm);
20519
20520 diff -urNp linux-3.0.3/arch/x86/net/bpf_jit_comp.c linux-3.0.3/arch/x86/net/bpf_jit_comp.c
20521 --- linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20522 +++ linux-3.0.3/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20523 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20524 module_free(NULL, image);
20525 return;
20526 }
20527 + pax_open_kernel();
20528 memcpy(image + proglen, temp, ilen);
20529 + pax_close_kernel();
20530 }
20531 proglen += ilen;
20532 addrs[i] = proglen;
20533 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
20534 break;
20535 }
20536 if (proglen == oldproglen) {
20537 - image = module_alloc(max_t(unsigned int,
20538 + image = module_alloc_exec(max_t(unsigned int,
20539 proglen,
20540 sizeof(struct work_struct)));
20541 if (!image)
20542 diff -urNp linux-3.0.3/arch/x86/oprofile/backtrace.c linux-3.0.3/arch/x86/oprofile/backtrace.c
20543 --- linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:44:40.000000000 -0400
20544 +++ linux-3.0.3/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
20545 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
20546 {
20547 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20548
20549 - if (!user_mode_vm(regs)) {
20550 + if (!user_mode(regs)) {
20551 unsigned long stack = kernel_stack_pointer(regs);
20552 if (depth)
20553 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20554 diff -urNp linux-3.0.3/arch/x86/pci/mrst.c linux-3.0.3/arch/x86/pci/mrst.c
20555 --- linux-3.0.3/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
20556 +++ linux-3.0.3/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
20557 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20558 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20559 pci_mmcfg_late_init();
20560 pcibios_enable_irq = mrst_pci_irq_enable;
20561 - pci_root_ops = pci_mrst_ops;
20562 + pax_open_kernel();
20563 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20564 + pax_close_kernel();
20565 /* Continue with standard init */
20566 return 1;
20567 }
20568 diff -urNp linux-3.0.3/arch/x86/pci/pcbios.c linux-3.0.3/arch/x86/pci/pcbios.c
20569 --- linux-3.0.3/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
20570 +++ linux-3.0.3/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
20571 @@ -79,50 +79,93 @@ union bios32 {
20572 static struct {
20573 unsigned long address;
20574 unsigned short segment;
20575 -} bios32_indirect = { 0, __KERNEL_CS };
20576 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20577
20578 /*
20579 * Returns the entry point for the given service, NULL on error
20580 */
20581
20582 -static unsigned long bios32_service(unsigned long service)
20583 +static unsigned long __devinit bios32_service(unsigned long service)
20584 {
20585 unsigned char return_code; /* %al */
20586 unsigned long address; /* %ebx */
20587 unsigned long length; /* %ecx */
20588 unsigned long entry; /* %edx */
20589 unsigned long flags;
20590 + struct desc_struct d, *gdt;
20591
20592 local_irq_save(flags);
20593 - __asm__("lcall *(%%edi); cld"
20594 +
20595 + gdt = get_cpu_gdt_table(smp_processor_id());
20596 +
20597 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20598 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20599 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20600 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20601 +
20602 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20603 : "=a" (return_code),
20604 "=b" (address),
20605 "=c" (length),
20606 "=d" (entry)
20607 : "0" (service),
20608 "1" (0),
20609 - "D" (&bios32_indirect));
20610 + "D" (&bios32_indirect),
20611 + "r"(__PCIBIOS_DS)
20612 + : "memory");
20613 +
20614 + pax_open_kernel();
20615 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20616 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20617 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20618 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20619 + pax_close_kernel();
20620 +
20621 local_irq_restore(flags);
20622
20623 switch (return_code) {
20624 - case 0:
20625 - return address + entry;
20626 - case 0x80: /* Not present */
20627 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20628 - return 0;
20629 - default: /* Shouldn't happen */
20630 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20631 - service, return_code);
20632 + case 0: {
20633 + int cpu;
20634 + unsigned char flags;
20635 +
20636 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20637 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20638 + printk(KERN_WARNING "bios32_service: not valid\n");
20639 return 0;
20640 + }
20641 + address = address + PAGE_OFFSET;
20642 + length += 16UL; /* some BIOSs underreport this... */
20643 + flags = 4;
20644 + if (length >= 64*1024*1024) {
20645 + length >>= PAGE_SHIFT;
20646 + flags |= 8;
20647 + }
20648 +
20649 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20650 + gdt = get_cpu_gdt_table(cpu);
20651 + pack_descriptor(&d, address, length, 0x9b, flags);
20652 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20653 + pack_descriptor(&d, address, length, 0x93, flags);
20654 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20655 + }
20656 + return entry;
20657 + }
20658 + case 0x80: /* Not present */
20659 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20660 + return 0;
20661 + default: /* Shouldn't happen */
20662 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20663 + service, return_code);
20664 + return 0;
20665 }
20666 }
20667
20668 static struct {
20669 unsigned long address;
20670 unsigned short segment;
20671 -} pci_indirect = { 0, __KERNEL_CS };
20672 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20673
20674 -static int pci_bios_present;
20675 +static int pci_bios_present __read_only;
20676
20677 static int __devinit check_pcibios(void)
20678 {
20679 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20680 unsigned long flags, pcibios_entry;
20681
20682 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20683 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20684 + pci_indirect.address = pcibios_entry;
20685
20686 local_irq_save(flags);
20687 - __asm__(
20688 - "lcall *(%%edi); cld\n\t"
20689 + __asm__("movw %w6, %%ds\n\t"
20690 + "lcall *%%ss:(%%edi); cld\n\t"
20691 + "push %%ss\n\t"
20692 + "pop %%ds\n\t"
20693 "jc 1f\n\t"
20694 "xor %%ah, %%ah\n"
20695 "1:"
20696 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20697 "=b" (ebx),
20698 "=c" (ecx)
20699 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20700 - "D" (&pci_indirect)
20701 + "D" (&pci_indirect),
20702 + "r" (__PCIBIOS_DS)
20703 : "memory");
20704 local_irq_restore(flags);
20705
20706 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20707
20708 switch (len) {
20709 case 1:
20710 - __asm__("lcall *(%%esi); cld\n\t"
20711 + __asm__("movw %w6, %%ds\n\t"
20712 + "lcall *%%ss:(%%esi); cld\n\t"
20713 + "push %%ss\n\t"
20714 + "pop %%ds\n\t"
20715 "jc 1f\n\t"
20716 "xor %%ah, %%ah\n"
20717 "1:"
20718 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20719 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20720 "b" (bx),
20721 "D" ((long)reg),
20722 - "S" (&pci_indirect));
20723 + "S" (&pci_indirect),
20724 + "r" (__PCIBIOS_DS));
20725 /*
20726 * Zero-extend the result beyond 8 bits, do not trust the
20727 * BIOS having done it:
20728 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20729 *value &= 0xff;
20730 break;
20731 case 2:
20732 - __asm__("lcall *(%%esi); cld\n\t"
20733 + __asm__("movw %w6, %%ds\n\t"
20734 + "lcall *%%ss:(%%esi); cld\n\t"
20735 + "push %%ss\n\t"
20736 + "pop %%ds\n\t"
20737 "jc 1f\n\t"
20738 "xor %%ah, %%ah\n"
20739 "1:"
20740 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20741 : "1" (PCIBIOS_READ_CONFIG_WORD),
20742 "b" (bx),
20743 "D" ((long)reg),
20744 - "S" (&pci_indirect));
20745 + "S" (&pci_indirect),
20746 + "r" (__PCIBIOS_DS));
20747 /*
20748 * Zero-extend the result beyond 16 bits, do not trust the
20749 * BIOS having done it:
20750 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20751 *value &= 0xffff;
20752 break;
20753 case 4:
20754 - __asm__("lcall *(%%esi); cld\n\t"
20755 + __asm__("movw %w6, %%ds\n\t"
20756 + "lcall *%%ss:(%%esi); cld\n\t"
20757 + "push %%ss\n\t"
20758 + "pop %%ds\n\t"
20759 "jc 1f\n\t"
20760 "xor %%ah, %%ah\n"
20761 "1:"
20762 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20763 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20764 "b" (bx),
20765 "D" ((long)reg),
20766 - "S" (&pci_indirect));
20767 + "S" (&pci_indirect),
20768 + "r" (__PCIBIOS_DS));
20769 break;
20770 }
20771
20772 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20773
20774 switch (len) {
20775 case 1:
20776 - __asm__("lcall *(%%esi); cld\n\t"
20777 + __asm__("movw %w6, %%ds\n\t"
20778 + "lcall *%%ss:(%%esi); cld\n\t"
20779 + "push %%ss\n\t"
20780 + "pop %%ds\n\t"
20781 "jc 1f\n\t"
20782 "xor %%ah, %%ah\n"
20783 "1:"
20784 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20785 "c" (value),
20786 "b" (bx),
20787 "D" ((long)reg),
20788 - "S" (&pci_indirect));
20789 + "S" (&pci_indirect),
20790 + "r" (__PCIBIOS_DS));
20791 break;
20792 case 2:
20793 - __asm__("lcall *(%%esi); cld\n\t"
20794 + __asm__("movw %w6, %%ds\n\t"
20795 + "lcall *%%ss:(%%esi); cld\n\t"
20796 + "push %%ss\n\t"
20797 + "pop %%ds\n\t"
20798 "jc 1f\n\t"
20799 "xor %%ah, %%ah\n"
20800 "1:"
20801 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20802 "c" (value),
20803 "b" (bx),
20804 "D" ((long)reg),
20805 - "S" (&pci_indirect));
20806 + "S" (&pci_indirect),
20807 + "r" (__PCIBIOS_DS));
20808 break;
20809 case 4:
20810 - __asm__("lcall *(%%esi); cld\n\t"
20811 + __asm__("movw %w6, %%ds\n\t"
20812 + "lcall *%%ss:(%%esi); cld\n\t"
20813 + "push %%ss\n\t"
20814 + "pop %%ds\n\t"
20815 "jc 1f\n\t"
20816 "xor %%ah, %%ah\n"
20817 "1:"
20818 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20819 "c" (value),
20820 "b" (bx),
20821 "D" ((long)reg),
20822 - "S" (&pci_indirect));
20823 + "S" (&pci_indirect),
20824 + "r" (__PCIBIOS_DS));
20825 break;
20826 }
20827
20828 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20829
20830 DBG("PCI: Fetching IRQ routing table... ");
20831 __asm__("push %%es\n\t"
20832 + "movw %w8, %%ds\n\t"
20833 "push %%ds\n\t"
20834 "pop %%es\n\t"
20835 - "lcall *(%%esi); cld\n\t"
20836 + "lcall *%%ss:(%%esi); cld\n\t"
20837 "pop %%es\n\t"
20838 + "push %%ss\n\t"
20839 + "pop %%ds\n"
20840 "jc 1f\n\t"
20841 "xor %%ah, %%ah\n"
20842 "1:"
20843 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20844 "1" (0),
20845 "D" ((long) &opt),
20846 "S" (&pci_indirect),
20847 - "m" (opt)
20848 + "m" (opt),
20849 + "r" (__PCIBIOS_DS)
20850 : "memory");
20851 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20852 if (ret & 0xff00)
20853 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20854 {
20855 int ret;
20856
20857 - __asm__("lcall *(%%esi); cld\n\t"
20858 + __asm__("movw %w5, %%ds\n\t"
20859 + "lcall *%%ss:(%%esi); cld\n\t"
20860 + "push %%ss\n\t"
20861 + "pop %%ds\n"
20862 "jc 1f\n\t"
20863 "xor %%ah, %%ah\n"
20864 "1:"
20865 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20866 : "0" (PCIBIOS_SET_PCI_HW_INT),
20867 "b" ((dev->bus->number << 8) | dev->devfn),
20868 "c" ((irq << 8) | (pin + 10)),
20869 - "S" (&pci_indirect));
20870 + "S" (&pci_indirect),
20871 + "r" (__PCIBIOS_DS));
20872 return !(ret & 0xff00);
20873 }
20874 EXPORT_SYMBOL(pcibios_set_irq_routing);
20875 diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_32.c linux-3.0.3/arch/x86/platform/efi/efi_32.c
20876 --- linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
20877 +++ linux-3.0.3/arch/x86/platform/efi/efi_32.c 2011-08-23 21:47:55.000000000 -0400
20878 @@ -38,70 +38,37 @@
20879 */
20880
20881 static unsigned long efi_rt_eflags;
20882 -static pgd_t efi_bak_pg_dir_pointer[2];
20883 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
20884
20885 -void efi_call_phys_prelog(void)
20886 +void __init efi_call_phys_prelog(void)
20887 {
20888 - unsigned long cr4;
20889 - unsigned long temp;
20890 struct desc_ptr gdt_descr;
20891
20892 local_irq_save(efi_rt_eflags);
20893
20894 - /*
20895 - * If I don't have PAE, I should just duplicate two entries in page
20896 - * directory. If I have PAE, I just need to duplicate one entry in
20897 - * page directory.
20898 - */
20899 - cr4 = read_cr4_safe();
20900 -
20901 - if (cr4 & X86_CR4_PAE) {
20902 - efi_bak_pg_dir_pointer[0].pgd =
20903 - swapper_pg_dir[pgd_index(0)].pgd;
20904 - swapper_pg_dir[0].pgd =
20905 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20906 - } else {
20907 - efi_bak_pg_dir_pointer[0].pgd =
20908 - swapper_pg_dir[pgd_index(0)].pgd;
20909 - efi_bak_pg_dir_pointer[1].pgd =
20910 - swapper_pg_dir[pgd_index(0x400000)].pgd;
20911 - swapper_pg_dir[pgd_index(0)].pgd =
20912 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
20913 - temp = PAGE_OFFSET + 0x400000;
20914 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20915 - swapper_pg_dir[pgd_index(temp)].pgd;
20916 - }
20917 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
20918 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20919 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20920
20921 /*
20922 * After the lock is released, the original page table is restored.
20923 */
20924 __flush_tlb_all();
20925
20926 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
20927 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
20928 gdt_descr.size = GDT_SIZE - 1;
20929 load_gdt(&gdt_descr);
20930 }
20931
20932 -void efi_call_phys_epilog(void)
20933 +void __init efi_call_phys_epilog(void)
20934 {
20935 - unsigned long cr4;
20936 struct desc_ptr gdt_descr;
20937
20938 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
20939 + gdt_descr.address = get_cpu_gdt_table(0);
20940 gdt_descr.size = GDT_SIZE - 1;
20941 load_gdt(&gdt_descr);
20942
20943 - cr4 = read_cr4_safe();
20944 -
20945 - if (cr4 & X86_CR4_PAE) {
20946 - swapper_pg_dir[pgd_index(0)].pgd =
20947 - efi_bak_pg_dir_pointer[0].pgd;
20948 - } else {
20949 - swapper_pg_dir[pgd_index(0)].pgd =
20950 - efi_bak_pg_dir_pointer[0].pgd;
20951 - swapper_pg_dir[pgd_index(0x400000)].pgd =
20952 - efi_bak_pg_dir_pointer[1].pgd;
20953 - }
20954 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
20955
20956 /*
20957 * After the lock is released, the original page table is restored.
20958 diff -urNp linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S
20959 --- linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
20960 +++ linux-3.0.3/arch/x86/platform/efi/efi_stub_32.S 2011-08-23 21:47:55.000000000 -0400
20961 @@ -6,6 +6,7 @@
20962 */
20963
20964 #include <linux/linkage.h>
20965 +#include <linux/init.h>
20966 #include <asm/page_types.h>
20967
20968 /*
20969 @@ -20,7 +21,7 @@
20970 * service functions will comply with gcc calling convention, too.
20971 */
20972
20973 -.text
20974 +__INIT
20975 ENTRY(efi_call_phys)
20976 /*
20977 * 0. The function can only be called in Linux kernel. So CS has been
20978 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
20979 * The mapping of lower virtual memory has been created in prelog and
20980 * epilog.
20981 */
20982 - movl $1f, %edx
20983 - subl $__PAGE_OFFSET, %edx
20984 - jmp *%edx
20985 + jmp 1f-__PAGE_OFFSET
20986 1:
20987
20988 /*
20989 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
20990 * parameter 2, ..., param n. To make things easy, we save the return
20991 * address of efi_call_phys in a global variable.
20992 */
20993 - popl %edx
20994 - movl %edx, saved_return_addr
20995 - /* get the function pointer into ECX*/
20996 - popl %ecx
20997 - movl %ecx, efi_rt_function_ptr
20998 - movl $2f, %edx
20999 - subl $__PAGE_OFFSET, %edx
21000 - pushl %edx
21001 + popl (saved_return_addr)
21002 + popl (efi_rt_function_ptr)
21003
21004 /*
21005 * 3. Clear PG bit in %CR0.
21006 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21007 /*
21008 * 5. Call the physical function.
21009 */
21010 - jmp *%ecx
21011 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21012
21013 -2:
21014 /*
21015 * 6. After EFI runtime service returns, control will return to
21016 * following instruction. We'd better readjust stack pointer first.
21017 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21018 movl %cr0, %edx
21019 orl $0x80000000, %edx
21020 movl %edx, %cr0
21021 - jmp 1f
21022 -1:
21023 +
21024 /*
21025 * 8. Now restore the virtual mode from flat mode by
21026 * adding EIP with PAGE_OFFSET.
21027 */
21028 - movl $1f, %edx
21029 - jmp *%edx
21030 + jmp 1f+__PAGE_OFFSET
21031 1:
21032
21033 /*
21034 * 9. Balance the stack. And because EAX contain the return value,
21035 * we'd better not clobber it.
21036 */
21037 - leal efi_rt_function_ptr, %edx
21038 - movl (%edx), %ecx
21039 - pushl %ecx
21040 + pushl (efi_rt_function_ptr)
21041
21042 /*
21043 - * 10. Push the saved return address onto the stack and return.
21044 + * 10. Return to the saved return address.
21045 */
21046 - leal saved_return_addr, %edx
21047 - movl (%edx), %ecx
21048 - pushl %ecx
21049 - ret
21050 + jmpl *(saved_return_addr)
21051 ENDPROC(efi_call_phys)
21052 .previous
21053
21054 -.data
21055 +__INITDATA
21056 saved_return_addr:
21057 .long 0
21058 efi_rt_function_ptr:
21059 diff -urNp linux-3.0.3/arch/x86/platform/mrst/mrst.c linux-3.0.3/arch/x86/platform/mrst/mrst.c
21060 --- linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21061 +++ linux-3.0.3/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21062 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21063 }
21064
21065 /* Reboot and power off are handled by the SCU on a MID device */
21066 -static void mrst_power_off(void)
21067 +static __noreturn void mrst_power_off(void)
21068 {
21069 intel_scu_ipc_simple_command(0xf1, 1);
21070 + BUG();
21071 }
21072
21073 -static void mrst_reboot(void)
21074 +static __noreturn void mrst_reboot(void)
21075 {
21076 intel_scu_ipc_simple_command(0xf1, 0);
21077 + BUG();
21078 }
21079
21080 /*
21081 diff -urNp linux-3.0.3/arch/x86/platform/uv/tlb_uv.c linux-3.0.3/arch/x86/platform/uv/tlb_uv.c
21082 --- linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21083 +++ linux-3.0.3/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21084 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21085 cpumask_t mask;
21086 struct reset_args reset_args;
21087
21088 + pax_track_stack();
21089 +
21090 reset_args.sender = sender;
21091 cpus_clear(mask);
21092 /* find a single cpu for each uvhub in this distribution mask */
21093 diff -urNp linux-3.0.3/arch/x86/power/cpu.c linux-3.0.3/arch/x86/power/cpu.c
21094 --- linux-3.0.3/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21095 +++ linux-3.0.3/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21096 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21097 static void fix_processor_context(void)
21098 {
21099 int cpu = smp_processor_id();
21100 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21101 + struct tss_struct *t = init_tss + cpu;
21102
21103 set_tss_desc(cpu, t); /*
21104 * This just modifies memory; should not be
21105 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21106 */
21107
21108 #ifdef CONFIG_X86_64
21109 + pax_open_kernel();
21110 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21111 + pax_close_kernel();
21112
21113 syscall_init(); /* This sets MSR_*STAR and related */
21114 #endif
21115 diff -urNp linux-3.0.3/arch/x86/vdso/Makefile linux-3.0.3/arch/x86/vdso/Makefile
21116 --- linux-3.0.3/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21117 +++ linux-3.0.3/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21118 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21119 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21120 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21121
21122 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21123 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21124 GCOV_PROFILE := n
21125
21126 #
21127 diff -urNp linux-3.0.3/arch/x86/vdso/vdso32-setup.c linux-3.0.3/arch/x86/vdso/vdso32-setup.c
21128 --- linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21129 +++ linux-3.0.3/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21130 @@ -25,6 +25,7 @@
21131 #include <asm/tlbflush.h>
21132 #include <asm/vdso.h>
21133 #include <asm/proto.h>
21134 +#include <asm/mman.h>
21135
21136 enum {
21137 VDSO_DISABLED = 0,
21138 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21139 void enable_sep_cpu(void)
21140 {
21141 int cpu = get_cpu();
21142 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21143 + struct tss_struct *tss = init_tss + cpu;
21144
21145 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21146 put_cpu();
21147 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21148 gate_vma.vm_start = FIXADDR_USER_START;
21149 gate_vma.vm_end = FIXADDR_USER_END;
21150 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21151 - gate_vma.vm_page_prot = __P101;
21152 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21153 /*
21154 * Make sure the vDSO gets into every core dump.
21155 * Dumping its contents makes post-mortem fully interpretable later
21156 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21157 if (compat)
21158 addr = VDSO_HIGH_BASE;
21159 else {
21160 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21161 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21162 if (IS_ERR_VALUE(addr)) {
21163 ret = addr;
21164 goto up_fail;
21165 }
21166 }
21167
21168 - current->mm->context.vdso = (void *)addr;
21169 + current->mm->context.vdso = addr;
21170
21171 if (compat_uses_vma || !compat) {
21172 /*
21173 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21174 }
21175
21176 current_thread_info()->sysenter_return =
21177 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21178 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21179
21180 up_fail:
21181 if (ret)
21182 - current->mm->context.vdso = NULL;
21183 + current->mm->context.vdso = 0;
21184
21185 up_write(&mm->mmap_sem);
21186
21187 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21188
21189 const char *arch_vma_name(struct vm_area_struct *vma)
21190 {
21191 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21192 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21193 return "[vdso]";
21194 +
21195 +#ifdef CONFIG_PAX_SEGMEXEC
21196 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21197 + return "[vdso]";
21198 +#endif
21199 +
21200 return NULL;
21201 }
21202
21203 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21204 * Check to see if the corresponding task was created in compat vdso
21205 * mode.
21206 */
21207 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21208 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21209 return &gate_vma;
21210 return NULL;
21211 }
21212 diff -urNp linux-3.0.3/arch/x86/vdso/vma.c linux-3.0.3/arch/x86/vdso/vma.c
21213 --- linux-3.0.3/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21214 +++ linux-3.0.3/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21215 @@ -15,18 +15,19 @@
21216 #include <asm/proto.h>
21217 #include <asm/vdso.h>
21218
21219 -unsigned int __read_mostly vdso_enabled = 1;
21220 -
21221 extern char vdso_start[], vdso_end[];
21222 extern unsigned short vdso_sync_cpuid;
21223 +extern char __vsyscall_0;
21224
21225 static struct page **vdso_pages;
21226 +static struct page *vsyscall_page;
21227 static unsigned vdso_size;
21228
21229 static int __init init_vdso_vars(void)
21230 {
21231 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21232 - int i;
21233 + size_t nbytes = vdso_end - vdso_start;
21234 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21235 + size_t i;
21236
21237 vdso_size = npages << PAGE_SHIFT;
21238 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21239 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21240 goto oom;
21241 for (i = 0; i < npages; i++) {
21242 struct page *p;
21243 - p = alloc_page(GFP_KERNEL);
21244 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21245 if (!p)
21246 goto oom;
21247 vdso_pages[i] = p;
21248 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21249 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21250 + nbytes -= PAGE_SIZE;
21251 }
21252 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21253
21254 return 0;
21255
21256 oom:
21257 - printk("Cannot allocate vdso\n");
21258 - vdso_enabled = 0;
21259 - return -ENOMEM;
21260 + panic("Cannot allocate vdso\n");
21261 }
21262 subsys_initcall(init_vdso_vars);
21263
21264 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21265 unsigned long addr;
21266 int ret;
21267
21268 - if (!vdso_enabled)
21269 - return 0;
21270 -
21271 down_write(&mm->mmap_sem);
21272 - addr = vdso_addr(mm->start_stack, vdso_size);
21273 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21274 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21275 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21276 if (IS_ERR_VALUE(addr)) {
21277 ret = addr;
21278 goto up_fail;
21279 }
21280
21281 - current->mm->context.vdso = (void *)addr;
21282 + mm->context.vdso = addr + PAGE_SIZE;
21283
21284 - ret = install_special_mapping(mm, addr, vdso_size,
21285 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
21286 VM_READ|VM_EXEC|
21287 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21288 + VM_MAYREAD|VM_MAYEXEC|
21289 VM_ALWAYSDUMP,
21290 - vdso_pages);
21291 + &vsyscall_page);
21292 if (ret) {
21293 - current->mm->context.vdso = NULL;
21294 + mm->context.vdso = 0;
21295 goto up_fail;
21296 }
21297
21298 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21299 + VM_READ|VM_EXEC|
21300 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21301 + VM_ALWAYSDUMP,
21302 + vdso_pages);
21303 + if (ret)
21304 + mm->context.vdso = 0;
21305 +
21306 up_fail:
21307 up_write(&mm->mmap_sem);
21308 return ret;
21309 }
21310 -
21311 -static __init int vdso_setup(char *s)
21312 -{
21313 - vdso_enabled = simple_strtoul(s, NULL, 0);
21314 - return 0;
21315 -}
21316 -__setup("vdso=", vdso_setup);
21317 diff -urNp linux-3.0.3/arch/x86/xen/enlighten.c linux-3.0.3/arch/x86/xen/enlighten.c
21318 --- linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:44:40.000000000 -0400
21319 +++ linux-3.0.3/arch/x86/xen/enlighten.c 2011-08-23 21:47:55.000000000 -0400
21320 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21321
21322 struct shared_info xen_dummy_shared_info;
21323
21324 -void *xen_initial_gdt;
21325 -
21326 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21327 __read_mostly int xen_have_vector_callback;
21328 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21329 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21330 #endif
21331 };
21332
21333 -static void xen_reboot(int reason)
21334 +static __noreturn void xen_reboot(int reason)
21335 {
21336 struct sched_shutdown r = { .reason = reason };
21337
21338 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21339 BUG();
21340 }
21341
21342 -static void xen_restart(char *msg)
21343 +static __noreturn void xen_restart(char *msg)
21344 {
21345 xen_reboot(SHUTDOWN_reboot);
21346 }
21347
21348 -static void xen_emergency_restart(void)
21349 +static __noreturn void xen_emergency_restart(void)
21350 {
21351 xen_reboot(SHUTDOWN_reboot);
21352 }
21353
21354 -static void xen_machine_halt(void)
21355 +static __noreturn void xen_machine_halt(void)
21356 {
21357 xen_reboot(SHUTDOWN_poweroff);
21358 }
21359 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21360 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21361
21362 /* Work out if we support NX */
21363 - x86_configure_nx();
21364 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21365 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21366 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21367 + unsigned l, h;
21368 +
21369 + __supported_pte_mask |= _PAGE_NX;
21370 + rdmsr(MSR_EFER, l, h);
21371 + l |= EFER_NX;
21372 + wrmsr(MSR_EFER, l, h);
21373 + }
21374 +#endif
21375
21376 xen_setup_features();
21377
21378 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21379
21380 machine_ops = xen_machine_ops;
21381
21382 - /*
21383 - * The only reliable way to retain the initial address of the
21384 - * percpu gdt_page is to remember it here, so we can go and
21385 - * mark it RW later, when the initial percpu area is freed.
21386 - */
21387 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21388 -
21389 xen_smp_init();
21390
21391 #ifdef CONFIG_ACPI_NUMA
21392 diff -urNp linux-3.0.3/arch/x86/xen/mmu.c linux-3.0.3/arch/x86/xen/mmu.c
21393 --- linux-3.0.3/arch/x86/xen/mmu.c 2011-07-21 22:17:23.000000000 -0400
21394 +++ linux-3.0.3/arch/x86/xen/mmu.c 2011-08-24 18:10:12.000000000 -0400
21395 @@ -1679,6 +1679,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21396 convert_pfn_mfn(init_level4_pgt);
21397 convert_pfn_mfn(level3_ident_pgt);
21398 convert_pfn_mfn(level3_kernel_pgt);
21399 + convert_pfn_mfn(level3_vmalloc_pgt);
21400 + convert_pfn_mfn(level3_vmemmap_pgt);
21401
21402 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21403 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21404 @@ -1697,7 +1699,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21405 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21406 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21407 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21408 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21409 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21410 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21411 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21412 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21413 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21414
21415 @@ -1909,6 +1914,7 @@ static void __init xen_post_allocator_in
21416 pv_mmu_ops.set_pud = xen_set_pud;
21417 #if PAGETABLE_LEVELS == 4
21418 pv_mmu_ops.set_pgd = xen_set_pgd;
21419 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21420 #endif
21421
21422 /* This will work as long as patching hasn't happened yet
21423 @@ -1990,6 +1996,7 @@ static const struct pv_mmu_ops xen_mmu_o
21424 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21425 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21426 .set_pgd = xen_set_pgd_hyper,
21427 + .set_pgd_batched = xen_set_pgd_hyper,
21428
21429 .alloc_pud = xen_alloc_pmd_init,
21430 .release_pud = xen_release_pmd_init,
21431 diff -urNp linux-3.0.3/arch/x86/xen/smp.c linux-3.0.3/arch/x86/xen/smp.c
21432 --- linux-3.0.3/arch/x86/xen/smp.c 2011-07-21 22:17:23.000000000 -0400
21433 +++ linux-3.0.3/arch/x86/xen/smp.c 2011-08-23 21:47:55.000000000 -0400
21434 @@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
21435 {
21436 BUG_ON(smp_processor_id() != 0);
21437 native_smp_prepare_boot_cpu();
21438 -
21439 - /* We've switched to the "real" per-cpu gdt, so make sure the
21440 - old memory can be recycled */
21441 - make_lowmem_page_readwrite(xen_initial_gdt);
21442 -
21443 xen_filter_cpu_maps();
21444 xen_setup_vcpu_info_placement();
21445 }
21446 @@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
21447 gdt = get_cpu_gdt_table(cpu);
21448
21449 ctxt->flags = VGCF_IN_KERNEL;
21450 - ctxt->user_regs.ds = __USER_DS;
21451 - ctxt->user_regs.es = __USER_DS;
21452 + ctxt->user_regs.ds = __KERNEL_DS;
21453 + ctxt->user_regs.es = __KERNEL_DS;
21454 ctxt->user_regs.ss = __KERNEL_DS;
21455 #ifdef CONFIG_X86_32
21456 ctxt->user_regs.fs = __KERNEL_PERCPU;
21457 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21458 + savesegment(gs, ctxt->user_regs.gs);
21459 #else
21460 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21461 #endif
21462 @@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
21463 int rc;
21464
21465 per_cpu(current_task, cpu) = idle;
21466 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21467 #ifdef CONFIG_X86_32
21468 irq_ctx_init(cpu);
21469 #else
21470 clear_tsk_thread_flag(idle, TIF_FORK);
21471 - per_cpu(kernel_stack, cpu) =
21472 - (unsigned long)task_stack_page(idle) -
21473 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21474 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21475 #endif
21476 xen_setup_runstate_info(cpu);
21477 xen_setup_timer(cpu);
21478 diff -urNp linux-3.0.3/arch/x86/xen/xen-asm_32.S linux-3.0.3/arch/x86/xen/xen-asm_32.S
21479 --- linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
21480 +++ linux-3.0.3/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
21481 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21482 ESP_OFFSET=4 # bytes pushed onto stack
21483
21484 /*
21485 - * Store vcpu_info pointer for easy access. Do it this way to
21486 - * avoid having to reload %fs
21487 + * Store vcpu_info pointer for easy access.
21488 */
21489 #ifdef CONFIG_SMP
21490 - GET_THREAD_INFO(%eax)
21491 - movl TI_cpu(%eax), %eax
21492 - movl __per_cpu_offset(,%eax,4), %eax
21493 - mov xen_vcpu(%eax), %eax
21494 + push %fs
21495 + mov $(__KERNEL_PERCPU), %eax
21496 + mov %eax, %fs
21497 + mov PER_CPU_VAR(xen_vcpu), %eax
21498 + pop %fs
21499 #else
21500 movl xen_vcpu, %eax
21501 #endif
21502 diff -urNp linux-3.0.3/arch/x86/xen/xen-head.S linux-3.0.3/arch/x86/xen/xen-head.S
21503 --- linux-3.0.3/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
21504 +++ linux-3.0.3/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
21505 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21506 #ifdef CONFIG_X86_32
21507 mov %esi,xen_start_info
21508 mov $init_thread_union+THREAD_SIZE,%esp
21509 +#ifdef CONFIG_SMP
21510 + movl $cpu_gdt_table,%edi
21511 + movl $__per_cpu_load,%eax
21512 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21513 + rorl $16,%eax
21514 + movb %al,__KERNEL_PERCPU + 4(%edi)
21515 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21516 + movl $__per_cpu_end - 1,%eax
21517 + subl $__per_cpu_start,%eax
21518 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21519 +#endif
21520 #else
21521 mov %rsi,xen_start_info
21522 mov $init_thread_union+THREAD_SIZE,%rsp
21523 diff -urNp linux-3.0.3/arch/x86/xen/xen-ops.h linux-3.0.3/arch/x86/xen/xen-ops.h
21524 --- linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:44:40.000000000 -0400
21525 +++ linux-3.0.3/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
21526 @@ -10,8 +10,6 @@
21527 extern const char xen_hypervisor_callback[];
21528 extern const char xen_failsafe_callback[];
21529
21530 -extern void *xen_initial_gdt;
21531 -
21532 struct trap_info;
21533 void xen_copy_trap_info(struct trap_info *traps);
21534
21535 diff -urNp linux-3.0.3/block/blk-iopoll.c linux-3.0.3/block/blk-iopoll.c
21536 --- linux-3.0.3/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
21537 +++ linux-3.0.3/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
21538 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21539 }
21540 EXPORT_SYMBOL(blk_iopoll_complete);
21541
21542 -static void blk_iopoll_softirq(struct softirq_action *h)
21543 +static void blk_iopoll_softirq(void)
21544 {
21545 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21546 int rearm = 0, budget = blk_iopoll_budget;
21547 diff -urNp linux-3.0.3/block/blk-map.c linux-3.0.3/block/blk-map.c
21548 --- linux-3.0.3/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
21549 +++ linux-3.0.3/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
21550 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21551 if (!len || !kbuf)
21552 return -EINVAL;
21553
21554 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21555 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21556 if (do_copy)
21557 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21558 else
21559 diff -urNp linux-3.0.3/block/blk-softirq.c linux-3.0.3/block/blk-softirq.c
21560 --- linux-3.0.3/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
21561 +++ linux-3.0.3/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
21562 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21563 * Softirq action handler - move entries to local list and loop over them
21564 * while passing them to the queue registered handler.
21565 */
21566 -static void blk_done_softirq(struct softirq_action *h)
21567 +static void blk_done_softirq(void)
21568 {
21569 struct list_head *cpu_list, local_list;
21570
21571 diff -urNp linux-3.0.3/block/bsg.c linux-3.0.3/block/bsg.c
21572 --- linux-3.0.3/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
21573 +++ linux-3.0.3/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
21574 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21575 struct sg_io_v4 *hdr, struct bsg_device *bd,
21576 fmode_t has_write_perm)
21577 {
21578 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21579 + unsigned char *cmdptr;
21580 +
21581 if (hdr->request_len > BLK_MAX_CDB) {
21582 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21583 if (!rq->cmd)
21584 return -ENOMEM;
21585 - }
21586 + cmdptr = rq->cmd;
21587 + } else
21588 + cmdptr = tmpcmd;
21589
21590 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21591 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21592 hdr->request_len))
21593 return -EFAULT;
21594
21595 + if (cmdptr != rq->cmd)
21596 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21597 +
21598 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21599 if (blk_verify_command(rq->cmd, has_write_perm))
21600 return -EPERM;
21601 diff -urNp linux-3.0.3/block/scsi_ioctl.c linux-3.0.3/block/scsi_ioctl.c
21602 --- linux-3.0.3/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
21603 +++ linux-3.0.3/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
21604 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21605 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21606 struct sg_io_hdr *hdr, fmode_t mode)
21607 {
21608 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21609 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21610 + unsigned char *cmdptr;
21611 +
21612 + if (rq->cmd != rq->__cmd)
21613 + cmdptr = rq->cmd;
21614 + else
21615 + cmdptr = tmpcmd;
21616 +
21617 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21618 return -EFAULT;
21619 +
21620 + if (cmdptr != rq->cmd)
21621 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21622 +
21623 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21624 return -EPERM;
21625
21626 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21627 int err;
21628 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21629 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21630 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21631 + unsigned char *cmdptr;
21632
21633 if (!sic)
21634 return -EINVAL;
21635 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21636 */
21637 err = -EFAULT;
21638 rq->cmd_len = cmdlen;
21639 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21640 +
21641 + if (rq->cmd != rq->__cmd)
21642 + cmdptr = rq->cmd;
21643 + else
21644 + cmdptr = tmpcmd;
21645 +
21646 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21647 goto error;
21648
21649 + if (rq->cmd != cmdptr)
21650 + memcpy(rq->cmd, cmdptr, cmdlen);
21651 +
21652 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21653 goto error;
21654
21655 diff -urNp linux-3.0.3/crypto/cryptd.c linux-3.0.3/crypto/cryptd.c
21656 --- linux-3.0.3/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
21657 +++ linux-3.0.3/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
21658 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21659
21660 struct cryptd_blkcipher_request_ctx {
21661 crypto_completion_t complete;
21662 -};
21663 +} __no_const;
21664
21665 struct cryptd_hash_ctx {
21666 struct crypto_shash *child;
21667 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21668
21669 struct cryptd_aead_request_ctx {
21670 crypto_completion_t complete;
21671 -};
21672 +} __no_const;
21673
21674 static void cryptd_queue_worker(struct work_struct *work);
21675
21676 diff -urNp linux-3.0.3/crypto/gf128mul.c linux-3.0.3/crypto/gf128mul.c
21677 --- linux-3.0.3/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
21678 +++ linux-3.0.3/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
21679 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21680 for (i = 0; i < 7; ++i)
21681 gf128mul_x_lle(&p[i + 1], &p[i]);
21682
21683 - memset(r, 0, sizeof(r));
21684 + memset(r, 0, sizeof(*r));
21685 for (i = 0;;) {
21686 u8 ch = ((u8 *)b)[15 - i];
21687
21688 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21689 for (i = 0; i < 7; ++i)
21690 gf128mul_x_bbe(&p[i + 1], &p[i]);
21691
21692 - memset(r, 0, sizeof(r));
21693 + memset(r, 0, sizeof(*r));
21694 for (i = 0;;) {
21695 u8 ch = ((u8 *)b)[i];
21696
21697 diff -urNp linux-3.0.3/crypto/serpent.c linux-3.0.3/crypto/serpent.c
21698 --- linux-3.0.3/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
21699 +++ linux-3.0.3/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
21700 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21701 u32 r0,r1,r2,r3,r4;
21702 int i;
21703
21704 + pax_track_stack();
21705 +
21706 /* Copy key, add padding */
21707
21708 for (i = 0; i < keylen; ++i)
21709 diff -urNp linux-3.0.3/Documentation/dontdiff linux-3.0.3/Documentation/dontdiff
21710 --- linux-3.0.3/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
21711 +++ linux-3.0.3/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
21712 @@ -5,6 +5,7 @@
21713 *.cis
21714 *.cpio
21715 *.csp
21716 +*.dbg
21717 *.dsp
21718 *.dvi
21719 *.elf
21720 @@ -48,9 +49,11 @@
21721 *.tab.h
21722 *.tex
21723 *.ver
21724 +*.vim
21725 *.xml
21726 *.xz
21727 *_MODULES
21728 +*_reg_safe.h
21729 *_vga16.c
21730 *~
21731 \#*#
21732 @@ -70,6 +73,7 @@ Kerntypes
21733 Module.markers
21734 Module.symvers
21735 PENDING
21736 +PERF*
21737 SCCS
21738 System.map*
21739 TAGS
21740 @@ -98,6 +102,8 @@ bzImage*
21741 capability_names.h
21742 capflags.c
21743 classlist.h*
21744 +clut_vga16.c
21745 +common-cmds.h
21746 comp*.log
21747 compile.h*
21748 conf
21749 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
21750 gconf
21751 gconf.glade.h
21752 gen-devlist
21753 +gen-kdb_cmds.c
21754 gen_crc32table
21755 gen_init_cpio
21756 generated
21757 genheaders
21758 genksyms
21759 *_gray256.c
21760 +hash
21761 hpet_example
21762 hugepage-mmap
21763 hugepage-shm
21764 @@ -146,7 +154,6 @@ int32.c
21765 int4.c
21766 int8.c
21767 kallsyms
21768 -kconfig
21769 keywords.c
21770 ksym.c*
21771 ksym.h*
21772 @@ -154,7 +161,6 @@ kxgettext
21773 lkc_defs.h
21774 lex.c
21775 lex.*.c
21776 -linux
21777 logo_*.c
21778 logo_*_clut224.c
21779 logo_*_mono.c
21780 @@ -174,6 +180,7 @@ mkboot
21781 mkbugboot
21782 mkcpustr
21783 mkdep
21784 +mkpiggy
21785 mkprep
21786 mkregtable
21787 mktables
21788 @@ -209,6 +216,7 @@ r300_reg_safe.h
21789 r420_reg_safe.h
21790 r600_reg_safe.h
21791 recordmcount
21792 +regdb.c
21793 relocs
21794 rlim_names.h
21795 rn50_reg_safe.h
21796 @@ -219,6 +227,7 @@ setup
21797 setup.bin
21798 setup.elf
21799 sImage
21800 +slabinfo
21801 sm_tbl*
21802 split-include
21803 syscalltab.h
21804 @@ -246,7 +255,9 @@ vmlinux
21805 vmlinux-*
21806 vmlinux.aout
21807 vmlinux.bin.all
21808 +vmlinux.bin.bz2
21809 vmlinux.lds
21810 +vmlinux.relocs
21811 vmlinuz
21812 voffset.h
21813 vsyscall.lds
21814 @@ -254,6 +265,7 @@ vsyscall_32.lds
21815 wanxlfw.inc
21816 uImage
21817 unifdef
21818 +utsrelease.h
21819 wakeup.bin
21820 wakeup.elf
21821 wakeup.lds
21822 diff -urNp linux-3.0.3/Documentation/kernel-parameters.txt linux-3.0.3/Documentation/kernel-parameters.txt
21823 --- linux-3.0.3/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
21824 +++ linux-3.0.3/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
21825 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
21826 the specified number of seconds. This is to be used if
21827 your oopses keep scrolling off the screen.
21828
21829 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
21830 + virtualization environments that don't cope well with the
21831 + expand down segment used by UDEREF on X86-32 or the frequent
21832 + page table updates on X86-64.
21833 +
21834 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
21835 +
21836 pcbit= [HW,ISDN]
21837
21838 pcd. [PARIDE]
21839 diff -urNp linux-3.0.3/drivers/acpi/apei/cper.c linux-3.0.3/drivers/acpi/apei/cper.c
21840 --- linux-3.0.3/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
21841 +++ linux-3.0.3/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
21842 @@ -38,12 +38,12 @@
21843 */
21844 u64 cper_next_record_id(void)
21845 {
21846 - static atomic64_t seq;
21847 + static atomic64_unchecked_t seq;
21848
21849 - if (!atomic64_read(&seq))
21850 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
21851 + if (!atomic64_read_unchecked(&seq))
21852 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
21853
21854 - return atomic64_inc_return(&seq);
21855 + return atomic64_inc_return_unchecked(&seq);
21856 }
21857 EXPORT_SYMBOL_GPL(cper_next_record_id);
21858
21859 diff -urNp linux-3.0.3/drivers/acpi/ec_sys.c linux-3.0.3/drivers/acpi/ec_sys.c
21860 --- linux-3.0.3/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
21861 +++ linux-3.0.3/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
21862 @@ -11,6 +11,7 @@
21863 #include <linux/kernel.h>
21864 #include <linux/acpi.h>
21865 #include <linux/debugfs.h>
21866 +#include <asm/uaccess.h>
21867 #include "internal.h"
21868
21869 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
21870 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
21871 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
21872 */
21873 unsigned int size = EC_SPACE_SIZE;
21874 - u8 *data = (u8 *) buf;
21875 + u8 data;
21876 loff_t init_off = *off;
21877 int err = 0;
21878
21879 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
21880 size = count;
21881
21882 while (size) {
21883 - err = ec_read(*off, &data[*off - init_off]);
21884 + err = ec_read(*off, &data);
21885 if (err)
21886 return err;
21887 + if (put_user(data, &buf[*off - init_off]))
21888 + return -EFAULT;
21889 *off += 1;
21890 size--;
21891 }
21892 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
21893
21894 unsigned int size = count;
21895 loff_t init_off = *off;
21896 - u8 *data = (u8 *) buf;
21897 int err = 0;
21898
21899 if (*off >= EC_SPACE_SIZE)
21900 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
21901 }
21902
21903 while (size) {
21904 - u8 byte_write = data[*off - init_off];
21905 + u8 byte_write;
21906 + if (get_user(byte_write, &buf[*off - init_off]))
21907 + return -EFAULT;
21908 err = ec_write(*off, byte_write);
21909 if (err)
21910 return err;
21911 diff -urNp linux-3.0.3/drivers/acpi/proc.c linux-3.0.3/drivers/acpi/proc.c
21912 --- linux-3.0.3/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
21913 +++ linux-3.0.3/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
21914 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
21915 size_t count, loff_t * ppos)
21916 {
21917 struct list_head *node, *next;
21918 - char strbuf[5];
21919 - char str[5] = "";
21920 - unsigned int len = count;
21921 -
21922 - if (len > 4)
21923 - len = 4;
21924 - if (len < 0)
21925 - return -EFAULT;
21926 + char strbuf[5] = {0};
21927
21928 - if (copy_from_user(strbuf, buffer, len))
21929 + if (count > 4)
21930 + count = 4;
21931 + if (copy_from_user(strbuf, buffer, count))
21932 return -EFAULT;
21933 - strbuf[len] = '\0';
21934 - sscanf(strbuf, "%s", str);
21935 + strbuf[count] = '\0';
21936
21937 mutex_lock(&acpi_device_lock);
21938 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
21939 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
21940 if (!dev->wakeup.flags.valid)
21941 continue;
21942
21943 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
21944 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
21945 if (device_can_wakeup(&dev->dev)) {
21946 bool enable = !device_may_wakeup(&dev->dev);
21947 device_set_wakeup_enable(&dev->dev, enable);
21948 diff -urNp linux-3.0.3/drivers/acpi/processor_driver.c linux-3.0.3/drivers/acpi/processor_driver.c
21949 --- linux-3.0.3/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
21950 +++ linux-3.0.3/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
21951 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
21952 return 0;
21953 #endif
21954
21955 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
21956 + BUG_ON(pr->id >= nr_cpu_ids);
21957
21958 /*
21959 * Buggy BIOS check
21960 diff -urNp linux-3.0.3/drivers/ata/libata-core.c linux-3.0.3/drivers/ata/libata-core.c
21961 --- linux-3.0.3/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
21962 +++ linux-3.0.3/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
21963 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
21964 struct ata_port *ap;
21965 unsigned int tag;
21966
21967 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21968 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21969 ap = qc->ap;
21970
21971 qc->flags = 0;
21972 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
21973 struct ata_port *ap;
21974 struct ata_link *link;
21975
21976 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21977 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
21978 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
21979 ap = qc->ap;
21980 link = qc->dev->link;
21981 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
21982 return;
21983
21984 spin_lock(&lock);
21985 + pax_open_kernel();
21986
21987 for (cur = ops->inherits; cur; cur = cur->inherits) {
21988 void **inherit = (void **)cur;
21989 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
21990 if (IS_ERR(*pp))
21991 *pp = NULL;
21992
21993 - ops->inherits = NULL;
21994 + *(struct ata_port_operations **)&ops->inherits = NULL;
21995
21996 + pax_close_kernel();
21997 spin_unlock(&lock);
21998 }
21999
22000 diff -urNp linux-3.0.3/drivers/ata/libata-eh.c linux-3.0.3/drivers/ata/libata-eh.c
22001 --- linux-3.0.3/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22002 +++ linux-3.0.3/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22003 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22004 {
22005 struct ata_link *link;
22006
22007 + pax_track_stack();
22008 +
22009 ata_for_each_link(link, ap, HOST_FIRST)
22010 ata_eh_link_report(link);
22011 }
22012 diff -urNp linux-3.0.3/drivers/ata/pata_arasan_cf.c linux-3.0.3/drivers/ata/pata_arasan_cf.c
22013 --- linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22014 +++ linux-3.0.3/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22015 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22016 /* Handle platform specific quirks */
22017 if (pdata->quirk) {
22018 if (pdata->quirk & CF_BROKEN_PIO) {
22019 - ap->ops->set_piomode = NULL;
22020 + pax_open_kernel();
22021 + *(void **)&ap->ops->set_piomode = NULL;
22022 + pax_close_kernel();
22023 ap->pio_mask = 0;
22024 }
22025 if (pdata->quirk & CF_BROKEN_MWDMA)
22026 diff -urNp linux-3.0.3/drivers/atm/adummy.c linux-3.0.3/drivers/atm/adummy.c
22027 --- linux-3.0.3/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22028 +++ linux-3.0.3/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22029 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22030 vcc->pop(vcc, skb);
22031 else
22032 dev_kfree_skb_any(skb);
22033 - atomic_inc(&vcc->stats->tx);
22034 + atomic_inc_unchecked(&vcc->stats->tx);
22035
22036 return 0;
22037 }
22038 diff -urNp linux-3.0.3/drivers/atm/ambassador.c linux-3.0.3/drivers/atm/ambassador.c
22039 --- linux-3.0.3/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22040 +++ linux-3.0.3/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22041 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22042 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22043
22044 // VC layer stats
22045 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22046 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22047
22048 // free the descriptor
22049 kfree (tx_descr);
22050 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22051 dump_skb ("<<<", vc, skb);
22052
22053 // VC layer stats
22054 - atomic_inc(&atm_vcc->stats->rx);
22055 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22056 __net_timestamp(skb);
22057 // end of our responsibility
22058 atm_vcc->push (atm_vcc, skb);
22059 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22060 } else {
22061 PRINTK (KERN_INFO, "dropped over-size frame");
22062 // should we count this?
22063 - atomic_inc(&atm_vcc->stats->rx_drop);
22064 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22065 }
22066
22067 } else {
22068 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22069 }
22070
22071 if (check_area (skb->data, skb->len)) {
22072 - atomic_inc(&atm_vcc->stats->tx_err);
22073 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22074 return -ENOMEM; // ?
22075 }
22076
22077 diff -urNp linux-3.0.3/drivers/atm/atmtcp.c linux-3.0.3/drivers/atm/atmtcp.c
22078 --- linux-3.0.3/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22079 +++ linux-3.0.3/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22080 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22081 if (vcc->pop) vcc->pop(vcc,skb);
22082 else dev_kfree_skb(skb);
22083 if (dev_data) return 0;
22084 - atomic_inc(&vcc->stats->tx_err);
22085 + atomic_inc_unchecked(&vcc->stats->tx_err);
22086 return -ENOLINK;
22087 }
22088 size = skb->len+sizeof(struct atmtcp_hdr);
22089 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22090 if (!new_skb) {
22091 if (vcc->pop) vcc->pop(vcc,skb);
22092 else dev_kfree_skb(skb);
22093 - atomic_inc(&vcc->stats->tx_err);
22094 + atomic_inc_unchecked(&vcc->stats->tx_err);
22095 return -ENOBUFS;
22096 }
22097 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22098 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22099 if (vcc->pop) vcc->pop(vcc,skb);
22100 else dev_kfree_skb(skb);
22101 out_vcc->push(out_vcc,new_skb);
22102 - atomic_inc(&vcc->stats->tx);
22103 - atomic_inc(&out_vcc->stats->rx);
22104 + atomic_inc_unchecked(&vcc->stats->tx);
22105 + atomic_inc_unchecked(&out_vcc->stats->rx);
22106 return 0;
22107 }
22108
22109 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22110 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22111 read_unlock(&vcc_sklist_lock);
22112 if (!out_vcc) {
22113 - atomic_inc(&vcc->stats->tx_err);
22114 + atomic_inc_unchecked(&vcc->stats->tx_err);
22115 goto done;
22116 }
22117 skb_pull(skb,sizeof(struct atmtcp_hdr));
22118 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22119 __net_timestamp(new_skb);
22120 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22121 out_vcc->push(out_vcc,new_skb);
22122 - atomic_inc(&vcc->stats->tx);
22123 - atomic_inc(&out_vcc->stats->rx);
22124 + atomic_inc_unchecked(&vcc->stats->tx);
22125 + atomic_inc_unchecked(&out_vcc->stats->rx);
22126 done:
22127 if (vcc->pop) vcc->pop(vcc,skb);
22128 else dev_kfree_skb(skb);
22129 diff -urNp linux-3.0.3/drivers/atm/eni.c linux-3.0.3/drivers/atm/eni.c
22130 --- linux-3.0.3/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22131 +++ linux-3.0.3/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22132 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22133 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22134 vcc->dev->number);
22135 length = 0;
22136 - atomic_inc(&vcc->stats->rx_err);
22137 + atomic_inc_unchecked(&vcc->stats->rx_err);
22138 }
22139 else {
22140 length = ATM_CELL_SIZE-1; /* no HEC */
22141 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22142 size);
22143 }
22144 eff = length = 0;
22145 - atomic_inc(&vcc->stats->rx_err);
22146 + atomic_inc_unchecked(&vcc->stats->rx_err);
22147 }
22148 else {
22149 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22150 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22151 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22152 vcc->dev->number,vcc->vci,length,size << 2,descr);
22153 length = eff = 0;
22154 - atomic_inc(&vcc->stats->rx_err);
22155 + atomic_inc_unchecked(&vcc->stats->rx_err);
22156 }
22157 }
22158 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22159 @@ -771,7 +771,7 @@ rx_dequeued++;
22160 vcc->push(vcc,skb);
22161 pushed++;
22162 }
22163 - atomic_inc(&vcc->stats->rx);
22164 + atomic_inc_unchecked(&vcc->stats->rx);
22165 }
22166 wake_up(&eni_dev->rx_wait);
22167 }
22168 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22169 PCI_DMA_TODEVICE);
22170 if (vcc->pop) vcc->pop(vcc,skb);
22171 else dev_kfree_skb_irq(skb);
22172 - atomic_inc(&vcc->stats->tx);
22173 + atomic_inc_unchecked(&vcc->stats->tx);
22174 wake_up(&eni_dev->tx_wait);
22175 dma_complete++;
22176 }
22177 diff -urNp linux-3.0.3/drivers/atm/firestream.c linux-3.0.3/drivers/atm/firestream.c
22178 --- linux-3.0.3/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22179 +++ linux-3.0.3/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22180 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22181 }
22182 }
22183
22184 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22185 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22186
22187 fs_dprintk (FS_DEBUG_TXMEM, "i");
22188 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22189 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22190 #endif
22191 skb_put (skb, qe->p1 & 0xffff);
22192 ATM_SKB(skb)->vcc = atm_vcc;
22193 - atomic_inc(&atm_vcc->stats->rx);
22194 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22195 __net_timestamp(skb);
22196 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22197 atm_vcc->push (atm_vcc, skb);
22198 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22199 kfree (pe);
22200 }
22201 if (atm_vcc)
22202 - atomic_inc(&atm_vcc->stats->rx_drop);
22203 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22204 break;
22205 case 0x1f: /* Reassembly abort: no buffers. */
22206 /* Silently increment error counter. */
22207 if (atm_vcc)
22208 - atomic_inc(&atm_vcc->stats->rx_drop);
22209 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22210 break;
22211 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22212 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22213 diff -urNp linux-3.0.3/drivers/atm/fore200e.c linux-3.0.3/drivers/atm/fore200e.c
22214 --- linux-3.0.3/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22215 +++ linux-3.0.3/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22216 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22217 #endif
22218 /* check error condition */
22219 if (*entry->status & STATUS_ERROR)
22220 - atomic_inc(&vcc->stats->tx_err);
22221 + atomic_inc_unchecked(&vcc->stats->tx_err);
22222 else
22223 - atomic_inc(&vcc->stats->tx);
22224 + atomic_inc_unchecked(&vcc->stats->tx);
22225 }
22226 }
22227
22228 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22229 if (skb == NULL) {
22230 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22231
22232 - atomic_inc(&vcc->stats->rx_drop);
22233 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22234 return -ENOMEM;
22235 }
22236
22237 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22238
22239 dev_kfree_skb_any(skb);
22240
22241 - atomic_inc(&vcc->stats->rx_drop);
22242 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22243 return -ENOMEM;
22244 }
22245
22246 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22247
22248 vcc->push(vcc, skb);
22249 - atomic_inc(&vcc->stats->rx);
22250 + atomic_inc_unchecked(&vcc->stats->rx);
22251
22252 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22253
22254 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22255 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22256 fore200e->atm_dev->number,
22257 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22258 - atomic_inc(&vcc->stats->rx_err);
22259 + atomic_inc_unchecked(&vcc->stats->rx_err);
22260 }
22261 }
22262
22263 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22264 goto retry_here;
22265 }
22266
22267 - atomic_inc(&vcc->stats->tx_err);
22268 + atomic_inc_unchecked(&vcc->stats->tx_err);
22269
22270 fore200e->tx_sat++;
22271 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22272 diff -urNp linux-3.0.3/drivers/atm/he.c linux-3.0.3/drivers/atm/he.c
22273 --- linux-3.0.3/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22274 +++ linux-3.0.3/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22275 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22276
22277 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22278 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22279 - atomic_inc(&vcc->stats->rx_drop);
22280 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22281 goto return_host_buffers;
22282 }
22283
22284 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22285 RBRQ_LEN_ERR(he_dev->rbrq_head)
22286 ? "LEN_ERR" : "",
22287 vcc->vpi, vcc->vci);
22288 - atomic_inc(&vcc->stats->rx_err);
22289 + atomic_inc_unchecked(&vcc->stats->rx_err);
22290 goto return_host_buffers;
22291 }
22292
22293 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22294 vcc->push(vcc, skb);
22295 spin_lock(&he_dev->global_lock);
22296
22297 - atomic_inc(&vcc->stats->rx);
22298 + atomic_inc_unchecked(&vcc->stats->rx);
22299
22300 return_host_buffers:
22301 ++pdus_assembled;
22302 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22303 tpd->vcc->pop(tpd->vcc, tpd->skb);
22304 else
22305 dev_kfree_skb_any(tpd->skb);
22306 - atomic_inc(&tpd->vcc->stats->tx_err);
22307 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22308 }
22309 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22310 return;
22311 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22312 vcc->pop(vcc, skb);
22313 else
22314 dev_kfree_skb_any(skb);
22315 - atomic_inc(&vcc->stats->tx_err);
22316 + atomic_inc_unchecked(&vcc->stats->tx_err);
22317 return -EINVAL;
22318 }
22319
22320 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22321 vcc->pop(vcc, skb);
22322 else
22323 dev_kfree_skb_any(skb);
22324 - atomic_inc(&vcc->stats->tx_err);
22325 + atomic_inc_unchecked(&vcc->stats->tx_err);
22326 return -EINVAL;
22327 }
22328 #endif
22329 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22330 vcc->pop(vcc, skb);
22331 else
22332 dev_kfree_skb_any(skb);
22333 - atomic_inc(&vcc->stats->tx_err);
22334 + atomic_inc_unchecked(&vcc->stats->tx_err);
22335 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22336 return -ENOMEM;
22337 }
22338 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22339 vcc->pop(vcc, skb);
22340 else
22341 dev_kfree_skb_any(skb);
22342 - atomic_inc(&vcc->stats->tx_err);
22343 + atomic_inc_unchecked(&vcc->stats->tx_err);
22344 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22345 return -ENOMEM;
22346 }
22347 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22348 __enqueue_tpd(he_dev, tpd, cid);
22349 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22350
22351 - atomic_inc(&vcc->stats->tx);
22352 + atomic_inc_unchecked(&vcc->stats->tx);
22353
22354 return 0;
22355 }
22356 diff -urNp linux-3.0.3/drivers/atm/horizon.c linux-3.0.3/drivers/atm/horizon.c
22357 --- linux-3.0.3/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22358 +++ linux-3.0.3/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22359 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22360 {
22361 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22362 // VC layer stats
22363 - atomic_inc(&vcc->stats->rx);
22364 + atomic_inc_unchecked(&vcc->stats->rx);
22365 __net_timestamp(skb);
22366 // end of our responsibility
22367 vcc->push (vcc, skb);
22368 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22369 dev->tx_iovec = NULL;
22370
22371 // VC layer stats
22372 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22373 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22374
22375 // free the skb
22376 hrz_kfree_skb (skb);
22377 diff -urNp linux-3.0.3/drivers/atm/idt77252.c linux-3.0.3/drivers/atm/idt77252.c
22378 --- linux-3.0.3/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22379 +++ linux-3.0.3/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22380 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22381 else
22382 dev_kfree_skb(skb);
22383
22384 - atomic_inc(&vcc->stats->tx);
22385 + atomic_inc_unchecked(&vcc->stats->tx);
22386 }
22387
22388 atomic_dec(&scq->used);
22389 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22390 if ((sb = dev_alloc_skb(64)) == NULL) {
22391 printk("%s: Can't allocate buffers for aal0.\n",
22392 card->name);
22393 - atomic_add(i, &vcc->stats->rx_drop);
22394 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22395 break;
22396 }
22397 if (!atm_charge(vcc, sb->truesize)) {
22398 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22399 card->name);
22400 - atomic_add(i - 1, &vcc->stats->rx_drop);
22401 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22402 dev_kfree_skb(sb);
22403 break;
22404 }
22405 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22406 ATM_SKB(sb)->vcc = vcc;
22407 __net_timestamp(sb);
22408 vcc->push(vcc, sb);
22409 - atomic_inc(&vcc->stats->rx);
22410 + atomic_inc_unchecked(&vcc->stats->rx);
22411
22412 cell += ATM_CELL_PAYLOAD;
22413 }
22414 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22415 "(CDC: %08x)\n",
22416 card->name, len, rpp->len, readl(SAR_REG_CDC));
22417 recycle_rx_pool_skb(card, rpp);
22418 - atomic_inc(&vcc->stats->rx_err);
22419 + atomic_inc_unchecked(&vcc->stats->rx_err);
22420 return;
22421 }
22422 if (stat & SAR_RSQE_CRC) {
22423 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22424 recycle_rx_pool_skb(card, rpp);
22425 - atomic_inc(&vcc->stats->rx_err);
22426 + atomic_inc_unchecked(&vcc->stats->rx_err);
22427 return;
22428 }
22429 if (skb_queue_len(&rpp->queue) > 1) {
22430 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22431 RXPRINTK("%s: Can't alloc RX skb.\n",
22432 card->name);
22433 recycle_rx_pool_skb(card, rpp);
22434 - atomic_inc(&vcc->stats->rx_err);
22435 + atomic_inc_unchecked(&vcc->stats->rx_err);
22436 return;
22437 }
22438 if (!atm_charge(vcc, skb->truesize)) {
22439 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22440 __net_timestamp(skb);
22441
22442 vcc->push(vcc, skb);
22443 - atomic_inc(&vcc->stats->rx);
22444 + atomic_inc_unchecked(&vcc->stats->rx);
22445
22446 return;
22447 }
22448 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22449 __net_timestamp(skb);
22450
22451 vcc->push(vcc, skb);
22452 - atomic_inc(&vcc->stats->rx);
22453 + atomic_inc_unchecked(&vcc->stats->rx);
22454
22455 if (skb->truesize > SAR_FB_SIZE_3)
22456 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22457 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22458 if (vcc->qos.aal != ATM_AAL0) {
22459 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22460 card->name, vpi, vci);
22461 - atomic_inc(&vcc->stats->rx_drop);
22462 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22463 goto drop;
22464 }
22465
22466 if ((sb = dev_alloc_skb(64)) == NULL) {
22467 printk("%s: Can't allocate buffers for AAL0.\n",
22468 card->name);
22469 - atomic_inc(&vcc->stats->rx_err);
22470 + atomic_inc_unchecked(&vcc->stats->rx_err);
22471 goto drop;
22472 }
22473
22474 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22475 ATM_SKB(sb)->vcc = vcc;
22476 __net_timestamp(sb);
22477 vcc->push(vcc, sb);
22478 - atomic_inc(&vcc->stats->rx);
22479 + atomic_inc_unchecked(&vcc->stats->rx);
22480
22481 drop:
22482 skb_pull(queue, 64);
22483 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22484
22485 if (vc == NULL) {
22486 printk("%s: NULL connection in send().\n", card->name);
22487 - atomic_inc(&vcc->stats->tx_err);
22488 + atomic_inc_unchecked(&vcc->stats->tx_err);
22489 dev_kfree_skb(skb);
22490 return -EINVAL;
22491 }
22492 if (!test_bit(VCF_TX, &vc->flags)) {
22493 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22494 - atomic_inc(&vcc->stats->tx_err);
22495 + atomic_inc_unchecked(&vcc->stats->tx_err);
22496 dev_kfree_skb(skb);
22497 return -EINVAL;
22498 }
22499 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22500 break;
22501 default:
22502 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22503 - atomic_inc(&vcc->stats->tx_err);
22504 + atomic_inc_unchecked(&vcc->stats->tx_err);
22505 dev_kfree_skb(skb);
22506 return -EINVAL;
22507 }
22508
22509 if (skb_shinfo(skb)->nr_frags != 0) {
22510 printk("%s: No scatter-gather yet.\n", card->name);
22511 - atomic_inc(&vcc->stats->tx_err);
22512 + atomic_inc_unchecked(&vcc->stats->tx_err);
22513 dev_kfree_skb(skb);
22514 return -EINVAL;
22515 }
22516 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22517
22518 err = queue_skb(card, vc, skb, oam);
22519 if (err) {
22520 - atomic_inc(&vcc->stats->tx_err);
22521 + atomic_inc_unchecked(&vcc->stats->tx_err);
22522 dev_kfree_skb(skb);
22523 return err;
22524 }
22525 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22526 skb = dev_alloc_skb(64);
22527 if (!skb) {
22528 printk("%s: Out of memory in send_oam().\n", card->name);
22529 - atomic_inc(&vcc->stats->tx_err);
22530 + atomic_inc_unchecked(&vcc->stats->tx_err);
22531 return -ENOMEM;
22532 }
22533 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22534 diff -urNp linux-3.0.3/drivers/atm/iphase.c linux-3.0.3/drivers/atm/iphase.c
22535 --- linux-3.0.3/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
22536 +++ linux-3.0.3/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
22537 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
22538 status = (u_short) (buf_desc_ptr->desc_mode);
22539 if (status & (RX_CER | RX_PTE | RX_OFL))
22540 {
22541 - atomic_inc(&vcc->stats->rx_err);
22542 + atomic_inc_unchecked(&vcc->stats->rx_err);
22543 IF_ERR(printk("IA: bad packet, dropping it");)
22544 if (status & RX_CER) {
22545 IF_ERR(printk(" cause: packet CRC error\n");)
22546 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
22547 len = dma_addr - buf_addr;
22548 if (len > iadev->rx_buf_sz) {
22549 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22550 - atomic_inc(&vcc->stats->rx_err);
22551 + atomic_inc_unchecked(&vcc->stats->rx_err);
22552 goto out_free_desc;
22553 }
22554
22555 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
22556 ia_vcc = INPH_IA_VCC(vcc);
22557 if (ia_vcc == NULL)
22558 {
22559 - atomic_inc(&vcc->stats->rx_err);
22560 + atomic_inc_unchecked(&vcc->stats->rx_err);
22561 dev_kfree_skb_any(skb);
22562 atm_return(vcc, atm_guess_pdu2truesize(len));
22563 goto INCR_DLE;
22564 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
22565 if ((length > iadev->rx_buf_sz) || (length >
22566 (skb->len - sizeof(struct cpcs_trailer))))
22567 {
22568 - atomic_inc(&vcc->stats->rx_err);
22569 + atomic_inc_unchecked(&vcc->stats->rx_err);
22570 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22571 length, skb->len);)
22572 dev_kfree_skb_any(skb);
22573 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
22574
22575 IF_RX(printk("rx_dle_intr: skb push");)
22576 vcc->push(vcc,skb);
22577 - atomic_inc(&vcc->stats->rx);
22578 + atomic_inc_unchecked(&vcc->stats->rx);
22579 iadev->rx_pkt_cnt++;
22580 }
22581 INCR_DLE:
22582 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
22583 {
22584 struct k_sonet_stats *stats;
22585 stats = &PRIV(_ia_dev[board])->sonet_stats;
22586 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22587 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22588 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22589 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22590 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22591 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22592 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22593 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22594 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22595 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22596 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22597 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22598 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22599 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22600 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22601 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22602 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22603 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22604 }
22605 ia_cmds.status = 0;
22606 break;
22607 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22608 if ((desc == 0) || (desc > iadev->num_tx_desc))
22609 {
22610 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22611 - atomic_inc(&vcc->stats->tx);
22612 + atomic_inc_unchecked(&vcc->stats->tx);
22613 if (vcc->pop)
22614 vcc->pop(vcc, skb);
22615 else
22616 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22617 ATM_DESC(skb) = vcc->vci;
22618 skb_queue_tail(&iadev->tx_dma_q, skb);
22619
22620 - atomic_inc(&vcc->stats->tx);
22621 + atomic_inc_unchecked(&vcc->stats->tx);
22622 iadev->tx_pkt_cnt++;
22623 /* Increment transaction counter */
22624 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22625
22626 #if 0
22627 /* add flow control logic */
22628 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22629 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22630 if (iavcc->vc_desc_cnt > 10) {
22631 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22632 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22633 diff -urNp linux-3.0.3/drivers/atm/lanai.c linux-3.0.3/drivers/atm/lanai.c
22634 --- linux-3.0.3/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
22635 +++ linux-3.0.3/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
22636 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22637 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22638 lanai_endtx(lanai, lvcc);
22639 lanai_free_skb(lvcc->tx.atmvcc, skb);
22640 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22641 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22642 }
22643
22644 /* Try to fill the buffer - don't call unless there is backlog */
22645 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22646 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22647 __net_timestamp(skb);
22648 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22649 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22650 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22651 out:
22652 lvcc->rx.buf.ptr = end;
22653 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22654 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22655 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22656 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22657 lanai->stats.service_rxnotaal5++;
22658 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22659 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22660 return 0;
22661 }
22662 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22663 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22664 int bytes;
22665 read_unlock(&vcc_sklist_lock);
22666 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22667 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22668 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22669 lvcc->stats.x.aal5.service_trash++;
22670 bytes = (SERVICE_GET_END(s) * 16) -
22671 (((unsigned long) lvcc->rx.buf.ptr) -
22672 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22673 }
22674 if (s & SERVICE_STREAM) {
22675 read_unlock(&vcc_sklist_lock);
22676 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22677 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22678 lvcc->stats.x.aal5.service_stream++;
22679 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22680 "PDU on VCI %d!\n", lanai->number, vci);
22681 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22682 return 0;
22683 }
22684 DPRINTK("got rx crc error on vci %d\n", vci);
22685 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22686 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22687 lvcc->stats.x.aal5.service_rxcrc++;
22688 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22689 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22690 diff -urNp linux-3.0.3/drivers/atm/nicstar.c linux-3.0.3/drivers/atm/nicstar.c
22691 --- linux-3.0.3/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
22692 +++ linux-3.0.3/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
22693 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22694 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22695 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22696 card->index);
22697 - atomic_inc(&vcc->stats->tx_err);
22698 + atomic_inc_unchecked(&vcc->stats->tx_err);
22699 dev_kfree_skb_any(skb);
22700 return -EINVAL;
22701 }
22702 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22703 if (!vc->tx) {
22704 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22705 card->index);
22706 - atomic_inc(&vcc->stats->tx_err);
22707 + atomic_inc_unchecked(&vcc->stats->tx_err);
22708 dev_kfree_skb_any(skb);
22709 return -EINVAL;
22710 }
22711 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22712 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22713 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22714 card->index);
22715 - atomic_inc(&vcc->stats->tx_err);
22716 + atomic_inc_unchecked(&vcc->stats->tx_err);
22717 dev_kfree_skb_any(skb);
22718 return -EINVAL;
22719 }
22720
22721 if (skb_shinfo(skb)->nr_frags != 0) {
22722 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22723 - atomic_inc(&vcc->stats->tx_err);
22724 + atomic_inc_unchecked(&vcc->stats->tx_err);
22725 dev_kfree_skb_any(skb);
22726 return -EINVAL;
22727 }
22728 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22729 }
22730
22731 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22732 - atomic_inc(&vcc->stats->tx_err);
22733 + atomic_inc_unchecked(&vcc->stats->tx_err);
22734 dev_kfree_skb_any(skb);
22735 return -EIO;
22736 }
22737 - atomic_inc(&vcc->stats->tx);
22738 + atomic_inc_unchecked(&vcc->stats->tx);
22739
22740 return 0;
22741 }
22742 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22743 printk
22744 ("nicstar%d: Can't allocate buffers for aal0.\n",
22745 card->index);
22746 - atomic_add(i, &vcc->stats->rx_drop);
22747 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22748 break;
22749 }
22750 if (!atm_charge(vcc, sb->truesize)) {
22751 RXPRINTK
22752 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22753 card->index);
22754 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22755 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22756 dev_kfree_skb_any(sb);
22757 break;
22758 }
22759 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22760 ATM_SKB(sb)->vcc = vcc;
22761 __net_timestamp(sb);
22762 vcc->push(vcc, sb);
22763 - atomic_inc(&vcc->stats->rx);
22764 + atomic_inc_unchecked(&vcc->stats->rx);
22765 cell += ATM_CELL_PAYLOAD;
22766 }
22767
22768 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22769 if (iovb == NULL) {
22770 printk("nicstar%d: Out of iovec buffers.\n",
22771 card->index);
22772 - atomic_inc(&vcc->stats->rx_drop);
22773 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22774 recycle_rx_buf(card, skb);
22775 return;
22776 }
22777 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22778 small or large buffer itself. */
22779 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22780 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22781 - atomic_inc(&vcc->stats->rx_err);
22782 + atomic_inc_unchecked(&vcc->stats->rx_err);
22783 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22784 NS_MAX_IOVECS);
22785 NS_PRV_IOVCNT(iovb) = 0;
22786 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22787 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22788 card->index);
22789 which_list(card, skb);
22790 - atomic_inc(&vcc->stats->rx_err);
22791 + atomic_inc_unchecked(&vcc->stats->rx_err);
22792 recycle_rx_buf(card, skb);
22793 vc->rx_iov = NULL;
22794 recycle_iov_buf(card, iovb);
22795 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22796 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22797 card->index);
22798 which_list(card, skb);
22799 - atomic_inc(&vcc->stats->rx_err);
22800 + atomic_inc_unchecked(&vcc->stats->rx_err);
22801 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22802 NS_PRV_IOVCNT(iovb));
22803 vc->rx_iov = NULL;
22804 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22805 printk(" - PDU size mismatch.\n");
22806 else
22807 printk(".\n");
22808 - atomic_inc(&vcc->stats->rx_err);
22809 + atomic_inc_unchecked(&vcc->stats->rx_err);
22810 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22811 NS_PRV_IOVCNT(iovb));
22812 vc->rx_iov = NULL;
22813 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22814 /* skb points to a small buffer */
22815 if (!atm_charge(vcc, skb->truesize)) {
22816 push_rxbufs(card, skb);
22817 - atomic_inc(&vcc->stats->rx_drop);
22818 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22819 } else {
22820 skb_put(skb, len);
22821 dequeue_sm_buf(card, skb);
22822 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22823 ATM_SKB(skb)->vcc = vcc;
22824 __net_timestamp(skb);
22825 vcc->push(vcc, skb);
22826 - atomic_inc(&vcc->stats->rx);
22827 + atomic_inc_unchecked(&vcc->stats->rx);
22828 }
22829 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22830 struct sk_buff *sb;
22831 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22832 if (len <= NS_SMBUFSIZE) {
22833 if (!atm_charge(vcc, sb->truesize)) {
22834 push_rxbufs(card, sb);
22835 - atomic_inc(&vcc->stats->rx_drop);
22836 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22837 } else {
22838 skb_put(sb, len);
22839 dequeue_sm_buf(card, sb);
22840 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
22841 ATM_SKB(sb)->vcc = vcc;
22842 __net_timestamp(sb);
22843 vcc->push(vcc, sb);
22844 - atomic_inc(&vcc->stats->rx);
22845 + atomic_inc_unchecked(&vcc->stats->rx);
22846 }
22847
22848 push_rxbufs(card, skb);
22849 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
22850
22851 if (!atm_charge(vcc, skb->truesize)) {
22852 push_rxbufs(card, skb);
22853 - atomic_inc(&vcc->stats->rx_drop);
22854 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22855 } else {
22856 dequeue_lg_buf(card, skb);
22857 #ifdef NS_USE_DESTRUCTORS
22858 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
22859 ATM_SKB(skb)->vcc = vcc;
22860 __net_timestamp(skb);
22861 vcc->push(vcc, skb);
22862 - atomic_inc(&vcc->stats->rx);
22863 + atomic_inc_unchecked(&vcc->stats->rx);
22864 }
22865
22866 push_rxbufs(card, sb);
22867 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
22868 printk
22869 ("nicstar%d: Out of huge buffers.\n",
22870 card->index);
22871 - atomic_inc(&vcc->stats->rx_drop);
22872 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22873 recycle_iovec_rx_bufs(card,
22874 (struct iovec *)
22875 iovb->data,
22876 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
22877 card->hbpool.count++;
22878 } else
22879 dev_kfree_skb_any(hb);
22880 - atomic_inc(&vcc->stats->rx_drop);
22881 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22882 } else {
22883 /* Copy the small buffer to the huge buffer */
22884 sb = (struct sk_buff *)iov->iov_base;
22885 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
22886 #endif /* NS_USE_DESTRUCTORS */
22887 __net_timestamp(hb);
22888 vcc->push(vcc, hb);
22889 - atomic_inc(&vcc->stats->rx);
22890 + atomic_inc_unchecked(&vcc->stats->rx);
22891 }
22892 }
22893
22894 diff -urNp linux-3.0.3/drivers/atm/solos-pci.c linux-3.0.3/drivers/atm/solos-pci.c
22895 --- linux-3.0.3/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
22896 +++ linux-3.0.3/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
22897 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
22898 }
22899 atm_charge(vcc, skb->truesize);
22900 vcc->push(vcc, skb);
22901 - atomic_inc(&vcc->stats->rx);
22902 + atomic_inc_unchecked(&vcc->stats->rx);
22903 break;
22904
22905 case PKT_STATUS:
22906 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
22907 char msg[500];
22908 char item[10];
22909
22910 + pax_track_stack();
22911 +
22912 len = buf->len;
22913 for (i = 0; i < len; i++){
22914 if(i % 8 == 0)
22915 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
22916 vcc = SKB_CB(oldskb)->vcc;
22917
22918 if (vcc) {
22919 - atomic_inc(&vcc->stats->tx);
22920 + atomic_inc_unchecked(&vcc->stats->tx);
22921 solos_pop(vcc, oldskb);
22922 } else
22923 dev_kfree_skb_irq(oldskb);
22924 diff -urNp linux-3.0.3/drivers/atm/suni.c linux-3.0.3/drivers/atm/suni.c
22925 --- linux-3.0.3/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
22926 +++ linux-3.0.3/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
22927 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
22928
22929
22930 #define ADD_LIMITED(s,v) \
22931 - atomic_add((v),&stats->s); \
22932 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
22933 + atomic_add_unchecked((v),&stats->s); \
22934 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
22935
22936
22937 static void suni_hz(unsigned long from_timer)
22938 diff -urNp linux-3.0.3/drivers/atm/uPD98402.c linux-3.0.3/drivers/atm/uPD98402.c
22939 --- linux-3.0.3/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
22940 +++ linux-3.0.3/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
22941 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
22942 struct sonet_stats tmp;
22943 int error = 0;
22944
22945 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22946 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
22947 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
22948 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
22949 if (zero && !error) {
22950 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
22951
22952
22953 #define ADD_LIMITED(s,v) \
22954 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
22955 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
22956 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22957 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
22958 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
22959 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
22960
22961
22962 static void stat_event(struct atm_dev *dev)
22963 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
22964 if (reason & uPD98402_INT_PFM) stat_event(dev);
22965 if (reason & uPD98402_INT_PCO) {
22966 (void) GET(PCOCR); /* clear interrupt cause */
22967 - atomic_add(GET(HECCT),
22968 + atomic_add_unchecked(GET(HECCT),
22969 &PRIV(dev)->sonet_stats.uncorr_hcs);
22970 }
22971 if ((reason & uPD98402_INT_RFO) &&
22972 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
22973 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
22974 uPD98402_INT_LOS),PIMR); /* enable them */
22975 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
22976 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22977 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
22978 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
22979 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
22980 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
22981 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
22982 return 0;
22983 }
22984
22985 diff -urNp linux-3.0.3/drivers/atm/zatm.c linux-3.0.3/drivers/atm/zatm.c
22986 --- linux-3.0.3/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
22987 +++ linux-3.0.3/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
22988 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22989 }
22990 if (!size) {
22991 dev_kfree_skb_irq(skb);
22992 - if (vcc) atomic_inc(&vcc->stats->rx_err);
22993 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
22994 continue;
22995 }
22996 if (!atm_charge(vcc,skb->truesize)) {
22997 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
22998 skb->len = size;
22999 ATM_SKB(skb)->vcc = vcc;
23000 vcc->push(vcc,skb);
23001 - atomic_inc(&vcc->stats->rx);
23002 + atomic_inc_unchecked(&vcc->stats->rx);
23003 }
23004 zout(pos & 0xffff,MTA(mbx));
23005 #if 0 /* probably a stupid idea */
23006 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23007 skb_queue_head(&zatm_vcc->backlog,skb);
23008 break;
23009 }
23010 - atomic_inc(&vcc->stats->tx);
23011 + atomic_inc_unchecked(&vcc->stats->tx);
23012 wake_up(&zatm_vcc->tx_wait);
23013 }
23014
23015 diff -urNp linux-3.0.3/drivers/base/power/wakeup.c linux-3.0.3/drivers/base/power/wakeup.c
23016 --- linux-3.0.3/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23017 +++ linux-3.0.3/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23018 @@ -29,14 +29,14 @@ bool events_check_enabled;
23019 * They need to be modified together atomically, so it's better to use one
23020 * atomic variable to hold them both.
23021 */
23022 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23023 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23024
23025 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23026 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23027
23028 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23029 {
23030 - unsigned int comb = atomic_read(&combined_event_count);
23031 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23032
23033 *cnt = (comb >> IN_PROGRESS_BITS);
23034 *inpr = comb & MAX_IN_PROGRESS;
23035 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23036 ws->last_time = ktime_get();
23037
23038 /* Increment the counter of events in progress. */
23039 - atomic_inc(&combined_event_count);
23040 + atomic_inc_unchecked(&combined_event_count);
23041 }
23042
23043 /**
23044 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23045 * Increment the counter of registered wakeup events and decrement the
23046 * couter of wakeup events in progress simultaneously.
23047 */
23048 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23049 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23050 }
23051
23052 /**
23053 diff -urNp linux-3.0.3/drivers/block/cciss.c linux-3.0.3/drivers/block/cciss.c
23054 --- linux-3.0.3/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23055 +++ linux-3.0.3/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23056 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23057 int err;
23058 u32 cp;
23059
23060 + memset(&arg64, 0, sizeof(arg64));
23061 +
23062 err = 0;
23063 err |=
23064 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23065 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23066 while (!list_empty(&h->reqQ)) {
23067 c = list_entry(h->reqQ.next, CommandList_struct, list);
23068 /* can't do anything if fifo is full */
23069 - if ((h->access.fifo_full(h))) {
23070 + if ((h->access->fifo_full(h))) {
23071 dev_warn(&h->pdev->dev, "fifo full\n");
23072 break;
23073 }
23074 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23075 h->Qdepth--;
23076
23077 /* Tell the controller execute command */
23078 - h->access.submit_command(h, c);
23079 + h->access->submit_command(h, c);
23080
23081 /* Put job onto the completed Q */
23082 addQ(&h->cmpQ, c);
23083 @@ -3422,17 +3424,17 @@ startio:
23084
23085 static inline unsigned long get_next_completion(ctlr_info_t *h)
23086 {
23087 - return h->access.command_completed(h);
23088 + return h->access->command_completed(h);
23089 }
23090
23091 static inline int interrupt_pending(ctlr_info_t *h)
23092 {
23093 - return h->access.intr_pending(h);
23094 + return h->access->intr_pending(h);
23095 }
23096
23097 static inline long interrupt_not_for_us(ctlr_info_t *h)
23098 {
23099 - return ((h->access.intr_pending(h) == 0) ||
23100 + return ((h->access->intr_pending(h) == 0) ||
23101 (h->interrupts_enabled == 0));
23102 }
23103
23104 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23105 u32 a;
23106
23107 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23108 - return h->access.command_completed(h);
23109 + return h->access->command_completed(h);
23110
23111 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23112 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23113 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23114 trans_support & CFGTBL_Trans_use_short_tags);
23115
23116 /* Change the access methods to the performant access methods */
23117 - h->access = SA5_performant_access;
23118 + h->access = &SA5_performant_access;
23119 h->transMethod = CFGTBL_Trans_Performant;
23120
23121 return;
23122 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23123 if (prod_index < 0)
23124 return -ENODEV;
23125 h->product_name = products[prod_index].product_name;
23126 - h->access = *(products[prod_index].access);
23127 + h->access = products[prod_index].access;
23128
23129 if (cciss_board_disabled(h)) {
23130 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23131 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23132 }
23133
23134 /* make sure the board interrupts are off */
23135 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23136 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23137 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23138 if (rc)
23139 goto clean2;
23140 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23141 * fake ones to scoop up any residual completions.
23142 */
23143 spin_lock_irqsave(&h->lock, flags);
23144 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23145 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23146 spin_unlock_irqrestore(&h->lock, flags);
23147 free_irq(h->intr[PERF_MODE_INT], h);
23148 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23149 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23150 dev_info(&h->pdev->dev, "Board READY.\n");
23151 dev_info(&h->pdev->dev,
23152 "Waiting for stale completions to drain.\n");
23153 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23154 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23155 msleep(10000);
23156 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23157 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23158
23159 rc = controller_reset_failed(h->cfgtable);
23160 if (rc)
23161 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23162 cciss_scsi_setup(h);
23163
23164 /* Turn the interrupts on so we can service requests */
23165 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23166 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23167
23168 /* Get the firmware version */
23169 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23170 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23171 kfree(flush_buf);
23172 if (return_code != IO_OK)
23173 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23174 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23175 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23176 free_irq(h->intr[PERF_MODE_INT], h);
23177 }
23178
23179 diff -urNp linux-3.0.3/drivers/block/cciss.h linux-3.0.3/drivers/block/cciss.h
23180 --- linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:44:40.000000000 -0400
23181 +++ linux-3.0.3/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23182 @@ -100,7 +100,7 @@ struct ctlr_info
23183 /* information about each logical volume */
23184 drive_info_struct *drv[CISS_MAX_LUN];
23185
23186 - struct access_method access;
23187 + struct access_method *access;
23188
23189 /* queue and queue Info */
23190 struct list_head reqQ;
23191 diff -urNp linux-3.0.3/drivers/block/cpqarray.c linux-3.0.3/drivers/block/cpqarray.c
23192 --- linux-3.0.3/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23193 +++ linux-3.0.3/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23194 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23195 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23196 goto Enomem4;
23197 }
23198 - hba[i]->access.set_intr_mask(hba[i], 0);
23199 + hba[i]->access->set_intr_mask(hba[i], 0);
23200 if (request_irq(hba[i]->intr, do_ida_intr,
23201 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23202 {
23203 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23204 add_timer(&hba[i]->timer);
23205
23206 /* Enable IRQ now that spinlock and rate limit timer are set up */
23207 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23208 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23209
23210 for(j=0; j<NWD; j++) {
23211 struct gendisk *disk = ida_gendisk[i][j];
23212 @@ -694,7 +694,7 @@ DBGINFO(
23213 for(i=0; i<NR_PRODUCTS; i++) {
23214 if (board_id == products[i].board_id) {
23215 c->product_name = products[i].product_name;
23216 - c->access = *(products[i].access);
23217 + c->access = products[i].access;
23218 break;
23219 }
23220 }
23221 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23222 hba[ctlr]->intr = intr;
23223 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23224 hba[ctlr]->product_name = products[j].product_name;
23225 - hba[ctlr]->access = *(products[j].access);
23226 + hba[ctlr]->access = products[j].access;
23227 hba[ctlr]->ctlr = ctlr;
23228 hba[ctlr]->board_id = board_id;
23229 hba[ctlr]->pci_dev = NULL; /* not PCI */
23230 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23231 struct scatterlist tmp_sg[SG_MAX];
23232 int i, dir, seg;
23233
23234 + pax_track_stack();
23235 +
23236 queue_next:
23237 creq = blk_peek_request(q);
23238 if (!creq)
23239 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23240
23241 while((c = h->reqQ) != NULL) {
23242 /* Can't do anything if we're busy */
23243 - if (h->access.fifo_full(h) == 0)
23244 + if (h->access->fifo_full(h) == 0)
23245 return;
23246
23247 /* Get the first entry from the request Q */
23248 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23249 h->Qdepth--;
23250
23251 /* Tell the controller to do our bidding */
23252 - h->access.submit_command(h, c);
23253 + h->access->submit_command(h, c);
23254
23255 /* Get onto the completion Q */
23256 addQ(&h->cmpQ, c);
23257 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23258 unsigned long flags;
23259 __u32 a,a1;
23260
23261 - istat = h->access.intr_pending(h);
23262 + istat = h->access->intr_pending(h);
23263 /* Is this interrupt for us? */
23264 if (istat == 0)
23265 return IRQ_NONE;
23266 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23267 */
23268 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23269 if (istat & FIFO_NOT_EMPTY) {
23270 - while((a = h->access.command_completed(h))) {
23271 + while((a = h->access->command_completed(h))) {
23272 a1 = a; a &= ~3;
23273 if ((c = h->cmpQ) == NULL)
23274 {
23275 @@ -1449,11 +1451,11 @@ static int sendcmd(
23276 /*
23277 * Disable interrupt
23278 */
23279 - info_p->access.set_intr_mask(info_p, 0);
23280 + info_p->access->set_intr_mask(info_p, 0);
23281 /* Make sure there is room in the command FIFO */
23282 /* Actually it should be completely empty at this time. */
23283 for (i = 200000; i > 0; i--) {
23284 - temp = info_p->access.fifo_full(info_p);
23285 + temp = info_p->access->fifo_full(info_p);
23286 if (temp != 0) {
23287 break;
23288 }
23289 @@ -1466,7 +1468,7 @@ DBG(
23290 /*
23291 * Send the cmd
23292 */
23293 - info_p->access.submit_command(info_p, c);
23294 + info_p->access->submit_command(info_p, c);
23295 complete = pollcomplete(ctlr);
23296
23297 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23298 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23299 * we check the new geometry. Then turn interrupts back on when
23300 * we're done.
23301 */
23302 - host->access.set_intr_mask(host, 0);
23303 + host->access->set_intr_mask(host, 0);
23304 getgeometry(ctlr);
23305 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23306 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23307
23308 for(i=0; i<NWD; i++) {
23309 struct gendisk *disk = ida_gendisk[ctlr][i];
23310 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23311 /* Wait (up to 2 seconds) for a command to complete */
23312
23313 for (i = 200000; i > 0; i--) {
23314 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23315 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23316 if (done == 0) {
23317 udelay(10); /* a short fixed delay */
23318 } else
23319 diff -urNp linux-3.0.3/drivers/block/cpqarray.h linux-3.0.3/drivers/block/cpqarray.h
23320 --- linux-3.0.3/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23321 +++ linux-3.0.3/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23322 @@ -99,7 +99,7 @@ struct ctlr_info {
23323 drv_info_t drv[NWD];
23324 struct proc_dir_entry *proc;
23325
23326 - struct access_method access;
23327 + struct access_method *access;
23328
23329 cmdlist_t *reqQ;
23330 cmdlist_t *cmpQ;
23331 diff -urNp linux-3.0.3/drivers/block/DAC960.c linux-3.0.3/drivers/block/DAC960.c
23332 --- linux-3.0.3/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23333 +++ linux-3.0.3/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23334 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23335 unsigned long flags;
23336 int Channel, TargetID;
23337
23338 + pax_track_stack();
23339 +
23340 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23341 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23342 sizeof(DAC960_SCSI_Inquiry_T) +
23343 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_int.h linux-3.0.3/drivers/block/drbd/drbd_int.h
23344 --- linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23345 +++ linux-3.0.3/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23346 @@ -737,7 +737,7 @@ struct drbd_request;
23347 struct drbd_epoch {
23348 struct list_head list;
23349 unsigned int barrier_nr;
23350 - atomic_t epoch_size; /* increased on every request added. */
23351 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23352 atomic_t active; /* increased on every req. added, and dec on every finished. */
23353 unsigned long flags;
23354 };
23355 @@ -1109,7 +1109,7 @@ struct drbd_conf {
23356 void *int_dig_in;
23357 void *int_dig_vv;
23358 wait_queue_head_t seq_wait;
23359 - atomic_t packet_seq;
23360 + atomic_unchecked_t packet_seq;
23361 unsigned int peer_seq;
23362 spinlock_t peer_seq_lock;
23363 unsigned int minor;
23364 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_main.c linux-3.0.3/drivers/block/drbd/drbd_main.c
23365 --- linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23366 +++ linux-3.0.3/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23367 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23368 p.sector = sector;
23369 p.block_id = block_id;
23370 p.blksize = blksize;
23371 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23372 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23373
23374 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23375 return false;
23376 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23377 p.sector = cpu_to_be64(req->sector);
23378 p.block_id = (unsigned long)req;
23379 p.seq_num = cpu_to_be32(req->seq_num =
23380 - atomic_add_return(1, &mdev->packet_seq));
23381 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23382
23383 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23384
23385 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23386 atomic_set(&mdev->unacked_cnt, 0);
23387 atomic_set(&mdev->local_cnt, 0);
23388 atomic_set(&mdev->net_cnt, 0);
23389 - atomic_set(&mdev->packet_seq, 0);
23390 + atomic_set_unchecked(&mdev->packet_seq, 0);
23391 atomic_set(&mdev->pp_in_use, 0);
23392 atomic_set(&mdev->pp_in_use_by_net, 0);
23393 atomic_set(&mdev->rs_sect_in, 0);
23394 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23395 mdev->receiver.t_state);
23396
23397 /* no need to lock it, I'm the only thread alive */
23398 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23399 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23400 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23401 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23402 mdev->al_writ_cnt =
23403 mdev->bm_writ_cnt =
23404 mdev->read_cnt =
23405 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_nl.c linux-3.0.3/drivers/block/drbd/drbd_nl.c
23406 --- linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23407 +++ linux-3.0.3/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23408 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23409 module_put(THIS_MODULE);
23410 }
23411
23412 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23413 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23414
23415 static unsigned short *
23416 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23417 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23418 cn_reply->id.idx = CN_IDX_DRBD;
23419 cn_reply->id.val = CN_VAL_DRBD;
23420
23421 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23422 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23423 cn_reply->ack = 0; /* not used here. */
23424 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23425 (int)((char *)tl - (char *)reply->tag_list);
23426 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23427 cn_reply->id.idx = CN_IDX_DRBD;
23428 cn_reply->id.val = CN_VAL_DRBD;
23429
23430 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23431 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23432 cn_reply->ack = 0; /* not used here. */
23433 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23434 (int)((char *)tl - (char *)reply->tag_list);
23435 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23436 cn_reply->id.idx = CN_IDX_DRBD;
23437 cn_reply->id.val = CN_VAL_DRBD;
23438
23439 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23440 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23441 cn_reply->ack = 0; // not used here.
23442 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23443 (int)((char*)tl - (char*)reply->tag_list);
23444 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
23445 cn_reply->id.idx = CN_IDX_DRBD;
23446 cn_reply->id.val = CN_VAL_DRBD;
23447
23448 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23449 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23450 cn_reply->ack = 0; /* not used here. */
23451 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23452 (int)((char *)tl - (char *)reply->tag_list);
23453 diff -urNp linux-3.0.3/drivers/block/drbd/drbd_receiver.c linux-3.0.3/drivers/block/drbd/drbd_receiver.c
23454 --- linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
23455 +++ linux-3.0.3/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
23456 @@ -894,7 +894,7 @@ retry:
23457 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23458 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23459
23460 - atomic_set(&mdev->packet_seq, 0);
23461 + atomic_set_unchecked(&mdev->packet_seq, 0);
23462 mdev->peer_seq = 0;
23463
23464 drbd_thread_start(&mdev->asender);
23465 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
23466 do {
23467 next_epoch = NULL;
23468
23469 - epoch_size = atomic_read(&epoch->epoch_size);
23470 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23471
23472 switch (ev & ~EV_CLEANUP) {
23473 case EV_PUT:
23474 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
23475 rv = FE_DESTROYED;
23476 } else {
23477 epoch->flags = 0;
23478 - atomic_set(&epoch->epoch_size, 0);
23479 + atomic_set_unchecked(&epoch->epoch_size, 0);
23480 /* atomic_set(&epoch->active, 0); is already zero */
23481 if (rv == FE_STILL_LIVE)
23482 rv = FE_RECYCLED;
23483 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
23484 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23485 drbd_flush(mdev);
23486
23487 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23488 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23489 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23490 if (epoch)
23491 break;
23492 }
23493
23494 epoch = mdev->current_epoch;
23495 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23496 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23497
23498 D_ASSERT(atomic_read(&epoch->active) == 0);
23499 D_ASSERT(epoch->flags == 0);
23500 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
23501 }
23502
23503 epoch->flags = 0;
23504 - atomic_set(&epoch->epoch_size, 0);
23505 + atomic_set_unchecked(&epoch->epoch_size, 0);
23506 atomic_set(&epoch->active, 0);
23507
23508 spin_lock(&mdev->epoch_lock);
23509 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23510 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23511 list_add(&epoch->list, &mdev->current_epoch->list);
23512 mdev->current_epoch = epoch;
23513 mdev->epochs++;
23514 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
23515 spin_unlock(&mdev->peer_seq_lock);
23516
23517 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23518 - atomic_inc(&mdev->current_epoch->epoch_size);
23519 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23520 return drbd_drain_block(mdev, data_size);
23521 }
23522
23523 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
23524
23525 spin_lock(&mdev->epoch_lock);
23526 e->epoch = mdev->current_epoch;
23527 - atomic_inc(&e->epoch->epoch_size);
23528 + atomic_inc_unchecked(&e->epoch->epoch_size);
23529 atomic_inc(&e->epoch->active);
23530 spin_unlock(&mdev->epoch_lock);
23531
23532 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
23533 D_ASSERT(list_empty(&mdev->done_ee));
23534
23535 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23536 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23537 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23538 D_ASSERT(list_empty(&mdev->current_epoch->list));
23539 }
23540
23541 diff -urNp linux-3.0.3/drivers/block/nbd.c linux-3.0.3/drivers/block/nbd.c
23542 --- linux-3.0.3/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
23543 +++ linux-3.0.3/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
23544 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23545 struct kvec iov;
23546 sigset_t blocked, oldset;
23547
23548 + pax_track_stack();
23549 +
23550 if (unlikely(!sock)) {
23551 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23552 lo->disk->disk_name, (send ? "send" : "recv"));
23553 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
23554 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23555 unsigned int cmd, unsigned long arg)
23556 {
23557 + pax_track_stack();
23558 +
23559 switch (cmd) {
23560 case NBD_DISCONNECT: {
23561 struct request sreq;
23562 diff -urNp linux-3.0.3/drivers/char/agp/frontend.c linux-3.0.3/drivers/char/agp/frontend.c
23563 --- linux-3.0.3/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
23564 +++ linux-3.0.3/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
23565 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23566 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23567 return -EFAULT;
23568
23569 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23570 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23571 return -EFAULT;
23572
23573 client = agp_find_client_by_pid(reserve.pid);
23574 diff -urNp linux-3.0.3/drivers/char/briq_panel.c linux-3.0.3/drivers/char/briq_panel.c
23575 --- linux-3.0.3/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
23576 +++ linux-3.0.3/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
23577 @@ -9,6 +9,7 @@
23578 #include <linux/types.h>
23579 #include <linux/errno.h>
23580 #include <linux/tty.h>
23581 +#include <linux/mutex.h>
23582 #include <linux/timer.h>
23583 #include <linux/kernel.h>
23584 #include <linux/wait.h>
23585 @@ -34,6 +35,7 @@ static int vfd_is_open;
23586 static unsigned char vfd[40];
23587 static int vfd_cursor;
23588 static unsigned char ledpb, led;
23589 +static DEFINE_MUTEX(vfd_mutex);
23590
23591 static void update_vfd(void)
23592 {
23593 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23594 if (!vfd_is_open)
23595 return -EBUSY;
23596
23597 + mutex_lock(&vfd_mutex);
23598 for (;;) {
23599 char c;
23600 if (!indx)
23601 break;
23602 - if (get_user(c, buf))
23603 + if (get_user(c, buf)) {
23604 + mutex_unlock(&vfd_mutex);
23605 return -EFAULT;
23606 + }
23607 if (esc) {
23608 set_led(c);
23609 esc = 0;
23610 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23611 buf++;
23612 }
23613 update_vfd();
23614 + mutex_unlock(&vfd_mutex);
23615
23616 return len;
23617 }
23618 diff -urNp linux-3.0.3/drivers/char/genrtc.c linux-3.0.3/drivers/char/genrtc.c
23619 --- linux-3.0.3/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
23620 +++ linux-3.0.3/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
23621 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23622 switch (cmd) {
23623
23624 case RTC_PLL_GET:
23625 + memset(&pll, 0, sizeof(pll));
23626 if (get_rtc_pll(&pll))
23627 return -EINVAL;
23628 else
23629 diff -urNp linux-3.0.3/drivers/char/hpet.c linux-3.0.3/drivers/char/hpet.c
23630 --- linux-3.0.3/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
23631 +++ linux-3.0.3/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
23632 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
23633 }
23634
23635 static int
23636 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23637 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23638 struct hpet_info *info)
23639 {
23640 struct hpet_timer __iomem *timer;
23641 diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c
23642 --- linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
23643 +++ linux-3.0.3/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
23644 @@ -415,7 +415,7 @@ struct ipmi_smi {
23645 struct proc_dir_entry *proc_dir;
23646 char proc_dir_name[10];
23647
23648 - atomic_t stats[IPMI_NUM_STATS];
23649 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23650
23651 /*
23652 * run_to_completion duplicate of smb_info, smi_info
23653 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23654
23655
23656 #define ipmi_inc_stat(intf, stat) \
23657 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23658 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23659 #define ipmi_get_stat(intf, stat) \
23660 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23661 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23662
23663 static int is_lan_addr(struct ipmi_addr *addr)
23664 {
23665 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23666 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23667 init_waitqueue_head(&intf->waitq);
23668 for (i = 0; i < IPMI_NUM_STATS; i++)
23669 - atomic_set(&intf->stats[i], 0);
23670 + atomic_set_unchecked(&intf->stats[i], 0);
23671
23672 intf->proc_dir = NULL;
23673
23674 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
23675 struct ipmi_smi_msg smi_msg;
23676 struct ipmi_recv_msg recv_msg;
23677
23678 + pax_track_stack();
23679 +
23680 si = (struct ipmi_system_interface_addr *) &addr;
23681 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23682 si->channel = IPMI_BMC_CHANNEL;
23683 diff -urNp linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c
23684 --- linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
23685 +++ linux-3.0.3/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
23686 @@ -277,7 +277,7 @@ struct smi_info {
23687 unsigned char slave_addr;
23688
23689 /* Counters and things for the proc filesystem. */
23690 - atomic_t stats[SI_NUM_STATS];
23691 + atomic_unchecked_t stats[SI_NUM_STATS];
23692
23693 struct task_struct *thread;
23694
23695 @@ -286,9 +286,9 @@ struct smi_info {
23696 };
23697
23698 #define smi_inc_stat(smi, stat) \
23699 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23700 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23701 #define smi_get_stat(smi, stat) \
23702 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23703 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23704
23705 #define SI_MAX_PARMS 4
23706
23707 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
23708 atomic_set(&new_smi->req_events, 0);
23709 new_smi->run_to_completion = 0;
23710 for (i = 0; i < SI_NUM_STATS; i++)
23711 - atomic_set(&new_smi->stats[i], 0);
23712 + atomic_set_unchecked(&new_smi->stats[i], 0);
23713
23714 new_smi->interrupt_disabled = 1;
23715 atomic_set(&new_smi->stop_operation, 0);
23716 diff -urNp linux-3.0.3/drivers/char/Kconfig linux-3.0.3/drivers/char/Kconfig
23717 --- linux-3.0.3/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
23718 +++ linux-3.0.3/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
23719 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23720
23721 config DEVKMEM
23722 bool "/dev/kmem virtual device support"
23723 - default y
23724 + default n
23725 + depends on !GRKERNSEC_KMEM
23726 help
23727 Say Y here if you want to support the /dev/kmem device. The
23728 /dev/kmem device is rarely used, but can be used for certain
23729 @@ -596,6 +597,7 @@ config DEVPORT
23730 bool
23731 depends on !M68K
23732 depends on ISA || PCI
23733 + depends on !GRKERNSEC_KMEM
23734 default y
23735
23736 source "drivers/s390/char/Kconfig"
23737 diff -urNp linux-3.0.3/drivers/char/mem.c linux-3.0.3/drivers/char/mem.c
23738 --- linux-3.0.3/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
23739 +++ linux-3.0.3/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
23740 @@ -18,6 +18,7 @@
23741 #include <linux/raw.h>
23742 #include <linux/tty.h>
23743 #include <linux/capability.h>
23744 +#include <linux/security.h>
23745 #include <linux/ptrace.h>
23746 #include <linux/device.h>
23747 #include <linux/highmem.h>
23748 @@ -34,6 +35,10 @@
23749 # include <linux/efi.h>
23750 #endif
23751
23752 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23753 +extern struct file_operations grsec_fops;
23754 +#endif
23755 +
23756 static inline unsigned long size_inside_page(unsigned long start,
23757 unsigned long size)
23758 {
23759 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23760
23761 while (cursor < to) {
23762 if (!devmem_is_allowed(pfn)) {
23763 +#ifdef CONFIG_GRKERNSEC_KMEM
23764 + gr_handle_mem_readwrite(from, to);
23765 +#else
23766 printk(KERN_INFO
23767 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23768 current->comm, from, to);
23769 +#endif
23770 return 0;
23771 }
23772 cursor += PAGE_SIZE;
23773 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23774 }
23775 return 1;
23776 }
23777 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23778 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23779 +{
23780 + return 0;
23781 +}
23782 #else
23783 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23784 {
23785 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23786
23787 while (count > 0) {
23788 unsigned long remaining;
23789 + char *temp;
23790
23791 sz = size_inside_page(p, count);
23792
23793 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23794 if (!ptr)
23795 return -EFAULT;
23796
23797 - remaining = copy_to_user(buf, ptr, sz);
23798 +#ifdef CONFIG_PAX_USERCOPY
23799 + temp = kmalloc(sz, GFP_KERNEL);
23800 + if (!temp) {
23801 + unxlate_dev_mem_ptr(p, ptr);
23802 + return -ENOMEM;
23803 + }
23804 + memcpy(temp, ptr, sz);
23805 +#else
23806 + temp = ptr;
23807 +#endif
23808 +
23809 + remaining = copy_to_user(buf, temp, sz);
23810 +
23811 +#ifdef CONFIG_PAX_USERCOPY
23812 + kfree(temp);
23813 +#endif
23814 +
23815 unxlate_dev_mem_ptr(p, ptr);
23816 if (remaining)
23817 return -EFAULT;
23818 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23819 size_t count, loff_t *ppos)
23820 {
23821 unsigned long p = *ppos;
23822 - ssize_t low_count, read, sz;
23823 + ssize_t low_count, read, sz, err = 0;
23824 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23825 - int err = 0;
23826
23827 read = 0;
23828 if (p < (unsigned long) high_memory) {
23829 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23830 }
23831 #endif
23832 while (low_count > 0) {
23833 + char *temp;
23834 +
23835 sz = size_inside_page(p, low_count);
23836
23837 /*
23838 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23839 */
23840 kbuf = xlate_dev_kmem_ptr((char *)p);
23841
23842 - if (copy_to_user(buf, kbuf, sz))
23843 +#ifdef CONFIG_PAX_USERCOPY
23844 + temp = kmalloc(sz, GFP_KERNEL);
23845 + if (!temp)
23846 + return -ENOMEM;
23847 + memcpy(temp, kbuf, sz);
23848 +#else
23849 + temp = kbuf;
23850 +#endif
23851 +
23852 + err = copy_to_user(buf, temp, sz);
23853 +
23854 +#ifdef CONFIG_PAX_USERCOPY
23855 + kfree(temp);
23856 +#endif
23857 +
23858 + if (err)
23859 return -EFAULT;
23860 buf += sz;
23861 p += sz;
23862 @@ -866,6 +913,9 @@ static const struct memdev {
23863 #ifdef CONFIG_CRASH_DUMP
23864 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23865 #endif
23866 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23867 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23868 +#endif
23869 };
23870
23871 static int memory_open(struct inode *inode, struct file *filp)
23872 diff -urNp linux-3.0.3/drivers/char/nvram.c linux-3.0.3/drivers/char/nvram.c
23873 --- linux-3.0.3/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
23874 +++ linux-3.0.3/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
23875 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
23876
23877 spin_unlock_irq(&rtc_lock);
23878
23879 - if (copy_to_user(buf, contents, tmp - contents))
23880 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23881 return -EFAULT;
23882
23883 *ppos = i;
23884 diff -urNp linux-3.0.3/drivers/char/random.c linux-3.0.3/drivers/char/random.c
23885 --- linux-3.0.3/drivers/char/random.c 2011-08-23 21:44:40.000000000 -0400
23886 +++ linux-3.0.3/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
23887 @@ -261,8 +261,13 @@
23888 /*
23889 * Configuration information
23890 */
23891 +#ifdef CONFIG_GRKERNSEC_RANDNET
23892 +#define INPUT_POOL_WORDS 512
23893 +#define OUTPUT_POOL_WORDS 128
23894 +#else
23895 #define INPUT_POOL_WORDS 128
23896 #define OUTPUT_POOL_WORDS 32
23897 +#endif
23898 #define SEC_XFER_SIZE 512
23899 #define EXTRACT_SIZE 10
23900
23901 @@ -300,10 +305,17 @@ static struct poolinfo {
23902 int poolwords;
23903 int tap1, tap2, tap3, tap4, tap5;
23904 } poolinfo_table[] = {
23905 +#ifdef CONFIG_GRKERNSEC_RANDNET
23906 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
23907 + { 512, 411, 308, 208, 104, 1 },
23908 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
23909 + { 128, 103, 76, 51, 25, 1 },
23910 +#else
23911 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
23912 { 128, 103, 76, 51, 25, 1 },
23913 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
23914 { 32, 26, 20, 14, 7, 1 },
23915 +#endif
23916 #if 0
23917 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
23918 { 2048, 1638, 1231, 819, 411, 1 },
23919 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
23920
23921 extract_buf(r, tmp);
23922 i = min_t(int, nbytes, EXTRACT_SIZE);
23923 - if (copy_to_user(buf, tmp, i)) {
23924 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
23925 ret = -EFAULT;
23926 break;
23927 }
23928 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
23929 #include <linux/sysctl.h>
23930
23931 static int min_read_thresh = 8, min_write_thresh;
23932 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
23933 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
23934 static int max_write_thresh = INPUT_POOL_WORDS * 32;
23935 static char sysctl_bootid[16];
23936
23937 diff -urNp linux-3.0.3/drivers/char/sonypi.c linux-3.0.3/drivers/char/sonypi.c
23938 --- linux-3.0.3/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
23939 +++ linux-3.0.3/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
23940 @@ -55,6 +55,7 @@
23941 #include <asm/uaccess.h>
23942 #include <asm/io.h>
23943 #include <asm/system.h>
23944 +#include <asm/local.h>
23945
23946 #include <linux/sonypi.h>
23947
23948 @@ -491,7 +492,7 @@ static struct sonypi_device {
23949 spinlock_t fifo_lock;
23950 wait_queue_head_t fifo_proc_list;
23951 struct fasync_struct *fifo_async;
23952 - int open_count;
23953 + local_t open_count;
23954 int model;
23955 struct input_dev *input_jog_dev;
23956 struct input_dev *input_key_dev;
23957 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
23958 static int sonypi_misc_release(struct inode *inode, struct file *file)
23959 {
23960 mutex_lock(&sonypi_device.lock);
23961 - sonypi_device.open_count--;
23962 + local_dec(&sonypi_device.open_count);
23963 mutex_unlock(&sonypi_device.lock);
23964 return 0;
23965 }
23966 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
23967 {
23968 mutex_lock(&sonypi_device.lock);
23969 /* Flush input queue on first open */
23970 - if (!sonypi_device.open_count)
23971 + if (!local_read(&sonypi_device.open_count))
23972 kfifo_reset(&sonypi_device.fifo);
23973 - sonypi_device.open_count++;
23974 + local_inc(&sonypi_device.open_count);
23975 mutex_unlock(&sonypi_device.lock);
23976
23977 return 0;
23978 diff -urNp linux-3.0.3/drivers/char/tpm/tpm_bios.c linux-3.0.3/drivers/char/tpm/tpm_bios.c
23979 --- linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
23980 +++ linux-3.0.3/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
23981 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
23982 event = addr;
23983
23984 if ((event->event_type == 0 && event->event_size == 0) ||
23985 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
23986 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
23987 return NULL;
23988
23989 return addr;
23990 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
23991 return NULL;
23992
23993 if ((event->event_type == 0 && event->event_size == 0) ||
23994 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
23995 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
23996 return NULL;
23997
23998 (*pos)++;
23999 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24000 int i;
24001
24002 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24003 - seq_putc(m, data[i]);
24004 + if (!seq_putc(m, data[i]))
24005 + return -EFAULT;
24006
24007 return 0;
24008 }
24009 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24010 log->bios_event_log_end = log->bios_event_log + len;
24011
24012 virt = acpi_os_map_memory(start, len);
24013 + if (!virt) {
24014 + kfree(log->bios_event_log);
24015 + log->bios_event_log = NULL;
24016 + return -EFAULT;
24017 + }
24018
24019 memcpy(log->bios_event_log, virt, len);
24020
24021 diff -urNp linux-3.0.3/drivers/char/tpm/tpm.c linux-3.0.3/drivers/char/tpm/tpm.c
24022 --- linux-3.0.3/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24023 +++ linux-3.0.3/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24024 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24025 chip->vendor.req_complete_val)
24026 goto out_recv;
24027
24028 - if ((status == chip->vendor.req_canceled)) {
24029 + if (status == chip->vendor.req_canceled) {
24030 dev_err(chip->dev, "Operation Canceled\n");
24031 rc = -ECANCELED;
24032 goto out;
24033 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24034
24035 struct tpm_chip *chip = dev_get_drvdata(dev);
24036
24037 + pax_track_stack();
24038 +
24039 tpm_cmd.header.in = tpm_readpubek_header;
24040 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24041 "attempting to read the PUBEK");
24042 diff -urNp linux-3.0.3/drivers/crypto/hifn_795x.c linux-3.0.3/drivers/crypto/hifn_795x.c
24043 --- linux-3.0.3/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24044 +++ linux-3.0.3/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24045 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24046 0xCA, 0x34, 0x2B, 0x2E};
24047 struct scatterlist sg;
24048
24049 + pax_track_stack();
24050 +
24051 memset(src, 0, sizeof(src));
24052 memset(ctx.key, 0, sizeof(ctx.key));
24053
24054 diff -urNp linux-3.0.3/drivers/crypto/padlock-aes.c linux-3.0.3/drivers/crypto/padlock-aes.c
24055 --- linux-3.0.3/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24056 +++ linux-3.0.3/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24057 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24058 struct crypto_aes_ctx gen_aes;
24059 int cpu;
24060
24061 + pax_track_stack();
24062 +
24063 if (key_len % 8) {
24064 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24065 return -EINVAL;
24066 diff -urNp linux-3.0.3/drivers/edac/edac_pci_sysfs.c linux-3.0.3/drivers/edac/edac_pci_sysfs.c
24067 --- linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24068 +++ linux-3.0.3/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24069 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24070 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24071 static int edac_pci_poll_msec = 1000; /* one second workq period */
24072
24073 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24074 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24075 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24076 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24077
24078 static struct kobject *edac_pci_top_main_kobj;
24079 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24080 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24081 edac_printk(KERN_CRIT, EDAC_PCI,
24082 "Signaled System Error on %s\n",
24083 pci_name(dev));
24084 - atomic_inc(&pci_nonparity_count);
24085 + atomic_inc_unchecked(&pci_nonparity_count);
24086 }
24087
24088 if (status & (PCI_STATUS_PARITY)) {
24089 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24090 "Master Data Parity Error on %s\n",
24091 pci_name(dev));
24092
24093 - atomic_inc(&pci_parity_count);
24094 + atomic_inc_unchecked(&pci_parity_count);
24095 }
24096
24097 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24098 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24099 "Detected Parity Error on %s\n",
24100 pci_name(dev));
24101
24102 - atomic_inc(&pci_parity_count);
24103 + atomic_inc_unchecked(&pci_parity_count);
24104 }
24105 }
24106
24107 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24108 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24109 "Signaled System Error on %s\n",
24110 pci_name(dev));
24111 - atomic_inc(&pci_nonparity_count);
24112 + atomic_inc_unchecked(&pci_nonparity_count);
24113 }
24114
24115 if (status & (PCI_STATUS_PARITY)) {
24116 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24117 "Master Data Parity Error on "
24118 "%s\n", pci_name(dev));
24119
24120 - atomic_inc(&pci_parity_count);
24121 + atomic_inc_unchecked(&pci_parity_count);
24122 }
24123
24124 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24125 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24126 "Detected Parity Error on %s\n",
24127 pci_name(dev));
24128
24129 - atomic_inc(&pci_parity_count);
24130 + atomic_inc_unchecked(&pci_parity_count);
24131 }
24132 }
24133 }
24134 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24135 if (!check_pci_errors)
24136 return;
24137
24138 - before_count = atomic_read(&pci_parity_count);
24139 + before_count = atomic_read_unchecked(&pci_parity_count);
24140
24141 /* scan all PCI devices looking for a Parity Error on devices and
24142 * bridges.
24143 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24144 /* Only if operator has selected panic on PCI Error */
24145 if (edac_pci_get_panic_on_pe()) {
24146 /* If the count is different 'after' from 'before' */
24147 - if (before_count != atomic_read(&pci_parity_count))
24148 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24149 panic("EDAC: PCI Parity Error");
24150 }
24151 }
24152 diff -urNp linux-3.0.3/drivers/edac/i7core_edac.c linux-3.0.3/drivers/edac/i7core_edac.c
24153 --- linux-3.0.3/drivers/edac/i7core_edac.c 2011-07-21 22:17:23.000000000 -0400
24154 +++ linux-3.0.3/drivers/edac/i7core_edac.c 2011-08-23 21:47:55.000000000 -0400
24155 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24156 char *type, *optype, *err, *msg;
24157 unsigned long error = m->status & 0x1ff0000l;
24158 u32 optypenum = (m->status >> 4) & 0x07;
24159 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24160 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24161 u32 dimm = (m->misc >> 16) & 0x3;
24162 u32 channel = (m->misc >> 18) & 0x3;
24163 u32 syndrome = m->misc >> 32;
24164 diff -urNp linux-3.0.3/drivers/edac/mce_amd.h linux-3.0.3/drivers/edac/mce_amd.h
24165 --- linux-3.0.3/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24166 +++ linux-3.0.3/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24167 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24168 bool (*dc_mce)(u16, u8);
24169 bool (*ic_mce)(u16, u8);
24170 bool (*nb_mce)(u16, u8);
24171 -};
24172 +} __no_const;
24173
24174 void amd_report_gart_errors(bool);
24175 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24176 diff -urNp linux-3.0.3/drivers/firewire/core-card.c linux-3.0.3/drivers/firewire/core-card.c
24177 --- linux-3.0.3/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24178 +++ linux-3.0.3/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24179 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24180
24181 void fw_core_remove_card(struct fw_card *card)
24182 {
24183 - struct fw_card_driver dummy_driver = dummy_driver_template;
24184 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24185
24186 card->driver->update_phy_reg(card, 4,
24187 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24188 diff -urNp linux-3.0.3/drivers/firewire/core-cdev.c linux-3.0.3/drivers/firewire/core-cdev.c
24189 --- linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:44:40.000000000 -0400
24190 +++ linux-3.0.3/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24191 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24192 int ret;
24193
24194 if ((request->channels == 0 && request->bandwidth == 0) ||
24195 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24196 - request->bandwidth < 0)
24197 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24198 return -EINVAL;
24199
24200 r = kmalloc(sizeof(*r), GFP_KERNEL);
24201 diff -urNp linux-3.0.3/drivers/firewire/core.h linux-3.0.3/drivers/firewire/core.h
24202 --- linux-3.0.3/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24203 +++ linux-3.0.3/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24204 @@ -101,6 +101,7 @@ struct fw_card_driver {
24205
24206 int (*stop_iso)(struct fw_iso_context *ctx);
24207 };
24208 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24209
24210 void fw_card_initialize(struct fw_card *card,
24211 const struct fw_card_driver *driver, struct device *device);
24212 diff -urNp linux-3.0.3/drivers/firewire/core-transaction.c linux-3.0.3/drivers/firewire/core-transaction.c
24213 --- linux-3.0.3/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24214 +++ linux-3.0.3/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24215 @@ -37,6 +37,7 @@
24216 #include <linux/timer.h>
24217 #include <linux/types.h>
24218 #include <linux/workqueue.h>
24219 +#include <linux/sched.h>
24220
24221 #include <asm/byteorder.h>
24222
24223 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24224 struct transaction_callback_data d;
24225 struct fw_transaction t;
24226
24227 + pax_track_stack();
24228 +
24229 init_timer_on_stack(&t.split_timeout_timer);
24230 init_completion(&d.done);
24231 d.payload = payload;
24232 diff -urNp linux-3.0.3/drivers/firmware/dmi_scan.c linux-3.0.3/drivers/firmware/dmi_scan.c
24233 --- linux-3.0.3/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24234 +++ linux-3.0.3/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24235 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24236 }
24237 }
24238 else {
24239 - /*
24240 - * no iounmap() for that ioremap(); it would be a no-op, but
24241 - * it's so early in setup that sucker gets confused into doing
24242 - * what it shouldn't if we actually call it.
24243 - */
24244 p = dmi_ioremap(0xF0000, 0x10000);
24245 if (p == NULL)
24246 goto error;
24247 diff -urNp linux-3.0.3/drivers/gpio/vr41xx_giu.c linux-3.0.3/drivers/gpio/vr41xx_giu.c
24248 --- linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24249 +++ linux-3.0.3/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24250 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24251 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24252 maskl, pendl, maskh, pendh);
24253
24254 - atomic_inc(&irq_err_count);
24255 + atomic_inc_unchecked(&irq_err_count);
24256
24257 return -EINVAL;
24258 }
24259 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c
24260 --- linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24261 +++ linux-3.0.3/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24262 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24263 struct drm_crtc *tmp;
24264 int crtc_mask = 1;
24265
24266 - WARN(!crtc, "checking null crtc?\n");
24267 + BUG_ON(!crtc);
24268
24269 dev = crtc->dev;
24270
24271 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24272 struct drm_encoder *encoder;
24273 bool ret = true;
24274
24275 + pax_track_stack();
24276 +
24277 crtc->enabled = drm_helper_crtc_in_use(crtc);
24278 if (!crtc->enabled)
24279 return true;
24280 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_drv.c linux-3.0.3/drivers/gpu/drm/drm_drv.c
24281 --- linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24282 +++ linux-3.0.3/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24283 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24284
24285 dev = file_priv->minor->dev;
24286 atomic_inc(&dev->ioctl_count);
24287 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24288 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24289 ++file_priv->ioctl_count;
24290
24291 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24292 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_fops.c linux-3.0.3/drivers/gpu/drm/drm_fops.c
24293 --- linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24294 +++ linux-3.0.3/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24295 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24296 }
24297
24298 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24299 - atomic_set(&dev->counts[i], 0);
24300 + atomic_set_unchecked(&dev->counts[i], 0);
24301
24302 dev->sigdata.lock = NULL;
24303
24304 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24305
24306 retcode = drm_open_helper(inode, filp, dev);
24307 if (!retcode) {
24308 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24309 - if (!dev->open_count++)
24310 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24311 + if (local_inc_return(&dev->open_count) == 1)
24312 retcode = drm_setup(dev);
24313 }
24314 if (!retcode) {
24315 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24316
24317 mutex_lock(&drm_global_mutex);
24318
24319 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24320 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24321
24322 if (dev->driver->preclose)
24323 dev->driver->preclose(dev, file_priv);
24324 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24325 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24326 task_pid_nr(current),
24327 (long)old_encode_dev(file_priv->minor->device),
24328 - dev->open_count);
24329 + local_read(&dev->open_count));
24330
24331 /* if the master has gone away we can't do anything with the lock */
24332 if (file_priv->minor->master)
24333 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24334 * End inline drm_release
24335 */
24336
24337 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24338 - if (!--dev->open_count) {
24339 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24340 + if (local_dec_and_test(&dev->open_count)) {
24341 if (atomic_read(&dev->ioctl_count)) {
24342 DRM_ERROR("Device busy: %d\n",
24343 atomic_read(&dev->ioctl_count));
24344 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_global.c linux-3.0.3/drivers/gpu/drm/drm_global.c
24345 --- linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24346 +++ linux-3.0.3/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24347 @@ -36,7 +36,7 @@
24348 struct drm_global_item {
24349 struct mutex mutex;
24350 void *object;
24351 - int refcount;
24352 + atomic_t refcount;
24353 };
24354
24355 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24356 @@ -49,7 +49,7 @@ void drm_global_init(void)
24357 struct drm_global_item *item = &glob[i];
24358 mutex_init(&item->mutex);
24359 item->object = NULL;
24360 - item->refcount = 0;
24361 + atomic_set(&item->refcount, 0);
24362 }
24363 }
24364
24365 @@ -59,7 +59,7 @@ void drm_global_release(void)
24366 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24367 struct drm_global_item *item = &glob[i];
24368 BUG_ON(item->object != NULL);
24369 - BUG_ON(item->refcount != 0);
24370 + BUG_ON(atomic_read(&item->refcount) != 0);
24371 }
24372 }
24373
24374 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24375 void *object;
24376
24377 mutex_lock(&item->mutex);
24378 - if (item->refcount == 0) {
24379 + if (atomic_read(&item->refcount) == 0) {
24380 item->object = kzalloc(ref->size, GFP_KERNEL);
24381 if (unlikely(item->object == NULL)) {
24382 ret = -ENOMEM;
24383 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24384 goto out_err;
24385
24386 }
24387 - ++item->refcount;
24388 + atomic_inc(&item->refcount);
24389 ref->object = item->object;
24390 object = item->object;
24391 mutex_unlock(&item->mutex);
24392 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24393 struct drm_global_item *item = &glob[ref->global_type];
24394
24395 mutex_lock(&item->mutex);
24396 - BUG_ON(item->refcount == 0);
24397 + BUG_ON(atomic_read(&item->refcount) == 0);
24398 BUG_ON(ref->object != item->object);
24399 - if (--item->refcount == 0) {
24400 + if (atomic_dec_and_test(&item->refcount)) {
24401 ref->release(ref);
24402 item->object = NULL;
24403 }
24404 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_info.c linux-3.0.3/drivers/gpu/drm/drm_info.c
24405 --- linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24406 +++ linux-3.0.3/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24407 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24408 struct drm_local_map *map;
24409 struct drm_map_list *r_list;
24410
24411 - /* Hardcoded from _DRM_FRAME_BUFFER,
24412 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24413 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24414 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24415 + static const char * const types[] = {
24416 + [_DRM_FRAME_BUFFER] = "FB",
24417 + [_DRM_REGISTERS] = "REG",
24418 + [_DRM_SHM] = "SHM",
24419 + [_DRM_AGP] = "AGP",
24420 + [_DRM_SCATTER_GATHER] = "SG",
24421 + [_DRM_CONSISTENT] = "PCI",
24422 + [_DRM_GEM] = "GEM" };
24423 const char *type;
24424 int i;
24425
24426 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24427 map = r_list->map;
24428 if (!map)
24429 continue;
24430 - if (map->type < 0 || map->type > 5)
24431 + if (map->type >= ARRAY_SIZE(types))
24432 type = "??";
24433 else
24434 type = types[map->type];
24435 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24436 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24437 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24438 vma->vm_flags & VM_IO ? 'i' : '-',
24439 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24440 + 0);
24441 +#else
24442 vma->vm_pgoff);
24443 +#endif
24444
24445 #if defined(__i386__)
24446 pgprot = pgprot_val(vma->vm_page_prot);
24447 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_ioctl.c linux-3.0.3/drivers/gpu/drm/drm_ioctl.c
24448 --- linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
24449 +++ linux-3.0.3/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
24450 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24451 stats->data[i].value =
24452 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24453 else
24454 - stats->data[i].value = atomic_read(&dev->counts[i]);
24455 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24456 stats->data[i].type = dev->types[i];
24457 }
24458
24459 diff -urNp linux-3.0.3/drivers/gpu/drm/drm_lock.c linux-3.0.3/drivers/gpu/drm/drm_lock.c
24460 --- linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
24461 +++ linux-3.0.3/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
24462 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24463 if (drm_lock_take(&master->lock, lock->context)) {
24464 master->lock.file_priv = file_priv;
24465 master->lock.lock_time = jiffies;
24466 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24467 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24468 break; /* Got lock */
24469 }
24470
24471 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24472 return -EINVAL;
24473 }
24474
24475 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24476 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24477
24478 if (drm_lock_free(&master->lock, lock->context)) {
24479 /* FIXME: Should really bail out here. */
24480 diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c
24481 --- linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
24482 +++ linux-3.0.3/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
24483 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24484 dma->buflist[vertex->idx],
24485 vertex->discard, vertex->used);
24486
24487 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24488 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24489 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24490 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24491 sarea_priv->last_enqueue = dev_priv->counter - 1;
24492 sarea_priv->last_dispatch = (int)hw_status[5];
24493
24494 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24495 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24496 mc->last_render);
24497
24498 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24499 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24500 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24501 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24502 sarea_priv->last_enqueue = dev_priv->counter - 1;
24503 sarea_priv->last_dispatch = (int)hw_status[5];
24504
24505 diff -urNp linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h
24506 --- linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
24507 +++ linux-3.0.3/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
24508 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24509 int page_flipping;
24510
24511 wait_queue_head_t irq_queue;
24512 - atomic_t irq_received;
24513 - atomic_t irq_emitted;
24514 + atomic_unchecked_t irq_received;
24515 + atomic_unchecked_t irq_emitted;
24516
24517 int front_offset;
24518 } drm_i810_private_t;
24519 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c
24520 --- linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
24521 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
24522 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
24523 I915_READ(GTIMR));
24524 }
24525 seq_printf(m, "Interrupts received: %d\n",
24526 - atomic_read(&dev_priv->irq_received));
24527 + atomic_read_unchecked(&dev_priv->irq_received));
24528 for (i = 0; i < I915_NUM_RINGS; i++) {
24529 if (IS_GEN6(dev)) {
24530 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24531 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c
24532 --- linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:44:40.000000000 -0400
24533 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
24534 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
24535 bool can_switch;
24536
24537 spin_lock(&dev->count_lock);
24538 - can_switch = (dev->open_count == 0);
24539 + can_switch = (local_read(&dev->open_count) == 0);
24540 spin_unlock(&dev->count_lock);
24541 return can_switch;
24542 }
24543 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h
24544 --- linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
24545 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
24546 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
24547 /* render clock increase/decrease */
24548 /* display clock increase/decrease */
24549 /* pll clock increase/decrease */
24550 -};
24551 +} __no_const;
24552
24553 struct intel_device_info {
24554 u8 gen;
24555 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
24556 int current_page;
24557 int page_flipping;
24558
24559 - atomic_t irq_received;
24560 + atomic_unchecked_t irq_received;
24561
24562 /* protects the irq masks */
24563 spinlock_t irq_lock;
24564 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
24565 * will be page flipped away on the next vblank. When it
24566 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24567 */
24568 - atomic_t pending_flip;
24569 + atomic_unchecked_t pending_flip;
24570 };
24571
24572 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24573 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
24574 extern void intel_teardown_gmbus(struct drm_device *dev);
24575 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24576 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24577 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24578 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24579 {
24580 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24581 }
24582 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24583 --- linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
24584 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
24585 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
24586 i915_gem_clflush_object(obj);
24587
24588 if (obj->base.pending_write_domain)
24589 - cd->flips |= atomic_read(&obj->pending_flip);
24590 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24591
24592 /* The actual obj->write_domain will be updated with
24593 * pending_write_domain after we emit the accumulated flush for all
24594 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c
24595 --- linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:44:40.000000000 -0400
24596 +++ linux-3.0.3/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
24597 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
24598 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
24599 struct drm_i915_master_private *master_priv;
24600
24601 - atomic_inc(&dev_priv->irq_received);
24602 + atomic_inc_unchecked(&dev_priv->irq_received);
24603
24604 /* disable master interrupt before clearing iir */
24605 de_ier = I915_READ(DEIER);
24606 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
24607 struct drm_i915_master_private *master_priv;
24608 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
24609
24610 - atomic_inc(&dev_priv->irq_received);
24611 + atomic_inc_unchecked(&dev_priv->irq_received);
24612
24613 if (IS_GEN6(dev))
24614 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
24615 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
24616 int ret = IRQ_NONE, pipe;
24617 bool blc_event = false;
24618
24619 - atomic_inc(&dev_priv->irq_received);
24620 + atomic_inc_unchecked(&dev_priv->irq_received);
24621
24622 iir = I915_READ(IIR);
24623
24624 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
24625 {
24626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24627
24628 - atomic_set(&dev_priv->irq_received, 0);
24629 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24630
24631 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24632 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24633 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
24634 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24635 int pipe;
24636
24637 - atomic_set(&dev_priv->irq_received, 0);
24638 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24639
24640 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24641 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24642 diff -urNp linux-3.0.3/drivers/gpu/drm/i915/intel_display.c linux-3.0.3/drivers/gpu/drm/i915/intel_display.c
24643 --- linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:44:40.000000000 -0400
24644 +++ linux-3.0.3/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
24645 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24646
24647 wait_event(dev_priv->pending_flip_queue,
24648 atomic_read(&dev_priv->mm.wedged) ||
24649 - atomic_read(&obj->pending_flip) == 0);
24650 + atomic_read_unchecked(&obj->pending_flip) == 0);
24651
24652 /* Big Hammer, we also need to ensure that any pending
24653 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24654 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
24655 obj = to_intel_framebuffer(crtc->fb)->obj;
24656 dev_priv = crtc->dev->dev_private;
24657 wait_event(dev_priv->pending_flip_queue,
24658 - atomic_read(&obj->pending_flip) == 0);
24659 + atomic_read_unchecked(&obj->pending_flip) == 0);
24660 }
24661
24662 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24663 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
24664
24665 atomic_clear_mask(1 << intel_crtc->plane,
24666 &obj->pending_flip.counter);
24667 - if (atomic_read(&obj->pending_flip) == 0)
24668 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24669 wake_up(&dev_priv->pending_flip_queue);
24670
24671 schedule_work(&work->work);
24672 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
24673 /* Block clients from rendering to the new back buffer until
24674 * the flip occurs and the object is no longer visible.
24675 */
24676 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24677 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24678
24679 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
24680 if (ret)
24681 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
24682 return 0;
24683
24684 cleanup_pending:
24685 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24686 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24687 cleanup_objs:
24688 drm_gem_object_unreference(&work->old_fb_obj->base);
24689 drm_gem_object_unreference(&obj->base);
24690 diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h
24691 --- linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
24692 +++ linux-3.0.3/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
24693 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24694 u32 clear_cmd;
24695 u32 maccess;
24696
24697 - atomic_t vbl_received; /**< Number of vblanks received. */
24698 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24699 wait_queue_head_t fence_queue;
24700 - atomic_t last_fence_retired;
24701 + atomic_unchecked_t last_fence_retired;
24702 u32 next_fence_to_post;
24703
24704 unsigned int fb_cpp;
24705 diff -urNp linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c
24706 --- linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
24707 +++ linux-3.0.3/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
24708 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24709 if (crtc != 0)
24710 return 0;
24711
24712 - return atomic_read(&dev_priv->vbl_received);
24713 + return atomic_read_unchecked(&dev_priv->vbl_received);
24714 }
24715
24716
24717 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24718 /* VBLANK interrupt */
24719 if (status & MGA_VLINEPEN) {
24720 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24721 - atomic_inc(&dev_priv->vbl_received);
24722 + atomic_inc_unchecked(&dev_priv->vbl_received);
24723 drm_handle_vblank(dev, 0);
24724 handled = 1;
24725 }
24726 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24727 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24728 MGA_WRITE(MGA_PRIMEND, prim_end);
24729
24730 - atomic_inc(&dev_priv->last_fence_retired);
24731 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24732 DRM_WAKEUP(&dev_priv->fence_queue);
24733 handled = 1;
24734 }
24735 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24736 * using fences.
24737 */
24738 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24739 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24740 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24741 - *sequence) <= (1 << 23)));
24742
24743 *sequence = cur_fence;
24744 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c
24745 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
24746 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-23 21:47:55.000000000 -0400
24747 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
24748 struct bit_table {
24749 const char id;
24750 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
24751 -};
24752 +} __no_const;
24753
24754 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
24755
24756 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h
24757 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
24758 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
24759 @@ -227,7 +227,7 @@ struct nouveau_channel {
24760 struct list_head pending;
24761 uint32_t sequence;
24762 uint32_t sequence_ack;
24763 - atomic_t last_sequence_irq;
24764 + atomic_unchecked_t last_sequence_irq;
24765 } fence;
24766
24767 /* DMA push buffer */
24768 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
24769 u32 handle, u16 class);
24770 void (*set_tile_region)(struct drm_device *dev, int i);
24771 void (*tlb_flush)(struct drm_device *, int engine);
24772 -};
24773 +} __no_const;
24774
24775 struct nouveau_instmem_engine {
24776 void *priv;
24777 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
24778 struct nouveau_mc_engine {
24779 int (*init)(struct drm_device *dev);
24780 void (*takedown)(struct drm_device *dev);
24781 -};
24782 +} __no_const;
24783
24784 struct nouveau_timer_engine {
24785 int (*init)(struct drm_device *dev);
24786 void (*takedown)(struct drm_device *dev);
24787 uint64_t (*read)(struct drm_device *dev);
24788 -};
24789 +} __no_const;
24790
24791 struct nouveau_fb_engine {
24792 int num_tiles;
24793 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
24794 void (*put)(struct drm_device *, struct nouveau_mem **);
24795
24796 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24797 -};
24798 +} __no_const;
24799
24800 struct nouveau_engine {
24801 struct nouveau_instmem_engine instmem;
24802 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
24803 struct drm_global_reference mem_global_ref;
24804 struct ttm_bo_global_ref bo_global_ref;
24805 struct ttm_bo_device bdev;
24806 - atomic_t validate_sequence;
24807 + atomic_unchecked_t validate_sequence;
24808 } ttm;
24809
24810 struct {
24811 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c
24812 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
24813 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
24814 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24815 if (USE_REFCNT(dev))
24816 sequence = nvchan_rd32(chan, 0x48);
24817 else
24818 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24819 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24820
24821 if (chan->fence.sequence_ack == sequence)
24822 goto out;
24823 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
24824
24825 INIT_LIST_HEAD(&chan->fence.pending);
24826 spin_lock_init(&chan->fence.lock);
24827 - atomic_set(&chan->fence.last_sequence_irq, 0);
24828 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24829 return 0;
24830 }
24831
24832 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c
24833 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
24834 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
24835 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24836 int trycnt = 0;
24837 int ret, i;
24838
24839 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24840 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24841 retry:
24842 if (++trycnt > 100000) {
24843 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24844 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c
24845 --- linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
24846 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
24847 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
24848 bool can_switch;
24849
24850 spin_lock(&dev->count_lock);
24851 - can_switch = (dev->open_count == 0);
24852 + can_switch = (local_read(&dev->open_count) == 0);
24853 spin_unlock(&dev->count_lock);
24854 return can_switch;
24855 }
24856 diff -urNp linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c
24857 --- linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
24858 +++ linux-3.0.3/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
24859 @@ -560,7 +560,7 @@ static int
24860 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24861 u32 class, u32 mthd, u32 data)
24862 {
24863 - atomic_set(&chan->fence.last_sequence_irq, data);
24864 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24865 return 0;
24866 }
24867
24868 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c
24869 --- linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
24870 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
24871 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24872
24873 /* GH: Simple idle check.
24874 */
24875 - atomic_set(&dev_priv->idle_count, 0);
24876 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24877
24878 /* We don't support anything other than bus-mastering ring mode,
24879 * but the ring can be in either AGP or PCI space for the ring
24880 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h
24881 --- linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
24882 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
24883 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24884 int is_pci;
24885 unsigned long cce_buffers_offset;
24886
24887 - atomic_t idle_count;
24888 + atomic_unchecked_t idle_count;
24889
24890 int page_flipping;
24891 int current_page;
24892 u32 crtc_offset;
24893 u32 crtc_offset_cntl;
24894
24895 - atomic_t vbl_received;
24896 + atomic_unchecked_t vbl_received;
24897
24898 u32 color_fmt;
24899 unsigned int front_offset;
24900 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c
24901 --- linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
24902 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
24903 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24904 if (crtc != 0)
24905 return 0;
24906
24907 - return atomic_read(&dev_priv->vbl_received);
24908 + return atomic_read_unchecked(&dev_priv->vbl_received);
24909 }
24910
24911 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24912 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24913 /* VBLANK interrupt */
24914 if (status & R128_CRTC_VBLANK_INT) {
24915 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24916 - atomic_inc(&dev_priv->vbl_received);
24917 + atomic_inc_unchecked(&dev_priv->vbl_received);
24918 drm_handle_vblank(dev, 0);
24919 return IRQ_HANDLED;
24920 }
24921 diff -urNp linux-3.0.3/drivers/gpu/drm/r128/r128_state.c linux-3.0.3/drivers/gpu/drm/r128/r128_state.c
24922 --- linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
24923 +++ linux-3.0.3/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
24924 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
24925
24926 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
24927 {
24928 - if (atomic_read(&dev_priv->idle_count) == 0)
24929 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
24930 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
24931 else
24932 - atomic_set(&dev_priv->idle_count, 0);
24933 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24934 }
24935
24936 #endif
24937 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/atom.c linux-3.0.3/drivers/gpu/drm/radeon/atom.c
24938 --- linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
24939 +++ linux-3.0.3/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
24940 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
24941 char name[512];
24942 int i;
24943
24944 + pax_track_stack();
24945 +
24946 ctx->card = card;
24947 ctx->bios = bios;
24948
24949 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c
24950 --- linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
24951 +++ linux-3.0.3/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
24952 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24953 regex_t mask_rex;
24954 regmatch_t match[4];
24955 char buf[1024];
24956 - size_t end;
24957 + long end;
24958 int len;
24959 int done = 0;
24960 int r;
24961 unsigned o;
24962 struct offset *offset;
24963 char last_reg_s[10];
24964 - int last_reg;
24965 + unsigned long last_reg;
24966
24967 if (regcomp
24968 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24969 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c
24970 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
24971 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
24972 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
24973 struct radeon_gpio_rec gpio;
24974 struct radeon_hpd hpd;
24975
24976 + pax_track_stack();
24977 +
24978 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
24979 return false;
24980
24981 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c
24982 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:44:40.000000000 -0400
24983 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
24984 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
24985 bool can_switch;
24986
24987 spin_lock(&dev->count_lock);
24988 - can_switch = (dev->open_count == 0);
24989 + can_switch = (local_read(&dev->open_count) == 0);
24990 spin_unlock(&dev->count_lock);
24991 return can_switch;
24992 }
24993 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c
24994 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:44:40.000000000 -0400
24995 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
24996 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
24997 uint32_t post_div;
24998 u32 pll_out_min, pll_out_max;
24999
25000 + pax_track_stack();
25001 +
25002 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25003 freq = freq * 1000;
25004
25005 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h
25006 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25007 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25008 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25009
25010 /* SW interrupt */
25011 wait_queue_head_t swi_queue;
25012 - atomic_t swi_emitted;
25013 + atomic_unchecked_t swi_emitted;
25014 int vblank_crtc;
25015 uint32_t irq_enable_reg;
25016 uint32_t r500_disp_irq_reg;
25017 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c
25018 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25019 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25020 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25021 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25022 return 0;
25023 }
25024 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25025 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25026 if (!rdev->cp.ready)
25027 /* FIXME: cp is not running assume everythings is done right
25028 * away
25029 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25030 return r;
25031 }
25032 radeon_fence_write(rdev, 0);
25033 - atomic_set(&rdev->fence_drv.seq, 0);
25034 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25035 INIT_LIST_HEAD(&rdev->fence_drv.created);
25036 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25037 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25038 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon.h linux-3.0.3/drivers/gpu/drm/radeon/radeon.h
25039 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25040 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25041 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25042 */
25043 struct radeon_fence_driver {
25044 uint32_t scratch_reg;
25045 - atomic_t seq;
25046 + atomic_unchecked_t seq;
25047 uint32_t last_seq;
25048 unsigned long last_jiffies;
25049 unsigned long last_timeout;
25050 @@ -960,7 +960,7 @@ struct radeon_asic {
25051 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25052 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25053 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25054 -};
25055 +} __no_const;
25056
25057 /*
25058 * Asic structures
25059 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c
25060 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25061 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25062 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25063 request = compat_alloc_user_space(sizeof(*request));
25064 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25065 || __put_user(req32.param, &request->param)
25066 - || __put_user((void __user *)(unsigned long)req32.value,
25067 + || __put_user((unsigned long)req32.value,
25068 &request->value))
25069 return -EFAULT;
25070
25071 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c
25072 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25073 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25074 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25075 unsigned int ret;
25076 RING_LOCALS;
25077
25078 - atomic_inc(&dev_priv->swi_emitted);
25079 - ret = atomic_read(&dev_priv->swi_emitted);
25080 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25081 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25082
25083 BEGIN_RING(4);
25084 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25085 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25086 drm_radeon_private_t *dev_priv =
25087 (drm_radeon_private_t *) dev->dev_private;
25088
25089 - atomic_set(&dev_priv->swi_emitted, 0);
25090 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25091 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25092
25093 dev->max_vblank_count = 0x001fffff;
25094 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c
25095 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25096 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25097 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25098 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25099 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25100
25101 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25102 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25103 sarea_priv->nbox * sizeof(depth_boxes[0])))
25104 return -EFAULT;
25105
25106 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25107 {
25108 drm_radeon_private_t *dev_priv = dev->dev_private;
25109 drm_radeon_getparam_t *param = data;
25110 - int value;
25111 + int value = 0;
25112
25113 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25114
25115 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c
25116 --- linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25117 +++ linux-3.0.3/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25118 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25119 }
25120 if (unlikely(ttm_vm_ops == NULL)) {
25121 ttm_vm_ops = vma->vm_ops;
25122 - radeon_ttm_vm_ops = *ttm_vm_ops;
25123 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25124 + pax_open_kernel();
25125 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25126 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25127 + pax_close_kernel();
25128 }
25129 vma->vm_ops = &radeon_ttm_vm_ops;
25130 return 0;
25131 diff -urNp linux-3.0.3/drivers/gpu/drm/radeon/rs690.c linux-3.0.3/drivers/gpu/drm/radeon/rs690.c
25132 --- linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25133 +++ linux-3.0.3/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25134 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25135 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25136 rdev->pm.sideport_bandwidth.full)
25137 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25138 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25139 + read_delay_latency.full = dfixed_const(800 * 1000);
25140 read_delay_latency.full = dfixed_div(read_delay_latency,
25141 rdev->pm.igp_sideport_mclk);
25142 + a.full = dfixed_const(370);
25143 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25144 } else {
25145 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25146 rdev->pm.k8_bandwidth.full)
25147 diff -urNp linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c
25148 --- linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25149 +++ linux-3.0.3/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25150 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25151 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25152 struct shrink_control *sc)
25153 {
25154 - static atomic_t start_pool = ATOMIC_INIT(0);
25155 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25156 unsigned i;
25157 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25158 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25159 struct ttm_page_pool *pool;
25160 int shrink_pages = sc->nr_to_scan;
25161
25162 diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_drv.h linux-3.0.3/drivers/gpu/drm/via/via_drv.h
25163 --- linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25164 +++ linux-3.0.3/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25165 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25166 typedef uint32_t maskarray_t[5];
25167
25168 typedef struct drm_via_irq {
25169 - atomic_t irq_received;
25170 + atomic_unchecked_t irq_received;
25171 uint32_t pending_mask;
25172 uint32_t enable_mask;
25173 wait_queue_head_t irq_queue;
25174 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25175 struct timeval last_vblank;
25176 int last_vblank_valid;
25177 unsigned usec_per_vblank;
25178 - atomic_t vbl_received;
25179 + atomic_unchecked_t vbl_received;
25180 drm_via_state_t hc_state;
25181 char pci_buf[VIA_PCI_BUF_SIZE];
25182 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25183 diff -urNp linux-3.0.3/drivers/gpu/drm/via/via_irq.c linux-3.0.3/drivers/gpu/drm/via/via_irq.c
25184 --- linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25185 +++ linux-3.0.3/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25186 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25187 if (crtc != 0)
25188 return 0;
25189
25190 - return atomic_read(&dev_priv->vbl_received);
25191 + return atomic_read_unchecked(&dev_priv->vbl_received);
25192 }
25193
25194 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25195 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25196
25197 status = VIA_READ(VIA_REG_INTERRUPT);
25198 if (status & VIA_IRQ_VBLANK_PENDING) {
25199 - atomic_inc(&dev_priv->vbl_received);
25200 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25201 + atomic_inc_unchecked(&dev_priv->vbl_received);
25202 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25203 do_gettimeofday(&cur_vblank);
25204 if (dev_priv->last_vblank_valid) {
25205 dev_priv->usec_per_vblank =
25206 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25207 dev_priv->last_vblank = cur_vblank;
25208 dev_priv->last_vblank_valid = 1;
25209 }
25210 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25211 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25212 DRM_DEBUG("US per vblank is: %u\n",
25213 dev_priv->usec_per_vblank);
25214 }
25215 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25216
25217 for (i = 0; i < dev_priv->num_irqs; ++i) {
25218 if (status & cur_irq->pending_mask) {
25219 - atomic_inc(&cur_irq->irq_received);
25220 + atomic_inc_unchecked(&cur_irq->irq_received);
25221 DRM_WAKEUP(&cur_irq->irq_queue);
25222 handled = 1;
25223 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25224 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25225 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25226 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25227 masks[irq][4]));
25228 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25229 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25230 } else {
25231 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25232 (((cur_irq_sequence =
25233 - atomic_read(&cur_irq->irq_received)) -
25234 + atomic_read_unchecked(&cur_irq->irq_received)) -
25235 *sequence) <= (1 << 23)));
25236 }
25237 *sequence = cur_irq_sequence;
25238 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25239 }
25240
25241 for (i = 0; i < dev_priv->num_irqs; ++i) {
25242 - atomic_set(&cur_irq->irq_received, 0);
25243 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25244 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25245 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25246 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25247 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25248 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25249 case VIA_IRQ_RELATIVE:
25250 irqwait->request.sequence +=
25251 - atomic_read(&cur_irq->irq_received);
25252 + atomic_read_unchecked(&cur_irq->irq_received);
25253 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25254 case VIA_IRQ_ABSOLUTE:
25255 break;
25256 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25257 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25258 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25259 @@ -240,7 +240,7 @@ struct vmw_private {
25260 * Fencing and IRQs.
25261 */
25262
25263 - atomic_t fence_seq;
25264 + atomic_unchecked_t fence_seq;
25265 wait_queue_head_t fence_queue;
25266 wait_queue_head_t fifo_queue;
25267 atomic_t fence_queue_waiters;
25268 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25269 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25270 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25271 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25272 while (!vmw_lag_lt(queue, us)) {
25273 spin_lock(&queue->lock);
25274 if (list_empty(&queue->head))
25275 - sequence = atomic_read(&dev_priv->fence_seq);
25276 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25277 else {
25278 fence = list_first_entry(&queue->head,
25279 struct vmw_fence, head);
25280 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25281 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25282 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25283 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25284 (unsigned int) min,
25285 (unsigned int) fifo->capabilities);
25286
25287 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25288 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25289 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25290 vmw_fence_queue_init(&fifo->fence_queue);
25291 return vmw_fifo_send_fence(dev_priv, &dummy);
25292 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25293
25294 fm = vmw_fifo_reserve(dev_priv, bytes);
25295 if (unlikely(fm == NULL)) {
25296 - *sequence = atomic_read(&dev_priv->fence_seq);
25297 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25298 ret = -ENOMEM;
25299 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25300 false, 3*HZ);
25301 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25302 }
25303
25304 do {
25305 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25306 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25307 } while (*sequence == 0);
25308
25309 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25310 diff -urNp linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25311 --- linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25312 +++ linux-3.0.3/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25313 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25314 * emitted. Then the fence is stale and signaled.
25315 */
25316
25317 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25318 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25319 > VMW_FENCE_WRAP);
25320
25321 return ret;
25322 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25323
25324 if (fifo_idle)
25325 down_read(&fifo_state->rwsem);
25326 - signal_seq = atomic_read(&dev_priv->fence_seq);
25327 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25328 ret = 0;
25329
25330 for (;;) {
25331 diff -urNp linux-3.0.3/drivers/hid/hid-core.c linux-3.0.3/drivers/hid/hid-core.c
25332 --- linux-3.0.3/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25333 +++ linux-3.0.3/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25334 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25335
25336 int hid_add_device(struct hid_device *hdev)
25337 {
25338 - static atomic_t id = ATOMIC_INIT(0);
25339 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25340 int ret;
25341
25342 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25343 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25344 /* XXX hack, any other cleaner solution after the driver core
25345 * is converted to allow more than 20 bytes as the device name? */
25346 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25347 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25348 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25349
25350 hid_debug_register(hdev, dev_name(&hdev->dev));
25351 ret = device_add(&hdev->dev);
25352 diff -urNp linux-3.0.3/drivers/hid/usbhid/hiddev.c linux-3.0.3/drivers/hid/usbhid/hiddev.c
25353 --- linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25354 +++ linux-3.0.3/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25355 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25356 break;
25357
25358 case HIDIOCAPPLICATION:
25359 - if (arg < 0 || arg >= hid->maxapplication)
25360 + if (arg >= hid->maxapplication)
25361 break;
25362
25363 for (i = 0; i < hid->maxcollection; i++)
25364 diff -urNp linux-3.0.3/drivers/hwmon/acpi_power_meter.c linux-3.0.3/drivers/hwmon/acpi_power_meter.c
25365 --- linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25366 +++ linux-3.0.3/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25367 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25368 return res;
25369
25370 temp /= 1000;
25371 - if (temp < 0)
25372 - return -EINVAL;
25373
25374 mutex_lock(&resource->lock);
25375 resource->trip[attr->index - 7] = temp;
25376 diff -urNp linux-3.0.3/drivers/hwmon/sht15.c linux-3.0.3/drivers/hwmon/sht15.c
25377 --- linux-3.0.3/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25378 +++ linux-3.0.3/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25379 @@ -166,7 +166,7 @@ struct sht15_data {
25380 int supply_uV;
25381 bool supply_uV_valid;
25382 struct work_struct update_supply_work;
25383 - atomic_t interrupt_handled;
25384 + atomic_unchecked_t interrupt_handled;
25385 };
25386
25387 /**
25388 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25389 return ret;
25390
25391 gpio_direction_input(data->pdata->gpio_data);
25392 - atomic_set(&data->interrupt_handled, 0);
25393 + atomic_set_unchecked(&data->interrupt_handled, 0);
25394
25395 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25396 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25397 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25398 /* Only relevant if the interrupt hasn't occurred. */
25399 - if (!atomic_read(&data->interrupt_handled))
25400 + if (!atomic_read_unchecked(&data->interrupt_handled))
25401 schedule_work(&data->read_work);
25402 }
25403 ret = wait_event_timeout(data->wait_queue,
25404 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25405
25406 /* First disable the interrupt */
25407 disable_irq_nosync(irq);
25408 - atomic_inc(&data->interrupt_handled);
25409 + atomic_inc_unchecked(&data->interrupt_handled);
25410 /* Then schedule a reading work struct */
25411 if (data->state != SHT15_READING_NOTHING)
25412 schedule_work(&data->read_work);
25413 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25414 * If not, then start the interrupt again - care here as could
25415 * have gone low in meantime so verify it hasn't!
25416 */
25417 - atomic_set(&data->interrupt_handled, 0);
25418 + atomic_set_unchecked(&data->interrupt_handled, 0);
25419 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25420 /* If still not occurred or another handler has been scheduled */
25421 if (gpio_get_value(data->pdata->gpio_data)
25422 - || atomic_read(&data->interrupt_handled))
25423 + || atomic_read_unchecked(&data->interrupt_handled))
25424 return;
25425 }
25426
25427 diff -urNp linux-3.0.3/drivers/hwmon/w83791d.c linux-3.0.3/drivers/hwmon/w83791d.c
25428 --- linux-3.0.3/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25429 +++ linux-3.0.3/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25430 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25431 struct i2c_board_info *info);
25432 static int w83791d_remove(struct i2c_client *client);
25433
25434 -static int w83791d_read(struct i2c_client *client, u8 register);
25435 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25436 +static int w83791d_read(struct i2c_client *client, u8 reg);
25437 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25438 static struct w83791d_data *w83791d_update_device(struct device *dev);
25439
25440 #ifdef DEBUG
25441 diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c
25442 --- linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
25443 +++ linux-3.0.3/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
25444 @@ -43,7 +43,7 @@
25445 extern struct i2c_adapter amd756_smbus;
25446
25447 static struct i2c_adapter *s4882_adapter;
25448 -static struct i2c_algorithm *s4882_algo;
25449 +static i2c_algorithm_no_const *s4882_algo;
25450
25451 /* Wrapper access functions for multiplexed SMBus */
25452 static DEFINE_MUTEX(amd756_lock);
25453 diff -urNp linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c
25454 --- linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
25455 +++ linux-3.0.3/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
25456 @@ -41,7 +41,7 @@
25457 extern struct i2c_adapter *nforce2_smbus;
25458
25459 static struct i2c_adapter *s4985_adapter;
25460 -static struct i2c_algorithm *s4985_algo;
25461 +static i2c_algorithm_no_const *s4985_algo;
25462
25463 /* Wrapper access functions for multiplexed SMBus */
25464 static DEFINE_MUTEX(nforce2_lock);
25465 diff -urNp linux-3.0.3/drivers/i2c/i2c-mux.c linux-3.0.3/drivers/i2c/i2c-mux.c
25466 --- linux-3.0.3/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
25467 +++ linux-3.0.3/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
25468 @@ -28,7 +28,7 @@
25469 /* multiplexer per channel data */
25470 struct i2c_mux_priv {
25471 struct i2c_adapter adap;
25472 - struct i2c_algorithm algo;
25473 + i2c_algorithm_no_const algo;
25474
25475 struct i2c_adapter *parent;
25476 void *mux_dev; /* the mux chip/device */
25477 diff -urNp linux-3.0.3/drivers/ide/ide-cd.c linux-3.0.3/drivers/ide/ide-cd.c
25478 --- linux-3.0.3/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
25479 +++ linux-3.0.3/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
25480 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25481 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25482 if ((unsigned long)buf & alignment
25483 || blk_rq_bytes(rq) & q->dma_pad_mask
25484 - || object_is_on_stack(buf))
25485 + || object_starts_on_stack(buf))
25486 drive->dma = 0;
25487 }
25488 }
25489 diff -urNp linux-3.0.3/drivers/ide/ide-floppy.c linux-3.0.3/drivers/ide/ide-floppy.c
25490 --- linux-3.0.3/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
25491 +++ linux-3.0.3/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
25492 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25493 u8 pc_buf[256], header_len, desc_cnt;
25494 int i, rc = 1, blocks, length;
25495
25496 + pax_track_stack();
25497 +
25498 ide_debug_log(IDE_DBG_FUNC, "enter");
25499
25500 drive->bios_cyl = 0;
25501 diff -urNp linux-3.0.3/drivers/ide/setup-pci.c linux-3.0.3/drivers/ide/setup-pci.c
25502 --- linux-3.0.3/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
25503 +++ linux-3.0.3/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
25504 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25505 int ret, i, n_ports = dev2 ? 4 : 2;
25506 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25507
25508 + pax_track_stack();
25509 +
25510 for (i = 0; i < n_ports / 2; i++) {
25511 ret = ide_setup_pci_controller(pdev[i], d, !i);
25512 if (ret < 0)
25513 diff -urNp linux-3.0.3/drivers/infiniband/core/cm.c linux-3.0.3/drivers/infiniband/core/cm.c
25514 --- linux-3.0.3/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
25515 +++ linux-3.0.3/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
25516 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25517
25518 struct cm_counter_group {
25519 struct kobject obj;
25520 - atomic_long_t counter[CM_ATTR_COUNT];
25521 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25522 };
25523
25524 struct cm_counter_attribute {
25525 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25526 struct ib_mad_send_buf *msg = NULL;
25527 int ret;
25528
25529 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25530 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25531 counter[CM_REQ_COUNTER]);
25532
25533 /* Quick state check to discard duplicate REQs. */
25534 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25535 if (!cm_id_priv)
25536 return;
25537
25538 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25539 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25540 counter[CM_REP_COUNTER]);
25541 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25542 if (ret)
25543 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25544 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25545 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25546 spin_unlock_irq(&cm_id_priv->lock);
25547 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25548 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25549 counter[CM_RTU_COUNTER]);
25550 goto out;
25551 }
25552 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25553 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25554 dreq_msg->local_comm_id);
25555 if (!cm_id_priv) {
25556 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25557 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25558 counter[CM_DREQ_COUNTER]);
25559 cm_issue_drep(work->port, work->mad_recv_wc);
25560 return -EINVAL;
25561 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25562 case IB_CM_MRA_REP_RCVD:
25563 break;
25564 case IB_CM_TIMEWAIT:
25565 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25566 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25567 counter[CM_DREQ_COUNTER]);
25568 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25569 goto unlock;
25570 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25571 cm_free_msg(msg);
25572 goto deref;
25573 case IB_CM_DREQ_RCVD:
25574 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25575 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25576 counter[CM_DREQ_COUNTER]);
25577 goto unlock;
25578 default:
25579 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25580 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25581 cm_id_priv->msg, timeout)) {
25582 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25583 - atomic_long_inc(&work->port->
25584 + atomic_long_inc_unchecked(&work->port->
25585 counter_group[CM_RECV_DUPLICATES].
25586 counter[CM_MRA_COUNTER]);
25587 goto out;
25588 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25589 break;
25590 case IB_CM_MRA_REQ_RCVD:
25591 case IB_CM_MRA_REP_RCVD:
25592 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25593 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25594 counter[CM_MRA_COUNTER]);
25595 /* fall through */
25596 default:
25597 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25598 case IB_CM_LAP_IDLE:
25599 break;
25600 case IB_CM_MRA_LAP_SENT:
25601 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25602 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25603 counter[CM_LAP_COUNTER]);
25604 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25605 goto unlock;
25606 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25607 cm_free_msg(msg);
25608 goto deref;
25609 case IB_CM_LAP_RCVD:
25610 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25611 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25612 counter[CM_LAP_COUNTER]);
25613 goto unlock;
25614 default:
25615 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25616 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25617 if (cur_cm_id_priv) {
25618 spin_unlock_irq(&cm.lock);
25619 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25620 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25621 counter[CM_SIDR_REQ_COUNTER]);
25622 goto out; /* Duplicate message. */
25623 }
25624 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25625 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25626 msg->retries = 1;
25627
25628 - atomic_long_add(1 + msg->retries,
25629 + atomic_long_add_unchecked(1 + msg->retries,
25630 &port->counter_group[CM_XMIT].counter[attr_index]);
25631 if (msg->retries)
25632 - atomic_long_add(msg->retries,
25633 + atomic_long_add_unchecked(msg->retries,
25634 &port->counter_group[CM_XMIT_RETRIES].
25635 counter[attr_index]);
25636
25637 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25638 }
25639
25640 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25641 - atomic_long_inc(&port->counter_group[CM_RECV].
25642 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25643 counter[attr_id - CM_ATTR_ID_OFFSET]);
25644
25645 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25646 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25647 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25648
25649 return sprintf(buf, "%ld\n",
25650 - atomic_long_read(&group->counter[cm_attr->index]));
25651 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25652 }
25653
25654 static const struct sysfs_ops cm_counter_ops = {
25655 diff -urNp linux-3.0.3/drivers/infiniband/core/fmr_pool.c linux-3.0.3/drivers/infiniband/core/fmr_pool.c
25656 --- linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
25657 +++ linux-3.0.3/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
25658 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25659
25660 struct task_struct *thread;
25661
25662 - atomic_t req_ser;
25663 - atomic_t flush_ser;
25664 + atomic_unchecked_t req_ser;
25665 + atomic_unchecked_t flush_ser;
25666
25667 wait_queue_head_t force_wait;
25668 };
25669 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25670 struct ib_fmr_pool *pool = pool_ptr;
25671
25672 do {
25673 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25674 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25675 ib_fmr_batch_release(pool);
25676
25677 - atomic_inc(&pool->flush_ser);
25678 + atomic_inc_unchecked(&pool->flush_ser);
25679 wake_up_interruptible(&pool->force_wait);
25680
25681 if (pool->flush_function)
25682 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25683 }
25684
25685 set_current_state(TASK_INTERRUPTIBLE);
25686 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25687 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25688 !kthread_should_stop())
25689 schedule();
25690 __set_current_state(TASK_RUNNING);
25691 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25692 pool->dirty_watermark = params->dirty_watermark;
25693 pool->dirty_len = 0;
25694 spin_lock_init(&pool->pool_lock);
25695 - atomic_set(&pool->req_ser, 0);
25696 - atomic_set(&pool->flush_ser, 0);
25697 + atomic_set_unchecked(&pool->req_ser, 0);
25698 + atomic_set_unchecked(&pool->flush_ser, 0);
25699 init_waitqueue_head(&pool->force_wait);
25700
25701 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25702 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25703 }
25704 spin_unlock_irq(&pool->pool_lock);
25705
25706 - serial = atomic_inc_return(&pool->req_ser);
25707 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25708 wake_up_process(pool->thread);
25709
25710 if (wait_event_interruptible(pool->force_wait,
25711 - atomic_read(&pool->flush_ser) - serial >= 0))
25712 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25713 return -EINTR;
25714
25715 return 0;
25716 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25717 } else {
25718 list_add_tail(&fmr->list, &pool->dirty_list);
25719 if (++pool->dirty_len >= pool->dirty_watermark) {
25720 - atomic_inc(&pool->req_ser);
25721 + atomic_inc_unchecked(&pool->req_ser);
25722 wake_up_process(pool->thread);
25723 }
25724 }
25725 diff -urNp linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c
25726 --- linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
25727 +++ linux-3.0.3/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
25728 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25729 int err;
25730 struct fw_ri_tpte tpt;
25731 u32 stag_idx;
25732 - static atomic_t key;
25733 + static atomic_unchecked_t key;
25734
25735 if (c4iw_fatal_error(rdev))
25736 return -EIO;
25737 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25738 &rdev->resource.tpt_fifo_lock);
25739 if (!stag_idx)
25740 return -ENOMEM;
25741 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25742 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25743 }
25744 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25745 __func__, stag_state, type, pdid, stag_idx);
25746 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c
25747 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
25748 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
25749 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25750 struct infinipath_counters counters;
25751 struct ipath_devdata *dd;
25752
25753 + pax_track_stack();
25754 +
25755 dd = file->f_path.dentry->d_inode->i_private;
25756 dd->ipath_f_read_counters(dd, &counters);
25757
25758 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c
25759 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
25760 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
25761 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25762 struct ib_atomic_eth *ateth;
25763 struct ipath_ack_entry *e;
25764 u64 vaddr;
25765 - atomic64_t *maddr;
25766 + atomic64_unchecked_t *maddr;
25767 u64 sdata;
25768 u32 rkey;
25769 u8 next;
25770 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25771 IB_ACCESS_REMOTE_ATOMIC)))
25772 goto nack_acc_unlck;
25773 /* Perform atomic OP and save result. */
25774 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25775 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25776 sdata = be64_to_cpu(ateth->swap_data);
25777 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25778 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25779 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25780 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25781 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25782 be64_to_cpu(ateth->compare_data),
25783 sdata);
25784 diff -urNp linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c
25785 --- linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
25786 +++ linux-3.0.3/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
25787 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25788 unsigned long flags;
25789 struct ib_wc wc;
25790 u64 sdata;
25791 - atomic64_t *maddr;
25792 + atomic64_unchecked_t *maddr;
25793 enum ib_wc_status send_status;
25794
25795 /*
25796 @@ -382,11 +382,11 @@ again:
25797 IB_ACCESS_REMOTE_ATOMIC)))
25798 goto acc_err;
25799 /* Perform atomic OP and save result. */
25800 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25801 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25802 sdata = wqe->wr.wr.atomic.compare_add;
25803 *(u64 *) sqp->s_sge.sge.vaddr =
25804 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25805 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25806 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25807 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25808 sdata, wqe->wr.wr.atomic.swap);
25809 goto send_comp;
25810 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.c linux-3.0.3/drivers/infiniband/hw/nes/nes.c
25811 --- linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
25812 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
25813 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25814 LIST_HEAD(nes_adapter_list);
25815 static LIST_HEAD(nes_dev_list);
25816
25817 -atomic_t qps_destroyed;
25818 +atomic_unchecked_t qps_destroyed;
25819
25820 static unsigned int ee_flsh_adapter;
25821 static unsigned int sysfs_nonidx_addr;
25822 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25823 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25824 struct nes_adapter *nesadapter = nesdev->nesadapter;
25825
25826 - atomic_inc(&qps_destroyed);
25827 + atomic_inc_unchecked(&qps_destroyed);
25828
25829 /* Free the control structures */
25830
25831 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c
25832 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
25833 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
25834 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25835 u32 cm_packets_retrans;
25836 u32 cm_packets_created;
25837 u32 cm_packets_received;
25838 -atomic_t cm_listens_created;
25839 -atomic_t cm_listens_destroyed;
25840 +atomic_unchecked_t cm_listens_created;
25841 +atomic_unchecked_t cm_listens_destroyed;
25842 u32 cm_backlog_drops;
25843 -atomic_t cm_loopbacks;
25844 -atomic_t cm_nodes_created;
25845 -atomic_t cm_nodes_destroyed;
25846 -atomic_t cm_accel_dropped_pkts;
25847 -atomic_t cm_resets_recvd;
25848 +atomic_unchecked_t cm_loopbacks;
25849 +atomic_unchecked_t cm_nodes_created;
25850 +atomic_unchecked_t cm_nodes_destroyed;
25851 +atomic_unchecked_t cm_accel_dropped_pkts;
25852 +atomic_unchecked_t cm_resets_recvd;
25853
25854 static inline int mini_cm_accelerated(struct nes_cm_core *,
25855 struct nes_cm_node *);
25856 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25857
25858 static struct nes_cm_core *g_cm_core;
25859
25860 -atomic_t cm_connects;
25861 -atomic_t cm_accepts;
25862 -atomic_t cm_disconnects;
25863 -atomic_t cm_closes;
25864 -atomic_t cm_connecteds;
25865 -atomic_t cm_connect_reqs;
25866 -atomic_t cm_rejects;
25867 +atomic_unchecked_t cm_connects;
25868 +atomic_unchecked_t cm_accepts;
25869 +atomic_unchecked_t cm_disconnects;
25870 +atomic_unchecked_t cm_closes;
25871 +atomic_unchecked_t cm_connecteds;
25872 +atomic_unchecked_t cm_connect_reqs;
25873 +atomic_unchecked_t cm_rejects;
25874
25875
25876 /**
25877 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25878 kfree(listener);
25879 listener = NULL;
25880 ret = 0;
25881 - atomic_inc(&cm_listens_destroyed);
25882 + atomic_inc_unchecked(&cm_listens_destroyed);
25883 } else {
25884 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25885 }
25886 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25887 cm_node->rem_mac);
25888
25889 add_hte_node(cm_core, cm_node);
25890 - atomic_inc(&cm_nodes_created);
25891 + atomic_inc_unchecked(&cm_nodes_created);
25892
25893 return cm_node;
25894 }
25895 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25896 }
25897
25898 atomic_dec(&cm_core->node_cnt);
25899 - atomic_inc(&cm_nodes_destroyed);
25900 + atomic_inc_unchecked(&cm_nodes_destroyed);
25901 nesqp = cm_node->nesqp;
25902 if (nesqp) {
25903 nesqp->cm_node = NULL;
25904 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25905
25906 static void drop_packet(struct sk_buff *skb)
25907 {
25908 - atomic_inc(&cm_accel_dropped_pkts);
25909 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25910 dev_kfree_skb_any(skb);
25911 }
25912
25913 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25914 {
25915
25916 int reset = 0; /* whether to send reset in case of err.. */
25917 - atomic_inc(&cm_resets_recvd);
25918 + atomic_inc_unchecked(&cm_resets_recvd);
25919 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25920 " refcnt=%d\n", cm_node, cm_node->state,
25921 atomic_read(&cm_node->ref_count));
25922 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25923 rem_ref_cm_node(cm_node->cm_core, cm_node);
25924 return NULL;
25925 }
25926 - atomic_inc(&cm_loopbacks);
25927 + atomic_inc_unchecked(&cm_loopbacks);
25928 loopbackremotenode->loopbackpartner = cm_node;
25929 loopbackremotenode->tcp_cntxt.rcv_wscale =
25930 NES_CM_DEFAULT_RCV_WND_SCALE;
25931 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
25932 add_ref_cm_node(cm_node);
25933 } else if (cm_node->state == NES_CM_STATE_TSA) {
25934 rem_ref_cm_node(cm_core, cm_node);
25935 - atomic_inc(&cm_accel_dropped_pkts);
25936 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25937 dev_kfree_skb_any(skb);
25938 break;
25939 }
25940 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
25941
25942 if ((cm_id) && (cm_id->event_handler)) {
25943 if (issue_disconn) {
25944 - atomic_inc(&cm_disconnects);
25945 + atomic_inc_unchecked(&cm_disconnects);
25946 cm_event.event = IW_CM_EVENT_DISCONNECT;
25947 cm_event.status = disconn_status;
25948 cm_event.local_addr = cm_id->local_addr;
25949 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
25950 }
25951
25952 if (issue_close) {
25953 - atomic_inc(&cm_closes);
25954 + atomic_inc_unchecked(&cm_closes);
25955 nes_disconnect(nesqp, 1);
25956
25957 cm_id->provider_data = nesqp;
25958 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
25959
25960 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
25961 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
25962 - atomic_inc(&cm_accepts);
25963 + atomic_inc_unchecked(&cm_accepts);
25964
25965 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
25966 netdev_refcnt_read(nesvnic->netdev));
25967 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
25968
25969 struct nes_cm_core *cm_core;
25970
25971 - atomic_inc(&cm_rejects);
25972 + atomic_inc_unchecked(&cm_rejects);
25973 cm_node = (struct nes_cm_node *) cm_id->provider_data;
25974 loopback = cm_node->loopbackpartner;
25975 cm_core = cm_node->cm_core;
25976 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
25977 ntohl(cm_id->local_addr.sin_addr.s_addr),
25978 ntohs(cm_id->local_addr.sin_port));
25979
25980 - atomic_inc(&cm_connects);
25981 + atomic_inc_unchecked(&cm_connects);
25982 nesqp->active_conn = 1;
25983
25984 /* cache the cm_id in the qp */
25985 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
25986 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
25987 return err;
25988 }
25989 - atomic_inc(&cm_listens_created);
25990 + atomic_inc_unchecked(&cm_listens_created);
25991 }
25992
25993 cm_id->add_ref(cm_id);
25994 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
25995 if (nesqp->destroyed) {
25996 return;
25997 }
25998 - atomic_inc(&cm_connecteds);
25999 + atomic_inc_unchecked(&cm_connecteds);
26000 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26001 " local port 0x%04X. jiffies = %lu.\n",
26002 nesqp->hwqp.qp_id,
26003 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26004
26005 cm_id->add_ref(cm_id);
26006 ret = cm_id->event_handler(cm_id, &cm_event);
26007 - atomic_inc(&cm_closes);
26008 + atomic_inc_unchecked(&cm_closes);
26009 cm_event.event = IW_CM_EVENT_CLOSE;
26010 cm_event.status = 0;
26011 cm_event.provider_data = cm_id->provider_data;
26012 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26013 return;
26014 cm_id = cm_node->cm_id;
26015
26016 - atomic_inc(&cm_connect_reqs);
26017 + atomic_inc_unchecked(&cm_connect_reqs);
26018 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26019 cm_node, cm_id, jiffies);
26020
26021 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26022 return;
26023 cm_id = cm_node->cm_id;
26024
26025 - atomic_inc(&cm_connect_reqs);
26026 + atomic_inc_unchecked(&cm_connect_reqs);
26027 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26028 cm_node, cm_id, jiffies);
26029
26030 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes.h linux-3.0.3/drivers/infiniband/hw/nes/nes.h
26031 --- linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26032 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26033 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26034 extern unsigned int wqm_quanta;
26035 extern struct list_head nes_adapter_list;
26036
26037 -extern atomic_t cm_connects;
26038 -extern atomic_t cm_accepts;
26039 -extern atomic_t cm_disconnects;
26040 -extern atomic_t cm_closes;
26041 -extern atomic_t cm_connecteds;
26042 -extern atomic_t cm_connect_reqs;
26043 -extern atomic_t cm_rejects;
26044 -extern atomic_t mod_qp_timouts;
26045 -extern atomic_t qps_created;
26046 -extern atomic_t qps_destroyed;
26047 -extern atomic_t sw_qps_destroyed;
26048 +extern atomic_unchecked_t cm_connects;
26049 +extern atomic_unchecked_t cm_accepts;
26050 +extern atomic_unchecked_t cm_disconnects;
26051 +extern atomic_unchecked_t cm_closes;
26052 +extern atomic_unchecked_t cm_connecteds;
26053 +extern atomic_unchecked_t cm_connect_reqs;
26054 +extern atomic_unchecked_t cm_rejects;
26055 +extern atomic_unchecked_t mod_qp_timouts;
26056 +extern atomic_unchecked_t qps_created;
26057 +extern atomic_unchecked_t qps_destroyed;
26058 +extern atomic_unchecked_t sw_qps_destroyed;
26059 extern u32 mh_detected;
26060 extern u32 mh_pauses_sent;
26061 extern u32 cm_packets_sent;
26062 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26063 extern u32 cm_packets_received;
26064 extern u32 cm_packets_dropped;
26065 extern u32 cm_packets_retrans;
26066 -extern atomic_t cm_listens_created;
26067 -extern atomic_t cm_listens_destroyed;
26068 +extern atomic_unchecked_t cm_listens_created;
26069 +extern atomic_unchecked_t cm_listens_destroyed;
26070 extern u32 cm_backlog_drops;
26071 -extern atomic_t cm_loopbacks;
26072 -extern atomic_t cm_nodes_created;
26073 -extern atomic_t cm_nodes_destroyed;
26074 -extern atomic_t cm_accel_dropped_pkts;
26075 -extern atomic_t cm_resets_recvd;
26076 +extern atomic_unchecked_t cm_loopbacks;
26077 +extern atomic_unchecked_t cm_nodes_created;
26078 +extern atomic_unchecked_t cm_nodes_destroyed;
26079 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26080 +extern atomic_unchecked_t cm_resets_recvd;
26081
26082 extern u32 int_mod_timer_init;
26083 extern u32 int_mod_cq_depth_256;
26084 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c
26085 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26086 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26087 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26088 target_stat_values[++index] = mh_detected;
26089 target_stat_values[++index] = mh_pauses_sent;
26090 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26091 - target_stat_values[++index] = atomic_read(&cm_connects);
26092 - target_stat_values[++index] = atomic_read(&cm_accepts);
26093 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26094 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26095 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26096 - target_stat_values[++index] = atomic_read(&cm_rejects);
26097 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26098 - target_stat_values[++index] = atomic_read(&qps_created);
26099 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26100 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26101 - target_stat_values[++index] = atomic_read(&cm_closes);
26102 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26103 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26104 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26105 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26106 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26107 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26108 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26109 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26110 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26111 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26112 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26113 target_stat_values[++index] = cm_packets_sent;
26114 target_stat_values[++index] = cm_packets_bounced;
26115 target_stat_values[++index] = cm_packets_created;
26116 target_stat_values[++index] = cm_packets_received;
26117 target_stat_values[++index] = cm_packets_dropped;
26118 target_stat_values[++index] = cm_packets_retrans;
26119 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26120 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26121 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26122 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26123 target_stat_values[++index] = cm_backlog_drops;
26124 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26125 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26126 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26127 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26128 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26129 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26130 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26131 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26132 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26133 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26134 target_stat_values[++index] = nesadapter->free_4kpbl;
26135 target_stat_values[++index] = nesadapter->free_256pbl;
26136 target_stat_values[++index] = int_mod_timer_init;
26137 diff -urNp linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c
26138 --- linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26139 +++ linux-3.0.3/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26140 @@ -46,9 +46,9 @@
26141
26142 #include <rdma/ib_umem.h>
26143
26144 -atomic_t mod_qp_timouts;
26145 -atomic_t qps_created;
26146 -atomic_t sw_qps_destroyed;
26147 +atomic_unchecked_t mod_qp_timouts;
26148 +atomic_unchecked_t qps_created;
26149 +atomic_unchecked_t sw_qps_destroyed;
26150
26151 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26152
26153 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26154 if (init_attr->create_flags)
26155 return ERR_PTR(-EINVAL);
26156
26157 - atomic_inc(&qps_created);
26158 + atomic_inc_unchecked(&qps_created);
26159 switch (init_attr->qp_type) {
26160 case IB_QPT_RC:
26161 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26162 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26163 struct iw_cm_event cm_event;
26164 int ret;
26165
26166 - atomic_inc(&sw_qps_destroyed);
26167 + atomic_inc_unchecked(&sw_qps_destroyed);
26168 nesqp->destroyed = 1;
26169
26170 /* Blow away the connection if it exists. */
26171 diff -urNp linux-3.0.3/drivers/infiniband/hw/qib/qib.h linux-3.0.3/drivers/infiniband/hw/qib/qib.h
26172 --- linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26173 +++ linux-3.0.3/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26174 @@ -51,6 +51,7 @@
26175 #include <linux/completion.h>
26176 #include <linux/kref.h>
26177 #include <linux/sched.h>
26178 +#include <linux/slab.h>
26179
26180 #include "qib_common.h"
26181 #include "qib_verbs.h"
26182 diff -urNp linux-3.0.3/drivers/input/gameport/gameport.c linux-3.0.3/drivers/input/gameport/gameport.c
26183 --- linux-3.0.3/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26184 +++ linux-3.0.3/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26185 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26186 */
26187 static void gameport_init_port(struct gameport *gameport)
26188 {
26189 - static atomic_t gameport_no = ATOMIC_INIT(0);
26190 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26191
26192 __module_get(THIS_MODULE);
26193
26194 mutex_init(&gameport->drv_mutex);
26195 device_initialize(&gameport->dev);
26196 dev_set_name(&gameport->dev, "gameport%lu",
26197 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26198 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26199 gameport->dev.bus = &gameport_bus;
26200 gameport->dev.release = gameport_release_port;
26201 if (gameport->parent)
26202 diff -urNp linux-3.0.3/drivers/input/input.c linux-3.0.3/drivers/input/input.c
26203 --- linux-3.0.3/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26204 +++ linux-3.0.3/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26205 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26206 */
26207 int input_register_device(struct input_dev *dev)
26208 {
26209 - static atomic_t input_no = ATOMIC_INIT(0);
26210 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26211 struct input_handler *handler;
26212 const char *path;
26213 int error;
26214 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26215 dev->setkeycode = input_default_setkeycode;
26216
26217 dev_set_name(&dev->dev, "input%ld",
26218 - (unsigned long) atomic_inc_return(&input_no) - 1);
26219 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26220
26221 error = device_add(&dev->dev);
26222 if (error)
26223 diff -urNp linux-3.0.3/drivers/input/joystick/sidewinder.c linux-3.0.3/drivers/input/joystick/sidewinder.c
26224 --- linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26225 +++ linux-3.0.3/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26226 @@ -30,6 +30,7 @@
26227 #include <linux/kernel.h>
26228 #include <linux/module.h>
26229 #include <linux/slab.h>
26230 +#include <linux/sched.h>
26231 #include <linux/init.h>
26232 #include <linux/input.h>
26233 #include <linux/gameport.h>
26234 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26235 unsigned char buf[SW_LENGTH];
26236 int i;
26237
26238 + pax_track_stack();
26239 +
26240 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26241
26242 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26243 diff -urNp linux-3.0.3/drivers/input/joystick/xpad.c linux-3.0.3/drivers/input/joystick/xpad.c
26244 --- linux-3.0.3/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26245 +++ linux-3.0.3/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26246 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26247
26248 static int xpad_led_probe(struct usb_xpad *xpad)
26249 {
26250 - static atomic_t led_seq = ATOMIC_INIT(0);
26251 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26252 long led_no;
26253 struct xpad_led *led;
26254 struct led_classdev *led_cdev;
26255 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26256 if (!led)
26257 return -ENOMEM;
26258
26259 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26260 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26261
26262 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26263 led->xpad = xpad;
26264 diff -urNp linux-3.0.3/drivers/input/mousedev.c linux-3.0.3/drivers/input/mousedev.c
26265 --- linux-3.0.3/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26266 +++ linux-3.0.3/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26267 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26268
26269 spin_unlock_irq(&client->packet_lock);
26270
26271 - if (copy_to_user(buffer, data, count))
26272 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26273 return -EFAULT;
26274
26275 return count;
26276 diff -urNp linux-3.0.3/drivers/input/serio/serio.c linux-3.0.3/drivers/input/serio/serio.c
26277 --- linux-3.0.3/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26278 +++ linux-3.0.3/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26279 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26280 */
26281 static void serio_init_port(struct serio *serio)
26282 {
26283 - static atomic_t serio_no = ATOMIC_INIT(0);
26284 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26285
26286 __module_get(THIS_MODULE);
26287
26288 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26289 mutex_init(&serio->drv_mutex);
26290 device_initialize(&serio->dev);
26291 dev_set_name(&serio->dev, "serio%ld",
26292 - (long)atomic_inc_return(&serio_no) - 1);
26293 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26294 serio->dev.bus = &serio_bus;
26295 serio->dev.release = serio_release_port;
26296 serio->dev.groups = serio_device_attr_groups;
26297 diff -urNp linux-3.0.3/drivers/isdn/capi/capi.c linux-3.0.3/drivers/isdn/capi/capi.c
26298 --- linux-3.0.3/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26299 +++ linux-3.0.3/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26300 @@ -83,8 +83,8 @@ struct capiminor {
26301
26302 struct capi20_appl *ap;
26303 u32 ncci;
26304 - atomic_t datahandle;
26305 - atomic_t msgid;
26306 + atomic_unchecked_t datahandle;
26307 + atomic_unchecked_t msgid;
26308
26309 struct tty_port port;
26310 int ttyinstop;
26311 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26312 capimsg_setu16(s, 2, mp->ap->applid);
26313 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26314 capimsg_setu8 (s, 5, CAPI_RESP);
26315 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26316 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26317 capimsg_setu32(s, 8, mp->ncci);
26318 capimsg_setu16(s, 12, datahandle);
26319 }
26320 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26321 mp->outbytes -= len;
26322 spin_unlock_bh(&mp->outlock);
26323
26324 - datahandle = atomic_inc_return(&mp->datahandle);
26325 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26326 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26327 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26328 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26329 capimsg_setu16(skb->data, 2, mp->ap->applid);
26330 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26331 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26332 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26333 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26334 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26335 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26336 capimsg_setu16(skb->data, 16, len); /* Data length */
26337 diff -urNp linux-3.0.3/drivers/isdn/gigaset/common.c linux-3.0.3/drivers/isdn/gigaset/common.c
26338 --- linux-3.0.3/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26339 +++ linux-3.0.3/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26340 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26341 cs->commands_pending = 0;
26342 cs->cur_at_seq = 0;
26343 cs->gotfwver = -1;
26344 - cs->open_count = 0;
26345 + local_set(&cs->open_count, 0);
26346 cs->dev = NULL;
26347 cs->tty = NULL;
26348 cs->tty_dev = NULL;
26349 diff -urNp linux-3.0.3/drivers/isdn/gigaset/gigaset.h linux-3.0.3/drivers/isdn/gigaset/gigaset.h
26350 --- linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26351 +++ linux-3.0.3/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26352 @@ -35,6 +35,7 @@
26353 #include <linux/tty_driver.h>
26354 #include <linux/list.h>
26355 #include <asm/atomic.h>
26356 +#include <asm/local.h>
26357
26358 #define GIG_VERSION {0, 5, 0, 0}
26359 #define GIG_COMPAT {0, 4, 0, 0}
26360 @@ -433,7 +434,7 @@ struct cardstate {
26361 spinlock_t cmdlock;
26362 unsigned curlen, cmdbytes;
26363
26364 - unsigned open_count;
26365 + local_t open_count;
26366 struct tty_struct *tty;
26367 struct tasklet_struct if_wake_tasklet;
26368 unsigned control_state;
26369 diff -urNp linux-3.0.3/drivers/isdn/gigaset/interface.c linux-3.0.3/drivers/isdn/gigaset/interface.c
26370 --- linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26371 +++ linux-3.0.3/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26372 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26373 }
26374 tty->driver_data = cs;
26375
26376 - ++cs->open_count;
26377 -
26378 - if (cs->open_count == 1) {
26379 + if (local_inc_return(&cs->open_count) == 1) {
26380 spin_lock_irqsave(&cs->lock, flags);
26381 cs->tty = tty;
26382 spin_unlock_irqrestore(&cs->lock, flags);
26383 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26384
26385 if (!cs->connected)
26386 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26387 - else if (!cs->open_count)
26388 + else if (!local_read(&cs->open_count))
26389 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26390 else {
26391 - if (!--cs->open_count) {
26392 + if (!local_dec_return(&cs->open_count)) {
26393 spin_lock_irqsave(&cs->lock, flags);
26394 cs->tty = NULL;
26395 spin_unlock_irqrestore(&cs->lock, flags);
26396 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26397 if (!cs->connected) {
26398 gig_dbg(DEBUG_IF, "not connected");
26399 retval = -ENODEV;
26400 - } else if (!cs->open_count)
26401 + } else if (!local_read(&cs->open_count))
26402 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26403 else {
26404 retval = 0;
26405 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26406 retval = -ENODEV;
26407 goto done;
26408 }
26409 - if (!cs->open_count) {
26410 + if (!local_read(&cs->open_count)) {
26411 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26412 retval = -ENODEV;
26413 goto done;
26414 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26415 if (!cs->connected) {
26416 gig_dbg(DEBUG_IF, "not connected");
26417 retval = -ENODEV;
26418 - } else if (!cs->open_count)
26419 + } else if (!local_read(&cs->open_count))
26420 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26421 else if (cs->mstate != MS_LOCKED) {
26422 dev_warn(cs->dev, "can't write to unlocked device\n");
26423 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26424
26425 if (!cs->connected)
26426 gig_dbg(DEBUG_IF, "not connected");
26427 - else if (!cs->open_count)
26428 + else if (!local_read(&cs->open_count))
26429 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26430 else if (cs->mstate != MS_LOCKED)
26431 dev_warn(cs->dev, "can't write to unlocked device\n");
26432 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26433
26434 if (!cs->connected)
26435 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26436 - else if (!cs->open_count)
26437 + else if (!local_read(&cs->open_count))
26438 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26439 else
26440 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26441 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
26442
26443 if (!cs->connected)
26444 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26445 - else if (!cs->open_count)
26446 + else if (!local_read(&cs->open_count))
26447 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26448 else
26449 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26450 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
26451 goto out;
26452 }
26453
26454 - if (!cs->open_count) {
26455 + if (!local_read(&cs->open_count)) {
26456 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26457 goto out;
26458 }
26459 diff -urNp linux-3.0.3/drivers/isdn/hardware/avm/b1.c linux-3.0.3/drivers/isdn/hardware/avm/b1.c
26460 --- linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
26461 +++ linux-3.0.3/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
26462 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26463 }
26464 if (left) {
26465 if (t4file->user) {
26466 - if (copy_from_user(buf, dp, left))
26467 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26468 return -EFAULT;
26469 } else {
26470 memcpy(buf, dp, left);
26471 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26472 }
26473 if (left) {
26474 if (config->user) {
26475 - if (copy_from_user(buf, dp, left))
26476 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26477 return -EFAULT;
26478 } else {
26479 memcpy(buf, dp, left);
26480 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c
26481 --- linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
26482 +++ linux-3.0.3/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
26483 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26484 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26485 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26486
26487 + pax_track_stack();
26488
26489 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26490 {
26491 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c
26492 --- linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
26493 +++ linux-3.0.3/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
26494 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26495 IDI_SYNC_REQ req;
26496 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26497
26498 + pax_track_stack();
26499 +
26500 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26501
26502 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26503 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c
26504 --- linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
26505 +++ linux-3.0.3/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
26506 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26507 IDI_SYNC_REQ req;
26508 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26509
26510 + pax_track_stack();
26511 +
26512 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26513
26514 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26515 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c
26516 --- linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
26517 +++ linux-3.0.3/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
26518 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
26519 IDI_SYNC_REQ req;
26520 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26521
26522 + pax_track_stack();
26523 +
26524 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26525
26526 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26527 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h
26528 --- linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
26529 +++ linux-3.0.3/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
26530 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26531 } diva_didd_add_adapter_t;
26532 typedef struct _diva_didd_remove_adapter {
26533 IDI_CALL p_request;
26534 -} diva_didd_remove_adapter_t;
26535 +} __no_const diva_didd_remove_adapter_t;
26536 typedef struct _diva_didd_read_adapter_array {
26537 void * buffer;
26538 dword length;
26539 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c
26540 --- linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
26541 +++ linux-3.0.3/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
26542 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26543 IDI_SYNC_REQ req;
26544 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26545
26546 + pax_track_stack();
26547 +
26548 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26549
26550 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26551 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/message.c linux-3.0.3/drivers/isdn/hardware/eicon/message.c
26552 --- linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
26553 +++ linux-3.0.3/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
26554 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
26555 dword d;
26556 word w;
26557
26558 + pax_track_stack();
26559 +
26560 a = plci->adapter;
26561 Id = ((word)plci->Id<<8)|a->Id;
26562 PUT_WORD(&SS_Ind[4],0x0000);
26563 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
26564 word j, n, w;
26565 dword d;
26566
26567 + pax_track_stack();
26568 +
26569
26570 for(i=0;i<8;i++) bp_parms[i].length = 0;
26571 for(i=0;i<2;i++) global_config[i].length = 0;
26572 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
26573 const byte llc3[] = {4,3,2,2,6,6,0};
26574 const byte header[] = {0,2,3,3,0,0,0};
26575
26576 + pax_track_stack();
26577 +
26578 for(i=0;i<8;i++) bp_parms[i].length = 0;
26579 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26580 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26581 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
26582 word appl_number_group_type[MAX_APPL];
26583 PLCI *auxplci;
26584
26585 + pax_track_stack();
26586 +
26587 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26588
26589 if(!a->group_optimization_enabled)
26590 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c
26591 --- linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
26592 +++ linux-3.0.3/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
26593 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26594 IDI_SYNC_REQ req;
26595 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26596
26597 + pax_track_stack();
26598 +
26599 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26600
26601 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26602 diff -urNp linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h
26603 --- linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
26604 +++ linux-3.0.3/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
26605 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26606 typedef struct _diva_os_idi_adapter_interface {
26607 diva_init_card_proc_t cleanup_adapter_proc;
26608 diva_cmd_card_proc_t cmd_proc;
26609 -} diva_os_idi_adapter_interface_t;
26610 +} __no_const diva_os_idi_adapter_interface_t;
26611
26612 typedef struct _diva_os_xdi_adapter {
26613 struct list_head link;
26614 diff -urNp linux-3.0.3/drivers/isdn/i4l/isdn_common.c linux-3.0.3/drivers/isdn/i4l/isdn_common.c
26615 --- linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
26616 +++ linux-3.0.3/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
26617 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
26618 } iocpar;
26619 void __user *argp = (void __user *)arg;
26620
26621 + pax_track_stack();
26622 +
26623 #define name iocpar.name
26624 #define bname iocpar.bname
26625 #define iocts iocpar.iocts
26626 diff -urNp linux-3.0.3/drivers/isdn/icn/icn.c linux-3.0.3/drivers/isdn/icn/icn.c
26627 --- linux-3.0.3/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
26628 +++ linux-3.0.3/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
26629 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26630 if (count > len)
26631 count = len;
26632 if (user) {
26633 - if (copy_from_user(msg, buf, count))
26634 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26635 return -EFAULT;
26636 } else
26637 memcpy(msg, buf, count);
26638 diff -urNp linux-3.0.3/drivers/lguest/core.c linux-3.0.3/drivers/lguest/core.c
26639 --- linux-3.0.3/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
26640 +++ linux-3.0.3/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
26641 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26642 * it's worked so far. The end address needs +1 because __get_vm_area
26643 * allocates an extra guard page, so we need space for that.
26644 */
26645 +
26646 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26647 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26648 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26649 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26650 +#else
26651 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26652 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26653 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26654 +#endif
26655 +
26656 if (!switcher_vma) {
26657 err = -ENOMEM;
26658 printk("lguest: could not map switcher pages high\n");
26659 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26660 * Now the Switcher is mapped at the right address, we can't fail!
26661 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26662 */
26663 - memcpy(switcher_vma->addr, start_switcher_text,
26664 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26665 end_switcher_text - start_switcher_text);
26666
26667 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26668 diff -urNp linux-3.0.3/drivers/lguest/x86/core.c linux-3.0.3/drivers/lguest/x86/core.c
26669 --- linux-3.0.3/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
26670 +++ linux-3.0.3/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
26671 @@ -59,7 +59,7 @@ static struct {
26672 /* Offset from where switcher.S was compiled to where we've copied it */
26673 static unsigned long switcher_offset(void)
26674 {
26675 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26676 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26677 }
26678
26679 /* This cpu's struct lguest_pages. */
26680 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26681 * These copies are pretty cheap, so we do them unconditionally: */
26682 /* Save the current Host top-level page directory.
26683 */
26684 +
26685 +#ifdef CONFIG_PAX_PER_CPU_PGD
26686 + pages->state.host_cr3 = read_cr3();
26687 +#else
26688 pages->state.host_cr3 = __pa(current->mm->pgd);
26689 +#endif
26690 +
26691 /*
26692 * Set up the Guest's page tables to see this CPU's pages (and no
26693 * other CPU's pages).
26694 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26695 * compiled-in switcher code and the high-mapped copy we just made.
26696 */
26697 for (i = 0; i < IDT_ENTRIES; i++)
26698 - default_idt_entries[i] += switcher_offset();
26699 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26700
26701 /*
26702 * Set up the Switcher's per-cpu areas.
26703 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26704 * it will be undisturbed when we switch. To change %cs and jump we
26705 * need this structure to feed to Intel's "lcall" instruction.
26706 */
26707 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26708 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26709 lguest_entry.segment = LGUEST_CS;
26710
26711 /*
26712 diff -urNp linux-3.0.3/drivers/lguest/x86/switcher_32.S linux-3.0.3/drivers/lguest/x86/switcher_32.S
26713 --- linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
26714 +++ linux-3.0.3/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
26715 @@ -87,6 +87,7 @@
26716 #include <asm/page.h>
26717 #include <asm/segment.h>
26718 #include <asm/lguest.h>
26719 +#include <asm/processor-flags.h>
26720
26721 // We mark the start of the code to copy
26722 // It's placed in .text tho it's never run here
26723 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26724 // Changes type when we load it: damn Intel!
26725 // For after we switch over our page tables
26726 // That entry will be read-only: we'd crash.
26727 +
26728 +#ifdef CONFIG_PAX_KERNEXEC
26729 + mov %cr0, %edx
26730 + xor $X86_CR0_WP, %edx
26731 + mov %edx, %cr0
26732 +#endif
26733 +
26734 movl $(GDT_ENTRY_TSS*8), %edx
26735 ltr %dx
26736
26737 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26738 // Let's clear it again for our return.
26739 // The GDT descriptor of the Host
26740 // Points to the table after two "size" bytes
26741 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26742 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26743 // Clear "used" from type field (byte 5, bit 2)
26744 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26745 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26746 +
26747 +#ifdef CONFIG_PAX_KERNEXEC
26748 + mov %cr0, %eax
26749 + xor $X86_CR0_WP, %eax
26750 + mov %eax, %cr0
26751 +#endif
26752
26753 // Once our page table's switched, the Guest is live!
26754 // The Host fades as we run this final step.
26755 @@ -295,13 +309,12 @@ deliver_to_host:
26756 // I consulted gcc, and it gave
26757 // These instructions, which I gladly credit:
26758 leal (%edx,%ebx,8), %eax
26759 - movzwl (%eax),%edx
26760 - movl 4(%eax), %eax
26761 - xorw %ax, %ax
26762 - orl %eax, %edx
26763 + movl 4(%eax), %edx
26764 + movw (%eax), %dx
26765 // Now the address of the handler's in %edx
26766 // We call it now: its "iret" drops us home.
26767 - jmp *%edx
26768 + ljmp $__KERNEL_CS, $1f
26769 +1: jmp *%edx
26770
26771 // Every interrupt can come to us here
26772 // But we must truly tell each apart.
26773 diff -urNp linux-3.0.3/drivers/md/dm.c linux-3.0.3/drivers/md/dm.c
26774 --- linux-3.0.3/drivers/md/dm.c 2011-08-23 21:44:40.000000000 -0400
26775 +++ linux-3.0.3/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
26776 @@ -164,9 +164,9 @@ struct mapped_device {
26777 /*
26778 * Event handling.
26779 */
26780 - atomic_t event_nr;
26781 + atomic_unchecked_t event_nr;
26782 wait_queue_head_t eventq;
26783 - atomic_t uevent_seq;
26784 + atomic_unchecked_t uevent_seq;
26785 struct list_head uevent_list;
26786 spinlock_t uevent_lock; /* Protect access to uevent_list */
26787
26788 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
26789 rwlock_init(&md->map_lock);
26790 atomic_set(&md->holders, 1);
26791 atomic_set(&md->open_count, 0);
26792 - atomic_set(&md->event_nr, 0);
26793 - atomic_set(&md->uevent_seq, 0);
26794 + atomic_set_unchecked(&md->event_nr, 0);
26795 + atomic_set_unchecked(&md->uevent_seq, 0);
26796 INIT_LIST_HEAD(&md->uevent_list);
26797 spin_lock_init(&md->uevent_lock);
26798
26799 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
26800
26801 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26802
26803 - atomic_inc(&md->event_nr);
26804 + atomic_inc_unchecked(&md->event_nr);
26805 wake_up(&md->eventq);
26806 }
26807
26808 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
26809
26810 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26811 {
26812 - return atomic_add_return(1, &md->uevent_seq);
26813 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26814 }
26815
26816 uint32_t dm_get_event_nr(struct mapped_device *md)
26817 {
26818 - return atomic_read(&md->event_nr);
26819 + return atomic_read_unchecked(&md->event_nr);
26820 }
26821
26822 int dm_wait_event(struct mapped_device *md, int event_nr)
26823 {
26824 return wait_event_interruptible(md->eventq,
26825 - (event_nr != atomic_read(&md->event_nr)));
26826 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26827 }
26828
26829 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26830 diff -urNp linux-3.0.3/drivers/md/dm-ioctl.c linux-3.0.3/drivers/md/dm-ioctl.c
26831 --- linux-3.0.3/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
26832 +++ linux-3.0.3/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
26833 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26834 cmd == DM_LIST_VERSIONS_CMD)
26835 return 0;
26836
26837 - if ((cmd == DM_DEV_CREATE_CMD)) {
26838 + if (cmd == DM_DEV_CREATE_CMD) {
26839 if (!*param->name) {
26840 DMWARN("name not supplied when creating device");
26841 return -EINVAL;
26842 diff -urNp linux-3.0.3/drivers/md/dm-raid1.c linux-3.0.3/drivers/md/dm-raid1.c
26843 --- linux-3.0.3/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
26844 +++ linux-3.0.3/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
26845 @@ -40,7 +40,7 @@ enum dm_raid1_error {
26846
26847 struct mirror {
26848 struct mirror_set *ms;
26849 - atomic_t error_count;
26850 + atomic_unchecked_t error_count;
26851 unsigned long error_type;
26852 struct dm_dev *dev;
26853 sector_t offset;
26854 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
26855 struct mirror *m;
26856
26857 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26858 - if (!atomic_read(&m->error_count))
26859 + if (!atomic_read_unchecked(&m->error_count))
26860 return m;
26861
26862 return NULL;
26863 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
26864 * simple way to tell if a device has encountered
26865 * errors.
26866 */
26867 - atomic_inc(&m->error_count);
26868 + atomic_inc_unchecked(&m->error_count);
26869
26870 if (test_and_set_bit(error_type, &m->error_type))
26871 return;
26872 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
26873 struct mirror *m = get_default_mirror(ms);
26874
26875 do {
26876 - if (likely(!atomic_read(&m->error_count)))
26877 + if (likely(!atomic_read_unchecked(&m->error_count)))
26878 return m;
26879
26880 if (m-- == ms->mirror)
26881 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
26882 {
26883 struct mirror *default_mirror = get_default_mirror(m->ms);
26884
26885 - return !atomic_read(&default_mirror->error_count);
26886 + return !atomic_read_unchecked(&default_mirror->error_count);
26887 }
26888
26889 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26890 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
26891 */
26892 if (likely(region_in_sync(ms, region, 1)))
26893 m = choose_mirror(ms, bio->bi_sector);
26894 - else if (m && atomic_read(&m->error_count))
26895 + else if (m && atomic_read_unchecked(&m->error_count))
26896 m = NULL;
26897
26898 if (likely(m))
26899 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
26900 }
26901
26902 ms->mirror[mirror].ms = ms;
26903 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26904 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26905 ms->mirror[mirror].error_type = 0;
26906 ms->mirror[mirror].offset = offset;
26907
26908 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26909 */
26910 static char device_status_char(struct mirror *m)
26911 {
26912 - if (!atomic_read(&(m->error_count)))
26913 + if (!atomic_read_unchecked(&(m->error_count)))
26914 return 'A';
26915
26916 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26917 diff -urNp linux-3.0.3/drivers/md/dm-stripe.c linux-3.0.3/drivers/md/dm-stripe.c
26918 --- linux-3.0.3/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
26919 +++ linux-3.0.3/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
26920 @@ -20,7 +20,7 @@ struct stripe {
26921 struct dm_dev *dev;
26922 sector_t physical_start;
26923
26924 - atomic_t error_count;
26925 + atomic_unchecked_t error_count;
26926 };
26927
26928 struct stripe_c {
26929 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
26930 kfree(sc);
26931 return r;
26932 }
26933 - atomic_set(&(sc->stripe[i].error_count), 0);
26934 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
26935 }
26936
26937 ti->private = sc;
26938 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
26939 DMEMIT("%d ", sc->stripes);
26940 for (i = 0; i < sc->stripes; i++) {
26941 DMEMIT("%s ", sc->stripe[i].dev->name);
26942 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
26943 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
26944 'D' : 'A';
26945 }
26946 buffer[i] = '\0';
26947 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
26948 */
26949 for (i = 0; i < sc->stripes; i++)
26950 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
26951 - atomic_inc(&(sc->stripe[i].error_count));
26952 - if (atomic_read(&(sc->stripe[i].error_count)) <
26953 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
26954 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
26955 DM_IO_ERROR_THRESHOLD)
26956 schedule_work(&sc->trigger_event);
26957 }
26958 diff -urNp linux-3.0.3/drivers/md/dm-table.c linux-3.0.3/drivers/md/dm-table.c
26959 --- linux-3.0.3/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
26960 +++ linux-3.0.3/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
26961 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
26962 if (!dev_size)
26963 return 0;
26964
26965 - if ((start >= dev_size) || (start + len > dev_size)) {
26966 + if ((start >= dev_size) || (len > dev_size - start)) {
26967 DMWARN("%s: %s too small for target: "
26968 "start=%llu, len=%llu, dev_size=%llu",
26969 dm_device_name(ti->table->md), bdevname(bdev, b),
26970 diff -urNp linux-3.0.3/drivers/md/md.c linux-3.0.3/drivers/md/md.c
26971 --- linux-3.0.3/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
26972 +++ linux-3.0.3/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
26973 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
26974 * start build, activate spare
26975 */
26976 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
26977 -static atomic_t md_event_count;
26978 +static atomic_unchecked_t md_event_count;
26979 void md_new_event(mddev_t *mddev)
26980 {
26981 - atomic_inc(&md_event_count);
26982 + atomic_inc_unchecked(&md_event_count);
26983 wake_up(&md_event_waiters);
26984 }
26985 EXPORT_SYMBOL_GPL(md_new_event);
26986 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
26987 */
26988 static void md_new_event_inintr(mddev_t *mddev)
26989 {
26990 - atomic_inc(&md_event_count);
26991 + atomic_inc_unchecked(&md_event_count);
26992 wake_up(&md_event_waiters);
26993 }
26994
26995 @@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
26996
26997 rdev->preferred_minor = 0xffff;
26998 rdev->data_offset = le64_to_cpu(sb->data_offset);
26999 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27000 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27001
27002 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27003 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27004 @@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27005 else
27006 sb->resync_offset = cpu_to_le64(0);
27007
27008 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27009 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27010
27011 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27012 sb->size = cpu_to_le64(mddev->dev_sectors);
27013 @@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27014 static ssize_t
27015 errors_show(mdk_rdev_t *rdev, char *page)
27016 {
27017 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27018 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27019 }
27020
27021 static ssize_t
27022 @@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27023 char *e;
27024 unsigned long n = simple_strtoul(buf, &e, 10);
27025 if (*buf && (*e == 0 || *e == '\n')) {
27026 - atomic_set(&rdev->corrected_errors, n);
27027 + atomic_set_unchecked(&rdev->corrected_errors, n);
27028 return len;
27029 }
27030 return -EINVAL;
27031 @@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27032 rdev->last_read_error.tv_sec = 0;
27033 rdev->last_read_error.tv_nsec = 0;
27034 atomic_set(&rdev->nr_pending, 0);
27035 - atomic_set(&rdev->read_errors, 0);
27036 - atomic_set(&rdev->corrected_errors, 0);
27037 + atomic_set_unchecked(&rdev->read_errors, 0);
27038 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27039
27040 INIT_LIST_HEAD(&rdev->same_set);
27041 init_waitqueue_head(&rdev->blocked_wait);
27042 @@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27043
27044 spin_unlock(&pers_lock);
27045 seq_printf(seq, "\n");
27046 - mi->event = atomic_read(&md_event_count);
27047 + mi->event = atomic_read_unchecked(&md_event_count);
27048 return 0;
27049 }
27050 if (v == (void*)2) {
27051 @@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27052 chunk_kb ? "KB" : "B");
27053 if (bitmap->file) {
27054 seq_printf(seq, ", file: ");
27055 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27056 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27057 }
27058
27059 seq_printf(seq, "\n");
27060 @@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27061 else {
27062 struct seq_file *p = file->private_data;
27063 p->private = mi;
27064 - mi->event = atomic_read(&md_event_count);
27065 + mi->event = atomic_read_unchecked(&md_event_count);
27066 }
27067 return error;
27068 }
27069 @@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27070 /* always allow read */
27071 mask = POLLIN | POLLRDNORM;
27072
27073 - if (mi->event != atomic_read(&md_event_count))
27074 + if (mi->event != atomic_read_unchecked(&md_event_count))
27075 mask |= POLLERR | POLLPRI;
27076 return mask;
27077 }
27078 @@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27079 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27080 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27081 (int)part_stat_read(&disk->part0, sectors[1]) -
27082 - atomic_read(&disk->sync_io);
27083 + atomic_read_unchecked(&disk->sync_io);
27084 /* sync IO will cause sync_io to increase before the disk_stats
27085 * as sync_io is counted when a request starts, and
27086 * disk_stats is counted when it completes.
27087 diff -urNp linux-3.0.3/drivers/md/md.h linux-3.0.3/drivers/md/md.h
27088 --- linux-3.0.3/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27089 +++ linux-3.0.3/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27090 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27091 * only maintained for arrays that
27092 * support hot removal
27093 */
27094 - atomic_t read_errors; /* number of consecutive read errors that
27095 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27096 * we have tried to ignore.
27097 */
27098 struct timespec last_read_error; /* monotonic time since our
27099 * last read error
27100 */
27101 - atomic_t corrected_errors; /* number of corrected read errors,
27102 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27103 * for reporting to userspace and storing
27104 * in superblock.
27105 */
27106 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27107
27108 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27109 {
27110 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27111 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27112 }
27113
27114 struct mdk_personality
27115 diff -urNp linux-3.0.3/drivers/md/raid10.c linux-3.0.3/drivers/md/raid10.c
27116 --- linux-3.0.3/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27117 +++ linux-3.0.3/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27118 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27119 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27120 set_bit(R10BIO_Uptodate, &r10_bio->state);
27121 else {
27122 - atomic_add(r10_bio->sectors,
27123 + atomic_add_unchecked(r10_bio->sectors,
27124 &conf->mirrors[d].rdev->corrected_errors);
27125 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27126 md_error(r10_bio->mddev,
27127 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27128 {
27129 struct timespec cur_time_mon;
27130 unsigned long hours_since_last;
27131 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27132 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27133
27134 ktime_get_ts(&cur_time_mon);
27135
27136 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27137 * overflowing the shift of read_errors by hours_since_last.
27138 */
27139 if (hours_since_last >= 8 * sizeof(read_errors))
27140 - atomic_set(&rdev->read_errors, 0);
27141 + atomic_set_unchecked(&rdev->read_errors, 0);
27142 else
27143 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27144 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27145 }
27146
27147 /*
27148 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27149 return;
27150
27151 check_decay_read_errors(mddev, rdev);
27152 - atomic_inc(&rdev->read_errors);
27153 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
27154 + atomic_inc_unchecked(&rdev->read_errors);
27155 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27156 char b[BDEVNAME_SIZE];
27157 bdevname(rdev->bdev, b);
27158
27159 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27160 "md/raid10:%s: %s: Raid device exceeded "
27161 "read_error threshold [cur %d:max %d]\n",
27162 mdname(mddev), b,
27163 - atomic_read(&rdev->read_errors), max_read_errors);
27164 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27165 printk(KERN_NOTICE
27166 "md/raid10:%s: %s: Failing raid device\n",
27167 mdname(mddev), b);
27168 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27169 test_bit(In_sync, &rdev->flags)) {
27170 atomic_inc(&rdev->nr_pending);
27171 rcu_read_unlock();
27172 - atomic_add(s, &rdev->corrected_errors);
27173 + atomic_add_unchecked(s, &rdev->corrected_errors);
27174 if (sync_page_io(rdev,
27175 r10_bio->devs[sl].addr +
27176 sect,
27177 diff -urNp linux-3.0.3/drivers/md/raid1.c linux-3.0.3/drivers/md/raid1.c
27178 --- linux-3.0.3/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27179 +++ linux-3.0.3/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27180 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27181 rdev_dec_pending(rdev, mddev);
27182 md_error(mddev, rdev);
27183 } else
27184 - atomic_add(s, &rdev->corrected_errors);
27185 + atomic_add_unchecked(s, &rdev->corrected_errors);
27186 }
27187 d = start;
27188 while (d != r1_bio->read_disk) {
27189 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27190 /* Well, this device is dead */
27191 md_error(mddev, rdev);
27192 else {
27193 - atomic_add(s, &rdev->corrected_errors);
27194 + atomic_add_unchecked(s, &rdev->corrected_errors);
27195 printk(KERN_INFO
27196 "md/raid1:%s: read error corrected "
27197 "(%d sectors at %llu on %s)\n",
27198 diff -urNp linux-3.0.3/drivers/md/raid5.c linux-3.0.3/drivers/md/raid5.c
27199 --- linux-3.0.3/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27200 +++ linux-3.0.3/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27201 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27202 bi->bi_next = NULL;
27203 if ((rw & WRITE) &&
27204 test_bit(R5_ReWrite, &sh->dev[i].flags))
27205 - atomic_add(STRIPE_SECTORS,
27206 + atomic_add_unchecked(STRIPE_SECTORS,
27207 &rdev->corrected_errors);
27208 generic_make_request(bi);
27209 } else {
27210 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27211 clear_bit(R5_ReadError, &sh->dev[i].flags);
27212 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27213 }
27214 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27215 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27216 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27217 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27218 } else {
27219 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27220 int retry = 0;
27221 rdev = conf->disks[i].rdev;
27222
27223 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27224 - atomic_inc(&rdev->read_errors);
27225 + atomic_inc_unchecked(&rdev->read_errors);
27226 if (conf->mddev->degraded >= conf->max_degraded)
27227 printk_rl(KERN_WARNING
27228 "md/raid:%s: read error not correctable "
27229 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27230 (unsigned long long)(sh->sector
27231 + rdev->data_offset),
27232 bdn);
27233 - else if (atomic_read(&rdev->read_errors)
27234 + else if (atomic_read_unchecked(&rdev->read_errors)
27235 > conf->max_nr_stripes)
27236 printk(KERN_WARNING
27237 "md/raid:%s: Too many read errors, failing device %s.\n",
27238 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27239 sector_t r_sector;
27240 struct stripe_head sh2;
27241
27242 + pax_track_stack();
27243
27244 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27245 stripe = new_sector;
27246 diff -urNp linux-3.0.3/drivers/media/common/saa7146_hlp.c linux-3.0.3/drivers/media/common/saa7146_hlp.c
27247 --- linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27248 +++ linux-3.0.3/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27249 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27250
27251 int x[32], y[32], w[32], h[32];
27252
27253 + pax_track_stack();
27254 +
27255 /* clear out memory */
27256 memset(&line_list[0], 0x00, sizeof(u32)*32);
27257 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27258 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27259 --- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27260 +++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27261 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27262 u8 buf[HOST_LINK_BUF_SIZE];
27263 int i;
27264
27265 + pax_track_stack();
27266 +
27267 dprintk("%s\n", __func__);
27268
27269 /* check if we have space for a link buf in the rx_buffer */
27270 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27271 unsigned long timeout;
27272 int written;
27273
27274 + pax_track_stack();
27275 +
27276 dprintk("%s\n", __func__);
27277
27278 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27279 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h
27280 --- linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27281 +++ linux-3.0.3/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27282 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
27283 union {
27284 struct dmx_ts_feed ts;
27285 struct dmx_section_feed sec;
27286 - } feed;
27287 + } __no_const feed;
27288
27289 union {
27290 dmx_ts_cb ts;
27291 dmx_section_cb sec;
27292 - } cb;
27293 + } __no_const cb;
27294
27295 struct dvb_demux *demux;
27296 void *priv;
27297 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c
27298 --- linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27299 +++ linux-3.0.3/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27300 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27301 const struct dvb_device *template, void *priv, int type)
27302 {
27303 struct dvb_device *dvbdev;
27304 - struct file_operations *dvbdevfops;
27305 + file_operations_no_const *dvbdevfops;
27306 struct device *clsdev;
27307 int minor;
27308 int id;
27309 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c
27310 --- linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27311 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27312 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27313 struct dib0700_adapter_state {
27314 int (*set_param_save) (struct dvb_frontend *,
27315 struct dvb_frontend_parameters *);
27316 -};
27317 +} __no_const;
27318
27319 static int dib7070_set_param_override(struct dvb_frontend *fe,
27320 struct dvb_frontend_parameters *fep)
27321 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c
27322 --- linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27323 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27324 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27325 if (!buf)
27326 return -ENOMEM;
27327
27328 + pax_track_stack();
27329 +
27330 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27331 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27332 hx.addr, hx.len, hx.chk);
27333 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h
27334 --- linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27335 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27336 @@ -97,7 +97,7 @@
27337 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27338
27339 struct dibusb_state {
27340 - struct dib_fe_xfer_ops ops;
27341 + dib_fe_xfer_ops_no_const ops;
27342 int mt2060_present;
27343 u8 tuner_addr;
27344 };
27345 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c
27346 --- linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27347 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27348 @@ -95,7 +95,7 @@ struct su3000_state {
27349
27350 struct s6x0_state {
27351 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27352 -};
27353 +} __no_const;
27354
27355 /* debug */
27356 static int dvb_usb_dw2102_debug;
27357 diff -urNp linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c
27358 --- linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27359 +++ linux-3.0.3/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27360 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27361 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27362 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27363
27364 + pax_track_stack();
27365
27366 data[0] = 0x8a;
27367 len_in = 1;
27368 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27369 int ret = 0, len_in;
27370 u8 data[512] = {0};
27371
27372 + pax_track_stack();
27373 +
27374 data[0] = 0x0a;
27375 len_in = 1;
27376 info("FRM Firmware Cold Reset");
27377 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000.h linux-3.0.3/drivers/media/dvb/frontends/dib3000.h
27378 --- linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27379 +++ linux-3.0.3/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27380 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27381 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27382 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27383 };
27384 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27385
27386 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27387 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27388 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27389 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27390 #else
27391 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27392 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27393 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c
27394 --- linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27395 +++ linux-3.0.3/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27396 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27397 static struct dvb_frontend_ops dib3000mb_ops;
27398
27399 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27400 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27401 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27402 {
27403 struct dib3000_state* state = NULL;
27404
27405 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c
27406 --- linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27407 +++ linux-3.0.3/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27408 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27409 int ret = -1;
27410 int sync;
27411
27412 + pax_track_stack();
27413 +
27414 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27415
27416 fcp = 3000;
27417 diff -urNp linux-3.0.3/drivers/media/dvb/frontends/or51211.c linux-3.0.3/drivers/media/dvb/frontends/or51211.c
27418 --- linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27419 +++ linux-3.0.3/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27420 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27421 u8 tudata[585];
27422 int i;
27423
27424 + pax_track_stack();
27425 +
27426 dprintk("Firmware is %zd bytes\n",fw->size);
27427
27428 /* Get eprom data */
27429 diff -urNp linux-3.0.3/drivers/media/video/cx18/cx18-driver.c linux-3.0.3/drivers/media/video/cx18/cx18-driver.c
27430 --- linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27431 +++ linux-3.0.3/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27432 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27433 struct i2c_client c;
27434 u8 eedata[256];
27435
27436 + pax_track_stack();
27437 +
27438 memset(&c, 0, sizeof(c));
27439 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27440 c.adapter = &cx->i2c_adap[0];
27441 diff -urNp linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c
27442 --- linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
27443 +++ linux-3.0.3/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
27444 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27445 bool handle = false;
27446 struct ir_raw_event ir_core_event[64];
27447
27448 + pax_track_stack();
27449 +
27450 do {
27451 num = 0;
27452 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27453 diff -urNp linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27454 --- linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
27455 +++ linux-3.0.3/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
27456 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27457 u8 *eeprom;
27458 struct tveeprom tvdata;
27459
27460 + pax_track_stack();
27461 +
27462 memset(&tvdata,0,sizeof(tvdata));
27463
27464 eeprom = pvr2_eeprom_fetch(hdw);
27465 diff -urNp linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c
27466 --- linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
27467 +++ linux-3.0.3/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
27468 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27469 unsigned char localPAT[256];
27470 unsigned char localPMT[256];
27471
27472 + pax_track_stack();
27473 +
27474 /* Set video format - must be done first as it resets other settings */
27475 set_reg8(client, 0x41, h->video_format);
27476
27477 diff -urNp linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c
27478 --- linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
27479 +++ linux-3.0.3/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
27480 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27481 u8 tmp[512];
27482 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27483
27484 + pax_track_stack();
27485 +
27486 /* While any outstand message on the bus exists... */
27487 do {
27488
27489 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27490 u8 tmp[512];
27491 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27492
27493 + pax_track_stack();
27494 +
27495 while (loop) {
27496
27497 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27498 diff -urNp linux-3.0.3/drivers/media/video/timblogiw.c linux-3.0.3/drivers/media/video/timblogiw.c
27499 --- linux-3.0.3/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
27500 +++ linux-3.0.3/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
27501 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
27502
27503 /* Platform device functions */
27504
27505 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27506 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
27507 .vidioc_querycap = timblogiw_querycap,
27508 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27509 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27510 diff -urNp linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c
27511 --- linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
27512 +++ linux-3.0.3/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
27513 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
27514 unsigned char rv, gv, bv;
27515 static unsigned char *Y, *U, *V;
27516
27517 + pax_track_stack();
27518 +
27519 frame = usbvision->cur_frame;
27520 image_size = frame->frmwidth * frame->frmheight;
27521 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27522 diff -urNp linux-3.0.3/drivers/media/video/videobuf-dma-sg.c linux-3.0.3/drivers/media/video/videobuf-dma-sg.c
27523 --- linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
27524 +++ linux-3.0.3/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
27525 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27526 {
27527 struct videobuf_queue q;
27528
27529 + pax_track_stack();
27530 +
27531 /* Required to make generic handler to call __videobuf_alloc */
27532 q.int_ops = &sg_ops;
27533
27534 diff -urNp linux-3.0.3/drivers/message/fusion/mptbase.c linux-3.0.3/drivers/message/fusion/mptbase.c
27535 --- linux-3.0.3/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
27536 +++ linux-3.0.3/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
27537 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
27538 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27539 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27540
27541 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27542 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27543 +#else
27544 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27545 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27546 +#endif
27547 +
27548 /*
27549 * Rounding UP to nearest 4-kB boundary here...
27550 */
27551 diff -urNp linux-3.0.3/drivers/message/fusion/mptsas.c linux-3.0.3/drivers/message/fusion/mptsas.c
27552 --- linux-3.0.3/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
27553 +++ linux-3.0.3/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
27554 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27555 return 0;
27556 }
27557
27558 +static inline void
27559 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27560 +{
27561 + if (phy_info->port_details) {
27562 + phy_info->port_details->rphy = rphy;
27563 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27564 + ioc->name, rphy));
27565 + }
27566 +
27567 + if (rphy) {
27568 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27569 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27570 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27571 + ioc->name, rphy, rphy->dev.release));
27572 + }
27573 +}
27574 +
27575 /* no mutex */
27576 static void
27577 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27578 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27579 return NULL;
27580 }
27581
27582 -static inline void
27583 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27584 -{
27585 - if (phy_info->port_details) {
27586 - phy_info->port_details->rphy = rphy;
27587 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27588 - ioc->name, rphy));
27589 - }
27590 -
27591 - if (rphy) {
27592 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27593 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27594 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27595 - ioc->name, rphy, rphy->dev.release));
27596 - }
27597 -}
27598 -
27599 static inline struct sas_port *
27600 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27601 {
27602 diff -urNp linux-3.0.3/drivers/message/fusion/mptscsih.c linux-3.0.3/drivers/message/fusion/mptscsih.c
27603 --- linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
27604 +++ linux-3.0.3/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
27605 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27606
27607 h = shost_priv(SChost);
27608
27609 - if (h) {
27610 - if (h->info_kbuf == NULL)
27611 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27612 - return h->info_kbuf;
27613 - h->info_kbuf[0] = '\0';
27614 + if (!h)
27615 + return NULL;
27616
27617 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27618 - h->info_kbuf[size-1] = '\0';
27619 - }
27620 + if (h->info_kbuf == NULL)
27621 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27622 + return h->info_kbuf;
27623 + h->info_kbuf[0] = '\0';
27624 +
27625 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27626 + h->info_kbuf[size-1] = '\0';
27627
27628 return h->info_kbuf;
27629 }
27630 diff -urNp linux-3.0.3/drivers/message/i2o/i2o_config.c linux-3.0.3/drivers/message/i2o/i2o_config.c
27631 --- linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
27632 +++ linux-3.0.3/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
27633 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27634 struct i2o_message *msg;
27635 unsigned int iop;
27636
27637 + pax_track_stack();
27638 +
27639 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27640 return -EFAULT;
27641
27642 diff -urNp linux-3.0.3/drivers/message/i2o/i2o_proc.c linux-3.0.3/drivers/message/i2o/i2o_proc.c
27643 --- linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
27644 +++ linux-3.0.3/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
27645 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27646 "Array Controller Device"
27647 };
27648
27649 -static char *chtostr(u8 * chars, int n)
27650 -{
27651 - char tmp[256];
27652 - tmp[0] = 0;
27653 - return strncat(tmp, (char *)chars, n);
27654 -}
27655 -
27656 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27657 char *group)
27658 {
27659 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27660
27661 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27662 seq_printf(seq, "%-#8x", ddm_table.module_id);
27663 - seq_printf(seq, "%-29s",
27664 - chtostr(ddm_table.module_name_version, 28));
27665 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27666 seq_printf(seq, "%9d ", ddm_table.data_size);
27667 seq_printf(seq, "%8d", ddm_table.code_size);
27668
27669 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27670
27671 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27672 seq_printf(seq, "%-#8x", dst->module_id);
27673 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27674 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27675 + seq_printf(seq, "%-.28s", dst->module_name_version);
27676 + seq_printf(seq, "%-.8s", dst->date);
27677 seq_printf(seq, "%8d ", dst->module_size);
27678 seq_printf(seq, "%8d ", dst->mpb_size);
27679 seq_printf(seq, "0x%04x", dst->module_flags);
27680 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27681 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27682 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27683 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27684 - seq_printf(seq, "Vendor info : %s\n",
27685 - chtostr((u8 *) (work32 + 2), 16));
27686 - seq_printf(seq, "Product info : %s\n",
27687 - chtostr((u8 *) (work32 + 6), 16));
27688 - seq_printf(seq, "Description : %s\n",
27689 - chtostr((u8 *) (work32 + 10), 16));
27690 - seq_printf(seq, "Product rev. : %s\n",
27691 - chtostr((u8 *) (work32 + 14), 8));
27692 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27693 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27694 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27695 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27696
27697 seq_printf(seq, "Serial number : ");
27698 print_serial_number(seq, (u8 *) (work32 + 16),
27699 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27700 }
27701
27702 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27703 - seq_printf(seq, "Module name : %s\n",
27704 - chtostr(result.module_name, 24));
27705 - seq_printf(seq, "Module revision : %s\n",
27706 - chtostr(result.module_rev, 8));
27707 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27708 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27709
27710 seq_printf(seq, "Serial number : ");
27711 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27712 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27713 return 0;
27714 }
27715
27716 - seq_printf(seq, "Device name : %s\n",
27717 - chtostr(result.device_name, 64));
27718 - seq_printf(seq, "Service name : %s\n",
27719 - chtostr(result.service_name, 64));
27720 - seq_printf(seq, "Physical name : %s\n",
27721 - chtostr(result.physical_location, 64));
27722 - seq_printf(seq, "Instance number : %s\n",
27723 - chtostr(result.instance_number, 4));
27724 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27725 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27726 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27727 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27728
27729 return 0;
27730 }
27731 diff -urNp linux-3.0.3/drivers/message/i2o/iop.c linux-3.0.3/drivers/message/i2o/iop.c
27732 --- linux-3.0.3/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
27733 +++ linux-3.0.3/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
27734 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27735
27736 spin_lock_irqsave(&c->context_list_lock, flags);
27737
27738 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27739 - atomic_inc(&c->context_list_counter);
27740 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27741 + atomic_inc_unchecked(&c->context_list_counter);
27742
27743 - entry->context = atomic_read(&c->context_list_counter);
27744 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27745
27746 list_add(&entry->list, &c->context_list);
27747
27748 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27749
27750 #if BITS_PER_LONG == 64
27751 spin_lock_init(&c->context_list_lock);
27752 - atomic_set(&c->context_list_counter, 0);
27753 + atomic_set_unchecked(&c->context_list_counter, 0);
27754 INIT_LIST_HEAD(&c->context_list);
27755 #endif
27756
27757 diff -urNp linux-3.0.3/drivers/mfd/abx500-core.c linux-3.0.3/drivers/mfd/abx500-core.c
27758 --- linux-3.0.3/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
27759 +++ linux-3.0.3/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
27760 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27761
27762 struct abx500_device_entry {
27763 struct list_head list;
27764 - struct abx500_ops ops;
27765 + abx500_ops_no_const ops;
27766 struct device *dev;
27767 };
27768
27769 diff -urNp linux-3.0.3/drivers/mfd/janz-cmodio.c linux-3.0.3/drivers/mfd/janz-cmodio.c
27770 --- linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
27771 +++ linux-3.0.3/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
27772 @@ -13,6 +13,7 @@
27773
27774 #include <linux/kernel.h>
27775 #include <linux/module.h>
27776 +#include <linux/slab.h>
27777 #include <linux/init.h>
27778 #include <linux/pci.h>
27779 #include <linux/interrupt.h>
27780 diff -urNp linux-3.0.3/drivers/mfd/wm8350-i2c.c linux-3.0.3/drivers/mfd/wm8350-i2c.c
27781 --- linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
27782 +++ linux-3.0.3/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
27783 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27784 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27785 int ret;
27786
27787 + pax_track_stack();
27788 +
27789 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27790 return -EINVAL;
27791
27792 diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c
27793 --- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
27794 +++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
27795 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27796 * the lid is closed. This leads to interrupts as soon as a little move
27797 * is done.
27798 */
27799 - atomic_inc(&lis3_dev.count);
27800 + atomic_inc_unchecked(&lis3_dev.count);
27801
27802 wake_up_interruptible(&lis3_dev.misc_wait);
27803 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27804 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27805 if (lis3_dev.pm_dev)
27806 pm_runtime_get_sync(lis3_dev.pm_dev);
27807
27808 - atomic_set(&lis3_dev.count, 0);
27809 + atomic_set_unchecked(&lis3_dev.count, 0);
27810 return 0;
27811 }
27812
27813 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27814 add_wait_queue(&lis3_dev.misc_wait, &wait);
27815 while (true) {
27816 set_current_state(TASK_INTERRUPTIBLE);
27817 - data = atomic_xchg(&lis3_dev.count, 0);
27818 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27819 if (data)
27820 break;
27821
27822 @@ -583,7 +583,7 @@ out:
27823 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27824 {
27825 poll_wait(file, &lis3_dev.misc_wait, wait);
27826 - if (atomic_read(&lis3_dev.count))
27827 + if (atomic_read_unchecked(&lis3_dev.count))
27828 return POLLIN | POLLRDNORM;
27829 return 0;
27830 }
27831 diff -urNp linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h
27832 --- linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
27833 +++ linux-3.0.3/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
27834 @@ -265,7 +265,7 @@ struct lis3lv02d {
27835 struct input_polled_dev *idev; /* input device */
27836 struct platform_device *pdev; /* platform device */
27837 struct regulator_bulk_data regulators[2];
27838 - atomic_t count; /* interrupt count after last read */
27839 + atomic_unchecked_t count; /* interrupt count after last read */
27840 union axis_conversion ac; /* hw -> logical axis */
27841 int mapped_btns[3];
27842
27843 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c
27844 --- linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
27845 +++ linux-3.0.3/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
27846 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27847 unsigned long nsec;
27848
27849 nsec = CLKS2NSEC(clks);
27850 - atomic_long_inc(&mcs_op_statistics[op].count);
27851 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
27852 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
27853 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
27854 if (mcs_op_statistics[op].max < nsec)
27855 mcs_op_statistics[op].max = nsec;
27856 }
27857 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c
27858 --- linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
27859 +++ linux-3.0.3/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
27860 @@ -32,9 +32,9 @@
27861
27862 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
27863
27864 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
27865 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
27866 {
27867 - unsigned long val = atomic_long_read(v);
27868 + unsigned long val = atomic_long_read_unchecked(v);
27869
27870 seq_printf(s, "%16lu %s\n", val, id);
27871 }
27872 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
27873
27874 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
27875 for (op = 0; op < mcsop_last; op++) {
27876 - count = atomic_long_read(&mcs_op_statistics[op].count);
27877 - total = atomic_long_read(&mcs_op_statistics[op].total);
27878 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
27879 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
27880 max = mcs_op_statistics[op].max;
27881 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
27882 count ? total / count : 0, max);
27883 diff -urNp linux-3.0.3/drivers/misc/sgi-gru/grutables.h linux-3.0.3/drivers/misc/sgi-gru/grutables.h
27884 --- linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
27885 +++ linux-3.0.3/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
27886 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
27887 * GRU statistics.
27888 */
27889 struct gru_stats_s {
27890 - atomic_long_t vdata_alloc;
27891 - atomic_long_t vdata_free;
27892 - atomic_long_t gts_alloc;
27893 - atomic_long_t gts_free;
27894 - atomic_long_t gms_alloc;
27895 - atomic_long_t gms_free;
27896 - atomic_long_t gts_double_allocate;
27897 - atomic_long_t assign_context;
27898 - atomic_long_t assign_context_failed;
27899 - atomic_long_t free_context;
27900 - atomic_long_t load_user_context;
27901 - atomic_long_t load_kernel_context;
27902 - atomic_long_t lock_kernel_context;
27903 - atomic_long_t unlock_kernel_context;
27904 - atomic_long_t steal_user_context;
27905 - atomic_long_t steal_kernel_context;
27906 - atomic_long_t steal_context_failed;
27907 - atomic_long_t nopfn;
27908 - atomic_long_t asid_new;
27909 - atomic_long_t asid_next;
27910 - atomic_long_t asid_wrap;
27911 - atomic_long_t asid_reuse;
27912 - atomic_long_t intr;
27913 - atomic_long_t intr_cbr;
27914 - atomic_long_t intr_tfh;
27915 - atomic_long_t intr_spurious;
27916 - atomic_long_t intr_mm_lock_failed;
27917 - atomic_long_t call_os;
27918 - atomic_long_t call_os_wait_queue;
27919 - atomic_long_t user_flush_tlb;
27920 - atomic_long_t user_unload_context;
27921 - atomic_long_t user_exception;
27922 - atomic_long_t set_context_option;
27923 - atomic_long_t check_context_retarget_intr;
27924 - atomic_long_t check_context_unload;
27925 - atomic_long_t tlb_dropin;
27926 - atomic_long_t tlb_preload_page;
27927 - atomic_long_t tlb_dropin_fail_no_asid;
27928 - atomic_long_t tlb_dropin_fail_upm;
27929 - atomic_long_t tlb_dropin_fail_invalid;
27930 - atomic_long_t tlb_dropin_fail_range_active;
27931 - atomic_long_t tlb_dropin_fail_idle;
27932 - atomic_long_t tlb_dropin_fail_fmm;
27933 - atomic_long_t tlb_dropin_fail_no_exception;
27934 - atomic_long_t tfh_stale_on_fault;
27935 - atomic_long_t mmu_invalidate_range;
27936 - atomic_long_t mmu_invalidate_page;
27937 - atomic_long_t flush_tlb;
27938 - atomic_long_t flush_tlb_gru;
27939 - atomic_long_t flush_tlb_gru_tgh;
27940 - atomic_long_t flush_tlb_gru_zero_asid;
27941 -
27942 - atomic_long_t copy_gpa;
27943 - atomic_long_t read_gpa;
27944 -
27945 - atomic_long_t mesq_receive;
27946 - atomic_long_t mesq_receive_none;
27947 - atomic_long_t mesq_send;
27948 - atomic_long_t mesq_send_failed;
27949 - atomic_long_t mesq_noop;
27950 - atomic_long_t mesq_send_unexpected_error;
27951 - atomic_long_t mesq_send_lb_overflow;
27952 - atomic_long_t mesq_send_qlimit_reached;
27953 - atomic_long_t mesq_send_amo_nacked;
27954 - atomic_long_t mesq_send_put_nacked;
27955 - atomic_long_t mesq_page_overflow;
27956 - atomic_long_t mesq_qf_locked;
27957 - atomic_long_t mesq_qf_noop_not_full;
27958 - atomic_long_t mesq_qf_switch_head_failed;
27959 - atomic_long_t mesq_qf_unexpected_error;
27960 - atomic_long_t mesq_noop_unexpected_error;
27961 - atomic_long_t mesq_noop_lb_overflow;
27962 - atomic_long_t mesq_noop_qlimit_reached;
27963 - atomic_long_t mesq_noop_amo_nacked;
27964 - atomic_long_t mesq_noop_put_nacked;
27965 - atomic_long_t mesq_noop_page_overflow;
27966 + atomic_long_unchecked_t vdata_alloc;
27967 + atomic_long_unchecked_t vdata_free;
27968 + atomic_long_unchecked_t gts_alloc;
27969 + atomic_long_unchecked_t gts_free;
27970 + atomic_long_unchecked_t gms_alloc;
27971 + atomic_long_unchecked_t gms_free;
27972 + atomic_long_unchecked_t gts_double_allocate;
27973 + atomic_long_unchecked_t assign_context;
27974 + atomic_long_unchecked_t assign_context_failed;
27975 + atomic_long_unchecked_t free_context;
27976 + atomic_long_unchecked_t load_user_context;
27977 + atomic_long_unchecked_t load_kernel_context;
27978 + atomic_long_unchecked_t lock_kernel_context;
27979 + atomic_long_unchecked_t unlock_kernel_context;
27980 + atomic_long_unchecked_t steal_user_context;
27981 + atomic_long_unchecked_t steal_kernel_context;
27982 + atomic_long_unchecked_t steal_context_failed;
27983 + atomic_long_unchecked_t nopfn;
27984 + atomic_long_unchecked_t asid_new;
27985 + atomic_long_unchecked_t asid_next;
27986 + atomic_long_unchecked_t asid_wrap;
27987 + atomic_long_unchecked_t asid_reuse;
27988 + atomic_long_unchecked_t intr;
27989 + atomic_long_unchecked_t intr_cbr;
27990 + atomic_long_unchecked_t intr_tfh;
27991 + atomic_long_unchecked_t intr_spurious;
27992 + atomic_long_unchecked_t intr_mm_lock_failed;
27993 + atomic_long_unchecked_t call_os;
27994 + atomic_long_unchecked_t call_os_wait_queue;
27995 + atomic_long_unchecked_t user_flush_tlb;
27996 + atomic_long_unchecked_t user_unload_context;
27997 + atomic_long_unchecked_t user_exception;
27998 + atomic_long_unchecked_t set_context_option;
27999 + atomic_long_unchecked_t check_context_retarget_intr;
28000 + atomic_long_unchecked_t check_context_unload;
28001 + atomic_long_unchecked_t tlb_dropin;
28002 + atomic_long_unchecked_t tlb_preload_page;
28003 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28004 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28005 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28006 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28007 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28008 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28009 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28010 + atomic_long_unchecked_t tfh_stale_on_fault;
28011 + atomic_long_unchecked_t mmu_invalidate_range;
28012 + atomic_long_unchecked_t mmu_invalidate_page;
28013 + atomic_long_unchecked_t flush_tlb;
28014 + atomic_long_unchecked_t flush_tlb_gru;
28015 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28016 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28017 +
28018 + atomic_long_unchecked_t copy_gpa;
28019 + atomic_long_unchecked_t read_gpa;
28020 +
28021 + atomic_long_unchecked_t mesq_receive;
28022 + atomic_long_unchecked_t mesq_receive_none;
28023 + atomic_long_unchecked_t mesq_send;
28024 + atomic_long_unchecked_t mesq_send_failed;
28025 + atomic_long_unchecked_t mesq_noop;
28026 + atomic_long_unchecked_t mesq_send_unexpected_error;
28027 + atomic_long_unchecked_t mesq_send_lb_overflow;
28028 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28029 + atomic_long_unchecked_t mesq_send_amo_nacked;
28030 + atomic_long_unchecked_t mesq_send_put_nacked;
28031 + atomic_long_unchecked_t mesq_page_overflow;
28032 + atomic_long_unchecked_t mesq_qf_locked;
28033 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28034 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28035 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28036 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28037 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28038 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28039 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28040 + atomic_long_unchecked_t mesq_noop_put_nacked;
28041 + atomic_long_unchecked_t mesq_noop_page_overflow;
28042
28043 };
28044
28045 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28046 tghop_invalidate, mcsop_last};
28047
28048 struct mcs_op_statistic {
28049 - atomic_long_t count;
28050 - atomic_long_t total;
28051 + atomic_long_unchecked_t count;
28052 + atomic_long_unchecked_t total;
28053 unsigned long max;
28054 };
28055
28056 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28057
28058 #define STAT(id) do { \
28059 if (gru_options & OPT_STATS) \
28060 - atomic_long_inc(&gru_stats.id); \
28061 + atomic_long_inc_unchecked(&gru_stats.id); \
28062 } while (0)
28063
28064 #ifdef CONFIG_SGI_GRU_DEBUG
28065 diff -urNp linux-3.0.3/drivers/misc/sgi-xp/xp.h linux-3.0.3/drivers/misc/sgi-xp/xp.h
28066 --- linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28067 +++ linux-3.0.3/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28068 @@ -289,7 +289,7 @@ struct xpc_interface {
28069 xpc_notify_func, void *);
28070 void (*received) (short, int, void *);
28071 enum xp_retval (*partid_to_nasids) (short, void *);
28072 -};
28073 +} __no_const;
28074
28075 extern struct xpc_interface xpc_interface;
28076
28077 diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c
28078 --- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28079 +++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28080 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28081 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28082 unsigned long timeo = jiffies + HZ;
28083
28084 + pax_track_stack();
28085 +
28086 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28087 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28088 goto sleep;
28089 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28090 unsigned long initial_adr;
28091 int initial_len = len;
28092
28093 + pax_track_stack();
28094 +
28095 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28096 adr += chip->start;
28097 initial_adr = adr;
28098 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28099 int retries = 3;
28100 int ret;
28101
28102 + pax_track_stack();
28103 +
28104 adr += chip->start;
28105
28106 retry:
28107 diff -urNp linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c
28108 --- linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28109 +++ linux-3.0.3/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28110 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28111 unsigned long cmd_addr;
28112 struct cfi_private *cfi = map->fldrv_priv;
28113
28114 + pax_track_stack();
28115 +
28116 adr += chip->start;
28117
28118 /* Ensure cmd read/writes are aligned. */
28119 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28120 DECLARE_WAITQUEUE(wait, current);
28121 int wbufsize, z;
28122
28123 + pax_track_stack();
28124 +
28125 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28126 if (adr & (map_bankwidth(map)-1))
28127 return -EINVAL;
28128 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28129 DECLARE_WAITQUEUE(wait, current);
28130 int ret = 0;
28131
28132 + pax_track_stack();
28133 +
28134 adr += chip->start;
28135
28136 /* Let's determine this according to the interleave only once */
28137 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28138 unsigned long timeo = jiffies + HZ;
28139 DECLARE_WAITQUEUE(wait, current);
28140
28141 + pax_track_stack();
28142 +
28143 adr += chip->start;
28144
28145 /* Let's determine this according to the interleave only once */
28146 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28147 unsigned long timeo = jiffies + HZ;
28148 DECLARE_WAITQUEUE(wait, current);
28149
28150 + pax_track_stack();
28151 +
28152 adr += chip->start;
28153
28154 /* Let's determine this according to the interleave only once */
28155 diff -urNp linux-3.0.3/drivers/mtd/devices/doc2000.c linux-3.0.3/drivers/mtd/devices/doc2000.c
28156 --- linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28157 +++ linux-3.0.3/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28158 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28159
28160 /* The ECC will not be calculated correctly if less than 512 is written */
28161 /* DBB-
28162 - if (len != 0x200 && eccbuf)
28163 + if (len != 0x200)
28164 printk(KERN_WARNING
28165 "ECC needs a full sector write (adr: %lx size %lx)\n",
28166 (long) to, (long) len);
28167 diff -urNp linux-3.0.3/drivers/mtd/devices/doc2001.c linux-3.0.3/drivers/mtd/devices/doc2001.c
28168 --- linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28169 +++ linux-3.0.3/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28170 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28171 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28172
28173 /* Don't allow read past end of device */
28174 - if (from >= this->totlen)
28175 + if (from >= this->totlen || !len)
28176 return -EINVAL;
28177
28178 /* Don't allow a single read to cross a 512-byte block boundary */
28179 diff -urNp linux-3.0.3/drivers/mtd/ftl.c linux-3.0.3/drivers/mtd/ftl.c
28180 --- linux-3.0.3/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28181 +++ linux-3.0.3/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28182 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28183 loff_t offset;
28184 uint16_t srcunitswap = cpu_to_le16(srcunit);
28185
28186 + pax_track_stack();
28187 +
28188 eun = &part->EUNInfo[srcunit];
28189 xfer = &part->XferInfo[xferunit];
28190 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28191 diff -urNp linux-3.0.3/drivers/mtd/inftlcore.c linux-3.0.3/drivers/mtd/inftlcore.c
28192 --- linux-3.0.3/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28193 +++ linux-3.0.3/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28194 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28195 struct inftl_oob oob;
28196 size_t retlen;
28197
28198 + pax_track_stack();
28199 +
28200 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28201 "pending=%d)\n", inftl, thisVUC, pendingblock);
28202
28203 diff -urNp linux-3.0.3/drivers/mtd/inftlmount.c linux-3.0.3/drivers/mtd/inftlmount.c
28204 --- linux-3.0.3/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28205 +++ linux-3.0.3/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28206 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28207 struct INFTLPartition *ip;
28208 size_t retlen;
28209
28210 + pax_track_stack();
28211 +
28212 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28213
28214 /*
28215 diff -urNp linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c
28216 --- linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28217 +++ linux-3.0.3/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28218 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28219 {
28220 map_word pfow_val[4];
28221
28222 + pax_track_stack();
28223 +
28224 /* Check identification string */
28225 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28226 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28227 diff -urNp linux-3.0.3/drivers/mtd/mtdchar.c linux-3.0.3/drivers/mtd/mtdchar.c
28228 --- linux-3.0.3/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28229 +++ linux-3.0.3/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28230 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28231 u_long size;
28232 struct mtd_info_user info;
28233
28234 + pax_track_stack();
28235 +
28236 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28237
28238 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28239 diff -urNp linux-3.0.3/drivers/mtd/nand/denali.c linux-3.0.3/drivers/mtd/nand/denali.c
28240 --- linux-3.0.3/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28241 +++ linux-3.0.3/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28242 @@ -26,6 +26,7 @@
28243 #include <linux/pci.h>
28244 #include <linux/mtd/mtd.h>
28245 #include <linux/module.h>
28246 +#include <linux/slab.h>
28247
28248 #include "denali.h"
28249
28250 diff -urNp linux-3.0.3/drivers/mtd/nftlcore.c linux-3.0.3/drivers/mtd/nftlcore.c
28251 --- linux-3.0.3/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28252 +++ linux-3.0.3/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28253 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28254 int inplace = 1;
28255 size_t retlen;
28256
28257 + pax_track_stack();
28258 +
28259 memset(BlockMap, 0xff, sizeof(BlockMap));
28260 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28261
28262 diff -urNp linux-3.0.3/drivers/mtd/nftlmount.c linux-3.0.3/drivers/mtd/nftlmount.c
28263 --- linux-3.0.3/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28264 +++ linux-3.0.3/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28265 @@ -24,6 +24,7 @@
28266 #include <asm/errno.h>
28267 #include <linux/delay.h>
28268 #include <linux/slab.h>
28269 +#include <linux/sched.h>
28270 #include <linux/mtd/mtd.h>
28271 #include <linux/mtd/nand.h>
28272 #include <linux/mtd/nftl.h>
28273 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28274 struct mtd_info *mtd = nftl->mbd.mtd;
28275 unsigned int i;
28276
28277 + pax_track_stack();
28278 +
28279 /* Assume logical EraseSize == physical erasesize for starting the scan.
28280 We'll sort it out later if we find a MediaHeader which says otherwise */
28281 /* Actually, we won't. The new DiskOnChip driver has already scanned
28282 diff -urNp linux-3.0.3/drivers/mtd/ubi/build.c linux-3.0.3/drivers/mtd/ubi/build.c
28283 --- linux-3.0.3/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28284 +++ linux-3.0.3/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28285 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28286 static int __init bytes_str_to_int(const char *str)
28287 {
28288 char *endp;
28289 - unsigned long result;
28290 + unsigned long result, scale = 1;
28291
28292 result = simple_strtoul(str, &endp, 0);
28293 if (str == endp || result >= INT_MAX) {
28294 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28295
28296 switch (*endp) {
28297 case 'G':
28298 - result *= 1024;
28299 + scale *= 1024;
28300 case 'M':
28301 - result *= 1024;
28302 + scale *= 1024;
28303 case 'K':
28304 - result *= 1024;
28305 + scale *= 1024;
28306 if (endp[1] == 'i' && endp[2] == 'B')
28307 endp += 2;
28308 case '\0':
28309 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28310 return -EINVAL;
28311 }
28312
28313 - return result;
28314 + if ((intoverflow_t)result*scale >= INT_MAX) {
28315 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28316 + str);
28317 + return -EINVAL;
28318 + }
28319 +
28320 + return result*scale;
28321 }
28322
28323 /**
28324 diff -urNp linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c
28325 --- linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28326 +++ linux-3.0.3/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28327 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28328 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28329 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28330
28331 -static struct bfa_ioc_hwif nw_hwif_ct;
28332 +static struct bfa_ioc_hwif nw_hwif_ct = {
28333 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28334 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28335 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28336 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28337 + .ioc_map_port = bfa_ioc_ct_map_port,
28338 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28339 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28340 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28341 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28342 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28343 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28344 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28345 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28346 +};
28347
28348 /**
28349 * Called from bfa_ioc_attach() to map asic specific calls.
28350 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28351 void
28352 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28353 {
28354 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28355 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28356 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28357 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28358 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28359 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28360 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28361 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28362 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28363 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28364 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28365 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28366 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28367 -
28368 ioc->ioc_hwif = &nw_hwif_ct;
28369 }
28370
28371 diff -urNp linux-3.0.3/drivers/net/bna/bnad.c linux-3.0.3/drivers/net/bna/bnad.c
28372 --- linux-3.0.3/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28373 +++ linux-3.0.3/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28374 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28375 struct bna_intr_info *intr_info =
28376 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28377 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28378 - struct bna_tx_event_cbfn tx_cbfn;
28379 + static struct bna_tx_event_cbfn tx_cbfn = {
28380 + /* Initialize the tx event handlers */
28381 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28382 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28383 + .tx_stall_cbfn = bnad_cb_tx_stall,
28384 + .tx_resume_cbfn = bnad_cb_tx_resume,
28385 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28386 + };
28387 struct bna_tx *tx;
28388 unsigned long flags;
28389
28390 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28391 tx_config->txq_depth = bnad->txq_depth;
28392 tx_config->tx_type = BNA_TX_T_REGULAR;
28393
28394 - /* Initialize the tx event handlers */
28395 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28396 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28397 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28398 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28399 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28400 -
28401 /* Get BNA's resource requirement for one tx object */
28402 spin_lock_irqsave(&bnad->bna_lock, flags);
28403 bna_tx_res_req(bnad->num_txq_per_tx,
28404 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28405 struct bna_intr_info *intr_info =
28406 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28407 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28408 - struct bna_rx_event_cbfn rx_cbfn;
28409 + static struct bna_rx_event_cbfn rx_cbfn = {
28410 + /* Initialize the Rx event handlers */
28411 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28412 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28413 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28414 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28415 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28416 + .rx_post_cbfn = bnad_cb_rx_post
28417 + };
28418 struct bna_rx *rx;
28419 unsigned long flags;
28420
28421 /* Initialize the Rx object configuration */
28422 bnad_init_rx_config(bnad, rx_config);
28423
28424 - /* Initialize the Rx event handlers */
28425 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28426 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28427 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28428 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28429 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28430 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28431 -
28432 /* Get BNA's resource requirement for one Rx object */
28433 spin_lock_irqsave(&bnad->bna_lock, flags);
28434 bna_rx_res_req(rx_config, res_info);
28435 diff -urNp linux-3.0.3/drivers/net/bnx2.c linux-3.0.3/drivers/net/bnx2.c
28436 --- linux-3.0.3/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
28437 +++ linux-3.0.3/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
28438 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28439 int rc = 0;
28440 u32 magic, csum;
28441
28442 + pax_track_stack();
28443 +
28444 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28445 goto test_nvram_done;
28446
28447 diff -urNp linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c
28448 --- linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
28449 +++ linux-3.0.3/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
28450 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
28451 int i, rc;
28452 u32 magic, crc;
28453
28454 + pax_track_stack();
28455 +
28456 if (BP_NOMCP(bp))
28457 return 0;
28458
28459 diff -urNp linux-3.0.3/drivers/net/cxgb3/l2t.h linux-3.0.3/drivers/net/cxgb3/l2t.h
28460 --- linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
28461 +++ linux-3.0.3/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
28462 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28463 */
28464 struct l2t_skb_cb {
28465 arp_failure_handler_func arp_failure_handler;
28466 -};
28467 +} __no_const;
28468
28469 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28470
28471 diff -urNp linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c
28472 --- linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
28473 +++ linux-3.0.3/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
28474 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
28475 unsigned int nchan = adap->params.nports;
28476 struct msix_entry entries[MAX_INGQ + 1];
28477
28478 + pax_track_stack();
28479 +
28480 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28481 entries[i].entry = i;
28482
28483 diff -urNp linux-3.0.3/drivers/net/cxgb4/t4_hw.c linux-3.0.3/drivers/net/cxgb4/t4_hw.c
28484 --- linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
28485 +++ linux-3.0.3/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
28486 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28487 u8 vpd[VPD_LEN], csum;
28488 unsigned int vpdr_len, kw_offset, id_len;
28489
28490 + pax_track_stack();
28491 +
28492 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28493 if (ret < 0)
28494 return ret;
28495 diff -urNp linux-3.0.3/drivers/net/e1000e/82571.c linux-3.0.3/drivers/net/e1000e/82571.c
28496 --- linux-3.0.3/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
28497 +++ linux-3.0.3/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
28498 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28499 {
28500 struct e1000_hw *hw = &adapter->hw;
28501 struct e1000_mac_info *mac = &hw->mac;
28502 - struct e1000_mac_operations *func = &mac->ops;
28503 + e1000_mac_operations_no_const *func = &mac->ops;
28504 u32 swsm = 0;
28505 u32 swsm2 = 0;
28506 bool force_clear_smbi = false;
28507 diff -urNp linux-3.0.3/drivers/net/e1000e/es2lan.c linux-3.0.3/drivers/net/e1000e/es2lan.c
28508 --- linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
28509 +++ linux-3.0.3/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
28510 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28511 {
28512 struct e1000_hw *hw = &adapter->hw;
28513 struct e1000_mac_info *mac = &hw->mac;
28514 - struct e1000_mac_operations *func = &mac->ops;
28515 + e1000_mac_operations_no_const *func = &mac->ops;
28516
28517 /* Set media type */
28518 switch (adapter->pdev->device) {
28519 diff -urNp linux-3.0.3/drivers/net/e1000e/hw.h linux-3.0.3/drivers/net/e1000e/hw.h
28520 --- linux-3.0.3/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
28521 +++ linux-3.0.3/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
28522 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
28523 void (*write_vfta)(struct e1000_hw *, u32, u32);
28524 s32 (*read_mac_addr)(struct e1000_hw *);
28525 };
28526 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28527
28528 /* Function pointers for the PHY. */
28529 struct e1000_phy_operations {
28530 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
28531 void (*power_up)(struct e1000_hw *);
28532 void (*power_down)(struct e1000_hw *);
28533 };
28534 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28535
28536 /* Function pointers for the NVM. */
28537 struct e1000_nvm_operations {
28538 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
28539 s32 (*validate)(struct e1000_hw *);
28540 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28541 };
28542 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28543
28544 struct e1000_mac_info {
28545 - struct e1000_mac_operations ops;
28546 + e1000_mac_operations_no_const ops;
28547 u8 addr[ETH_ALEN];
28548 u8 perm_addr[ETH_ALEN];
28549
28550 @@ -853,7 +856,7 @@ struct e1000_mac_info {
28551 };
28552
28553 struct e1000_phy_info {
28554 - struct e1000_phy_operations ops;
28555 + e1000_phy_operations_no_const ops;
28556
28557 enum e1000_phy_type type;
28558
28559 @@ -887,7 +890,7 @@ struct e1000_phy_info {
28560 };
28561
28562 struct e1000_nvm_info {
28563 - struct e1000_nvm_operations ops;
28564 + e1000_nvm_operations_no_const ops;
28565
28566 enum e1000_nvm_type type;
28567 enum e1000_nvm_override override;
28568 diff -urNp linux-3.0.3/drivers/net/hamradio/6pack.c linux-3.0.3/drivers/net/hamradio/6pack.c
28569 --- linux-3.0.3/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
28570 +++ linux-3.0.3/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
28571 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28572 unsigned char buf[512];
28573 int count1;
28574
28575 + pax_track_stack();
28576 +
28577 if (!count)
28578 return;
28579
28580 diff -urNp linux-3.0.3/drivers/net/igb/e1000_hw.h linux-3.0.3/drivers/net/igb/e1000_hw.h
28581 --- linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
28582 +++ linux-3.0.3/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
28583 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28584 s32 (*read_mac_addr)(struct e1000_hw *);
28585 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28586 };
28587 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28588
28589 struct e1000_phy_operations {
28590 s32 (*acquire)(struct e1000_hw *);
28591 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28592 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28593 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28594 };
28595 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28596
28597 struct e1000_nvm_operations {
28598 s32 (*acquire)(struct e1000_hw *);
28599 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28600 s32 (*update)(struct e1000_hw *);
28601 s32 (*validate)(struct e1000_hw *);
28602 };
28603 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28604
28605 struct e1000_info {
28606 s32 (*get_invariants)(struct e1000_hw *);
28607 @@ -350,7 +353,7 @@ struct e1000_info {
28608 extern const struct e1000_info e1000_82575_info;
28609
28610 struct e1000_mac_info {
28611 - struct e1000_mac_operations ops;
28612 + e1000_mac_operations_no_const ops;
28613
28614 u8 addr[6];
28615 u8 perm_addr[6];
28616 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28617 };
28618
28619 struct e1000_phy_info {
28620 - struct e1000_phy_operations ops;
28621 + e1000_phy_operations_no_const ops;
28622
28623 enum e1000_phy_type type;
28624
28625 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28626 };
28627
28628 struct e1000_nvm_info {
28629 - struct e1000_nvm_operations ops;
28630 + e1000_nvm_operations_no_const ops;
28631 enum e1000_nvm_type type;
28632 enum e1000_nvm_override override;
28633
28634 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28635 s32 (*check_for_ack)(struct e1000_hw *, u16);
28636 s32 (*check_for_rst)(struct e1000_hw *, u16);
28637 };
28638 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28639
28640 struct e1000_mbx_stats {
28641 u32 msgs_tx;
28642 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28643 };
28644
28645 struct e1000_mbx_info {
28646 - struct e1000_mbx_operations ops;
28647 + e1000_mbx_operations_no_const ops;
28648 struct e1000_mbx_stats stats;
28649 u32 timeout;
28650 u32 usec_delay;
28651 diff -urNp linux-3.0.3/drivers/net/igbvf/vf.h linux-3.0.3/drivers/net/igbvf/vf.h
28652 --- linux-3.0.3/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
28653 +++ linux-3.0.3/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
28654 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28655 s32 (*read_mac_addr)(struct e1000_hw *);
28656 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28657 };
28658 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28659
28660 struct e1000_mac_info {
28661 - struct e1000_mac_operations ops;
28662 + e1000_mac_operations_no_const ops;
28663 u8 addr[6];
28664 u8 perm_addr[6];
28665
28666 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28667 s32 (*check_for_ack)(struct e1000_hw *);
28668 s32 (*check_for_rst)(struct e1000_hw *);
28669 };
28670 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28671
28672 struct e1000_mbx_stats {
28673 u32 msgs_tx;
28674 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28675 };
28676
28677 struct e1000_mbx_info {
28678 - struct e1000_mbx_operations ops;
28679 + e1000_mbx_operations_no_const ops;
28680 struct e1000_mbx_stats stats;
28681 u32 timeout;
28682 u32 usec_delay;
28683 diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_main.c linux-3.0.3/drivers/net/ixgb/ixgb_main.c
28684 --- linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
28685 +++ linux-3.0.3/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
28686 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
28687 u32 rctl;
28688 int i;
28689
28690 + pax_track_stack();
28691 +
28692 /* Check for Promiscuous and All Multicast modes */
28693
28694 rctl = IXGB_READ_REG(hw, RCTL);
28695 diff -urNp linux-3.0.3/drivers/net/ixgb/ixgb_param.c linux-3.0.3/drivers/net/ixgb/ixgb_param.c
28696 --- linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
28697 +++ linux-3.0.3/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
28698 @@ -261,6 +261,9 @@ void __devinit
28699 ixgb_check_options(struct ixgb_adapter *adapter)
28700 {
28701 int bd = adapter->bd_number;
28702 +
28703 + pax_track_stack();
28704 +
28705 if (bd >= IXGB_MAX_NIC) {
28706 pr_notice("Warning: no configuration for board #%i\n", bd);
28707 pr_notice("Using defaults for all values\n");
28708 diff -urNp linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h
28709 --- linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
28710 +++ linux-3.0.3/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
28711 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
28712 s32 (*update_checksum)(struct ixgbe_hw *);
28713 u16 (*calc_checksum)(struct ixgbe_hw *);
28714 };
28715 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28716
28717 struct ixgbe_mac_operations {
28718 s32 (*init_hw)(struct ixgbe_hw *);
28719 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
28720 /* Flow Control */
28721 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28722 };
28723 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28724
28725 struct ixgbe_phy_operations {
28726 s32 (*identify)(struct ixgbe_hw *);
28727 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
28728 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28729 s32 (*check_overtemp)(struct ixgbe_hw *);
28730 };
28731 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28732
28733 struct ixgbe_eeprom_info {
28734 - struct ixgbe_eeprom_operations ops;
28735 + ixgbe_eeprom_operations_no_const ops;
28736 enum ixgbe_eeprom_type type;
28737 u32 semaphore_delay;
28738 u16 word_size;
28739 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
28740
28741 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28742 struct ixgbe_mac_info {
28743 - struct ixgbe_mac_operations ops;
28744 + ixgbe_mac_operations_no_const ops;
28745 enum ixgbe_mac_type type;
28746 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28747 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28748 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
28749 };
28750
28751 struct ixgbe_phy_info {
28752 - struct ixgbe_phy_operations ops;
28753 + ixgbe_phy_operations_no_const ops;
28754 struct mdio_if_info mdio;
28755 enum ixgbe_phy_type type;
28756 u32 id;
28757 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
28758 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28759 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28760 };
28761 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28762
28763 struct ixgbe_mbx_stats {
28764 u32 msgs_tx;
28765 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
28766 };
28767
28768 struct ixgbe_mbx_info {
28769 - struct ixgbe_mbx_operations ops;
28770 + ixgbe_mbx_operations_no_const ops;
28771 struct ixgbe_mbx_stats stats;
28772 u32 timeout;
28773 u32 usec_delay;
28774 diff -urNp linux-3.0.3/drivers/net/ixgbevf/vf.h linux-3.0.3/drivers/net/ixgbevf/vf.h
28775 --- linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
28776 +++ linux-3.0.3/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
28777 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
28778 s32 (*clear_vfta)(struct ixgbe_hw *);
28779 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28780 };
28781 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28782
28783 enum ixgbe_mac_type {
28784 ixgbe_mac_unknown = 0,
28785 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
28786 };
28787
28788 struct ixgbe_mac_info {
28789 - struct ixgbe_mac_operations ops;
28790 + ixgbe_mac_operations_no_const ops;
28791 u8 addr[6];
28792 u8 perm_addr[6];
28793
28794 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
28795 s32 (*check_for_ack)(struct ixgbe_hw *);
28796 s32 (*check_for_rst)(struct ixgbe_hw *);
28797 };
28798 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28799
28800 struct ixgbe_mbx_stats {
28801 u32 msgs_tx;
28802 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
28803 };
28804
28805 struct ixgbe_mbx_info {
28806 - struct ixgbe_mbx_operations ops;
28807 + ixgbe_mbx_operations_no_const ops;
28808 struct ixgbe_mbx_stats stats;
28809 u32 timeout;
28810 u32 udelay;
28811 diff -urNp linux-3.0.3/drivers/net/ksz884x.c linux-3.0.3/drivers/net/ksz884x.c
28812 --- linux-3.0.3/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
28813 +++ linux-3.0.3/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
28814 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
28815 int rc;
28816 u64 counter[TOTAL_PORT_COUNTER_NUM];
28817
28818 + pax_track_stack();
28819 +
28820 mutex_lock(&hw_priv->lock);
28821 n = SWITCH_PORT_NUM;
28822 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28823 diff -urNp linux-3.0.3/drivers/net/mlx4/main.c linux-3.0.3/drivers/net/mlx4/main.c
28824 --- linux-3.0.3/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
28825 +++ linux-3.0.3/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
28826 @@ -40,6 +40,7 @@
28827 #include <linux/dma-mapping.h>
28828 #include <linux/slab.h>
28829 #include <linux/io-mapping.h>
28830 +#include <linux/sched.h>
28831
28832 #include <linux/mlx4/device.h>
28833 #include <linux/mlx4/doorbell.h>
28834 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28835 u64 icm_size;
28836 int err;
28837
28838 + pax_track_stack();
28839 +
28840 err = mlx4_QUERY_FW(dev);
28841 if (err) {
28842 if (err == -EACCES)
28843 diff -urNp linux-3.0.3/drivers/net/niu.c linux-3.0.3/drivers/net/niu.c
28844 --- linux-3.0.3/drivers/net/niu.c 2011-08-23 21:44:40.000000000 -0400
28845 +++ linux-3.0.3/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
28846 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
28847 int i, num_irqs, err;
28848 u8 first_ldg;
28849
28850 + pax_track_stack();
28851 +
28852 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
28853 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
28854 ldg_num_map[i] = first_ldg + i;
28855 diff -urNp linux-3.0.3/drivers/net/pcnet32.c linux-3.0.3/drivers/net/pcnet32.c
28856 --- linux-3.0.3/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
28857 +++ linux-3.0.3/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
28858 @@ -82,7 +82,7 @@ static int cards_found;
28859 /*
28860 * VLB I/O addresses
28861 */
28862 -static unsigned int pcnet32_portlist[] __initdata =
28863 +static unsigned int pcnet32_portlist[] __devinitdata =
28864 { 0x300, 0x320, 0x340, 0x360, 0 };
28865
28866 static int pcnet32_debug;
28867 @@ -270,7 +270,7 @@ struct pcnet32_private {
28868 struct sk_buff **rx_skbuff;
28869 dma_addr_t *tx_dma_addr;
28870 dma_addr_t *rx_dma_addr;
28871 - struct pcnet32_access a;
28872 + struct pcnet32_access *a;
28873 spinlock_t lock; /* Guard lock */
28874 unsigned int cur_rx, cur_tx; /* The next free ring entry */
28875 unsigned int rx_ring_size; /* current rx ring size */
28876 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
28877 u16 val;
28878
28879 netif_wake_queue(dev);
28880 - val = lp->a.read_csr(ioaddr, CSR3);
28881 + val = lp->a->read_csr(ioaddr, CSR3);
28882 val &= 0x00ff;
28883 - lp->a.write_csr(ioaddr, CSR3, val);
28884 + lp->a->write_csr(ioaddr, CSR3, val);
28885 napi_enable(&lp->napi);
28886 }
28887
28888 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
28889 r = mii_link_ok(&lp->mii_if);
28890 } else if (lp->chip_version >= PCNET32_79C970A) {
28891 ulong ioaddr = dev->base_addr; /* card base I/O address */
28892 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
28893 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
28894 } else { /* can not detect link on really old chips */
28895 r = 1;
28896 }
28897 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
28898 pcnet32_netif_stop(dev);
28899
28900 spin_lock_irqsave(&lp->lock, flags);
28901 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28902 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28903
28904 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
28905
28906 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
28907 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
28908 {
28909 struct pcnet32_private *lp = netdev_priv(dev);
28910 - struct pcnet32_access *a = &lp->a; /* access to registers */
28911 + struct pcnet32_access *a = lp->a; /* access to registers */
28912 ulong ioaddr = dev->base_addr; /* card base I/O address */
28913 struct sk_buff *skb; /* sk buff */
28914 int x, i; /* counters */
28915 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
28916 pcnet32_netif_stop(dev);
28917
28918 spin_lock_irqsave(&lp->lock, flags);
28919 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28920 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
28921
28922 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
28923
28924 /* Reset the PCNET32 */
28925 - lp->a.reset(ioaddr);
28926 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28927 + lp->a->reset(ioaddr);
28928 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28929
28930 /* switch pcnet32 to 32bit mode */
28931 - lp->a.write_bcr(ioaddr, 20, 2);
28932 + lp->a->write_bcr(ioaddr, 20, 2);
28933
28934 /* purge & init rings but don't actually restart */
28935 pcnet32_restart(dev, 0x0000);
28936
28937 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28938 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28939
28940 /* Initialize Transmit buffers. */
28941 size = data_len + 15;
28942 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
28943
28944 /* set int loopback in CSR15 */
28945 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
28946 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
28947 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
28948
28949 teststatus = cpu_to_le16(0x8000);
28950 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28951 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
28952
28953 /* Check status of descriptors */
28954 for (x = 0; x < numbuffs; x++) {
28955 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
28956 }
28957 }
28958
28959 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28960 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
28961 wmb();
28962 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
28963 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
28964 @@ -1015,7 +1015,7 @@ clean_up:
28965 pcnet32_restart(dev, CSR0_NORMAL);
28966 } else {
28967 pcnet32_purge_rx_ring(dev);
28968 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28969 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
28970 }
28971 spin_unlock_irqrestore(&lp->lock, flags);
28972
28973 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
28974 enum ethtool_phys_id_state state)
28975 {
28976 struct pcnet32_private *lp = netdev_priv(dev);
28977 - struct pcnet32_access *a = &lp->a;
28978 + struct pcnet32_access *a = lp->a;
28979 ulong ioaddr = dev->base_addr;
28980 unsigned long flags;
28981 int i;
28982 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
28983 {
28984 int csr5;
28985 struct pcnet32_private *lp = netdev_priv(dev);
28986 - struct pcnet32_access *a = &lp->a;
28987 + struct pcnet32_access *a = lp->a;
28988 ulong ioaddr = dev->base_addr;
28989 int ticks;
28990
28991 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
28992 spin_lock_irqsave(&lp->lock, flags);
28993 if (pcnet32_tx(dev)) {
28994 /* reset the chip to clear the error condition, then restart */
28995 - lp->a.reset(ioaddr);
28996 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28997 + lp->a->reset(ioaddr);
28998 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
28999 pcnet32_restart(dev, CSR0_START);
29000 netif_wake_queue(dev);
29001 }
29002 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29003 __napi_complete(napi);
29004
29005 /* clear interrupt masks */
29006 - val = lp->a.read_csr(ioaddr, CSR3);
29007 + val = lp->a->read_csr(ioaddr, CSR3);
29008 val &= 0x00ff;
29009 - lp->a.write_csr(ioaddr, CSR3, val);
29010 + lp->a->write_csr(ioaddr, CSR3, val);
29011
29012 /* Set interrupt enable. */
29013 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29014 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29015
29016 spin_unlock_irqrestore(&lp->lock, flags);
29017 }
29018 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29019 int i, csr0;
29020 u16 *buff = ptr;
29021 struct pcnet32_private *lp = netdev_priv(dev);
29022 - struct pcnet32_access *a = &lp->a;
29023 + struct pcnet32_access *a = lp->a;
29024 ulong ioaddr = dev->base_addr;
29025 unsigned long flags;
29026
29027 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29028 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29029 if (lp->phymask & (1 << j)) {
29030 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29031 - lp->a.write_bcr(ioaddr, 33,
29032 + lp->a->write_bcr(ioaddr, 33,
29033 (j << 5) | i);
29034 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29035 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29036 }
29037 }
29038 }
29039 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29040 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29041 lp->options |= PCNET32_PORT_FD;
29042
29043 - lp->a = *a;
29044 + lp->a = a;
29045
29046 /* prior to register_netdev, dev->name is not yet correct */
29047 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29048 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29049 if (lp->mii) {
29050 /* lp->phycount and lp->phymask are set to 0 by memset above */
29051
29052 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29053 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29054 /* scan for PHYs */
29055 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29056 unsigned short id1, id2;
29057 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29058 pr_info("Found PHY %04x:%04x at address %d\n",
29059 id1, id2, i);
29060 }
29061 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29062 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29063 if (lp->phycount > 1)
29064 lp->options |= PCNET32_PORT_MII;
29065 }
29066 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29067 }
29068
29069 /* Reset the PCNET32 */
29070 - lp->a.reset(ioaddr);
29071 + lp->a->reset(ioaddr);
29072
29073 /* switch pcnet32 to 32bit mode */
29074 - lp->a.write_bcr(ioaddr, 20, 2);
29075 + lp->a->write_bcr(ioaddr, 20, 2);
29076
29077 netif_printk(lp, ifup, KERN_DEBUG, dev,
29078 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29079 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29080 (u32) (lp->init_dma_addr));
29081
29082 /* set/reset autoselect bit */
29083 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29084 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29085 if (lp->options & PCNET32_PORT_ASEL)
29086 val |= 2;
29087 - lp->a.write_bcr(ioaddr, 2, val);
29088 + lp->a->write_bcr(ioaddr, 2, val);
29089
29090 /* handle full duplex setting */
29091 if (lp->mii_if.full_duplex) {
29092 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29093 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29094 if (lp->options & PCNET32_PORT_FD) {
29095 val |= 1;
29096 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29097 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29098 if (lp->chip_version == 0x2627)
29099 val |= 3;
29100 }
29101 - lp->a.write_bcr(ioaddr, 9, val);
29102 + lp->a->write_bcr(ioaddr, 9, val);
29103 }
29104
29105 /* set/reset GPSI bit in test register */
29106 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29107 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29108 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29109 val |= 0x10;
29110 - lp->a.write_csr(ioaddr, 124, val);
29111 + lp->a->write_csr(ioaddr, 124, val);
29112
29113 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29114 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29115 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29116 * duplex, and/or enable auto negotiation, and clear DANAS
29117 */
29118 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29119 - lp->a.write_bcr(ioaddr, 32,
29120 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29121 + lp->a->write_bcr(ioaddr, 32,
29122 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29123 /* disable Auto Negotiation, set 10Mpbs, HD */
29124 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29125 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29126 if (lp->options & PCNET32_PORT_FD)
29127 val |= 0x10;
29128 if (lp->options & PCNET32_PORT_100)
29129 val |= 0x08;
29130 - lp->a.write_bcr(ioaddr, 32, val);
29131 + lp->a->write_bcr(ioaddr, 32, val);
29132 } else {
29133 if (lp->options & PCNET32_PORT_ASEL) {
29134 - lp->a.write_bcr(ioaddr, 32,
29135 - lp->a.read_bcr(ioaddr,
29136 + lp->a->write_bcr(ioaddr, 32,
29137 + lp->a->read_bcr(ioaddr,
29138 32) | 0x0080);
29139 /* enable auto negotiate, setup, disable fd */
29140 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29141 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29142 val |= 0x20;
29143 - lp->a.write_bcr(ioaddr, 32, val);
29144 + lp->a->write_bcr(ioaddr, 32, val);
29145 }
29146 }
29147 } else {
29148 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29149 * There is really no good other way to handle multiple PHYs
29150 * other than turning off all automatics
29151 */
29152 - val = lp->a.read_bcr(ioaddr, 2);
29153 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29154 - val = lp->a.read_bcr(ioaddr, 32);
29155 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29156 + val = lp->a->read_bcr(ioaddr, 2);
29157 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29158 + val = lp->a->read_bcr(ioaddr, 32);
29159 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29160
29161 if (!(lp->options & PCNET32_PORT_ASEL)) {
29162 /* setup ecmd */
29163 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29164 ethtool_cmd_speed_set(&ecmd,
29165 (lp->options & PCNET32_PORT_100) ?
29166 SPEED_100 : SPEED_10);
29167 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29168 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29169
29170 if (lp->options & PCNET32_PORT_FD) {
29171 ecmd.duplex = DUPLEX_FULL;
29172 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29173 ecmd.duplex = DUPLEX_HALF;
29174 bcr9 |= ~(1 << 0);
29175 }
29176 - lp->a.write_bcr(ioaddr, 9, bcr9);
29177 + lp->a->write_bcr(ioaddr, 9, bcr9);
29178 }
29179
29180 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29181 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29182
29183 #ifdef DO_DXSUFLO
29184 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29185 - val = lp->a.read_csr(ioaddr, CSR3);
29186 + val = lp->a->read_csr(ioaddr, CSR3);
29187 val |= 0x40;
29188 - lp->a.write_csr(ioaddr, CSR3, val);
29189 + lp->a->write_csr(ioaddr, CSR3, val);
29190 }
29191 #endif
29192
29193 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29194 napi_enable(&lp->napi);
29195
29196 /* Re-initialize the PCNET32, and start it when done. */
29197 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29198 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29199 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29200 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29201
29202 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29203 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29204 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29205 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29206
29207 netif_start_queue(dev);
29208
29209 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29210
29211 i = 0;
29212 while (i++ < 100)
29213 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29214 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29215 break;
29216 /*
29217 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29218 * reports that doing so triggers a bug in the '974.
29219 */
29220 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29221 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29222
29223 netif_printk(lp, ifup, KERN_DEBUG, dev,
29224 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29225 i,
29226 (u32) (lp->init_dma_addr),
29227 - lp->a.read_csr(ioaddr, CSR0));
29228 + lp->a->read_csr(ioaddr, CSR0));
29229
29230 spin_unlock_irqrestore(&lp->lock, flags);
29231
29232 @@ -2218,7 +2218,7 @@ err_free_ring:
29233 * Switch back to 16bit mode to avoid problems with dumb
29234 * DOS packet driver after a warm reboot
29235 */
29236 - lp->a.write_bcr(ioaddr, 20, 4);
29237 + lp->a->write_bcr(ioaddr, 20, 4);
29238
29239 err_free_irq:
29240 spin_unlock_irqrestore(&lp->lock, flags);
29241 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29242
29243 /* wait for stop */
29244 for (i = 0; i < 100; i++)
29245 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29246 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29247 break;
29248
29249 if (i >= 100)
29250 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29251 return;
29252
29253 /* ReInit Ring */
29254 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29255 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29256 i = 0;
29257 while (i++ < 1000)
29258 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29259 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29260 break;
29261
29262 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29263 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29264 }
29265
29266 static void pcnet32_tx_timeout(struct net_device *dev)
29267 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29268 /* Transmitter timeout, serious problems. */
29269 if (pcnet32_debug & NETIF_MSG_DRV)
29270 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29271 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29272 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29273 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29274 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29275 dev->stats.tx_errors++;
29276 if (netif_msg_tx_err(lp)) {
29277 int i;
29278 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29279
29280 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29281 "%s() called, csr0 %4.4x\n",
29282 - __func__, lp->a.read_csr(ioaddr, CSR0));
29283 + __func__, lp->a->read_csr(ioaddr, CSR0));
29284
29285 /* Default status -- will not enable Successful-TxDone
29286 * interrupt when that option is available to us.
29287 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29288 dev->stats.tx_bytes += skb->len;
29289
29290 /* Trigger an immediate send poll. */
29291 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29292 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29293
29294 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29295 lp->tx_full = 1;
29296 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29297
29298 spin_lock(&lp->lock);
29299
29300 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29301 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29302 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29303 if (csr0 == 0xffff)
29304 break; /* PCMCIA remove happened */
29305 /* Acknowledge all of the current interrupt sources ASAP. */
29306 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29307 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29308
29309 netif_printk(lp, intr, KERN_DEBUG, dev,
29310 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29311 - csr0, lp->a.read_csr(ioaddr, CSR0));
29312 + csr0, lp->a->read_csr(ioaddr, CSR0));
29313
29314 /* Log misc errors. */
29315 if (csr0 & 0x4000)
29316 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29317 if (napi_schedule_prep(&lp->napi)) {
29318 u16 val;
29319 /* set interrupt masks */
29320 - val = lp->a.read_csr(ioaddr, CSR3);
29321 + val = lp->a->read_csr(ioaddr, CSR3);
29322 val |= 0x5f00;
29323 - lp->a.write_csr(ioaddr, CSR3, val);
29324 + lp->a->write_csr(ioaddr, CSR3, val);
29325
29326 __napi_schedule(&lp->napi);
29327 break;
29328 }
29329 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29330 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29331 }
29332
29333 netif_printk(lp, intr, KERN_DEBUG, dev,
29334 "exiting interrupt, csr0=%#4.4x\n",
29335 - lp->a.read_csr(ioaddr, CSR0));
29336 + lp->a->read_csr(ioaddr, CSR0));
29337
29338 spin_unlock(&lp->lock);
29339
29340 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29341
29342 spin_lock_irqsave(&lp->lock, flags);
29343
29344 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29345 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29346
29347 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29348 "Shutting down ethercard, status was %2.2x\n",
29349 - lp->a.read_csr(ioaddr, CSR0));
29350 + lp->a->read_csr(ioaddr, CSR0));
29351
29352 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29353 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29354 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29355
29356 /*
29357 * Switch back to 16bit mode to avoid problems with dumb
29358 * DOS packet driver after a warm reboot
29359 */
29360 - lp->a.write_bcr(ioaddr, 20, 4);
29361 + lp->a->write_bcr(ioaddr, 20, 4);
29362
29363 spin_unlock_irqrestore(&lp->lock, flags);
29364
29365 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29366 unsigned long flags;
29367
29368 spin_lock_irqsave(&lp->lock, flags);
29369 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29370 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29371 spin_unlock_irqrestore(&lp->lock, flags);
29372
29373 return &dev->stats;
29374 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29375 if (dev->flags & IFF_ALLMULTI) {
29376 ib->filter[0] = cpu_to_le32(~0U);
29377 ib->filter[1] = cpu_to_le32(~0U);
29378 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29379 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29380 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29381 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29382 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29383 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29384 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29385 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29386 return;
29387 }
29388 /* clear the multicast filter */
29389 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29390 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29391 }
29392 for (i = 0; i < 4; i++)
29393 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29394 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29395 le16_to_cpu(mcast_table[i]));
29396 }
29397
29398 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29399
29400 spin_lock_irqsave(&lp->lock, flags);
29401 suspended = pcnet32_suspend(dev, &flags, 0);
29402 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29403 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29404 if (dev->flags & IFF_PROMISC) {
29405 /* Log any net taps. */
29406 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29407 lp->init_block->mode =
29408 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29409 7);
29410 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29411 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29412 } else {
29413 lp->init_block->mode =
29414 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29415 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29416 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29417 pcnet32_load_multicast(dev);
29418 }
29419
29420 if (suspended) {
29421 int csr5;
29422 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29423 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29424 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29425 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29426 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29427 } else {
29428 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29429 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29430 pcnet32_restart(dev, CSR0_NORMAL);
29431 netif_wake_queue(dev);
29432 }
29433 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29434 if (!lp->mii)
29435 return 0;
29436
29437 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29438 - val_out = lp->a.read_bcr(ioaddr, 34);
29439 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29440 + val_out = lp->a->read_bcr(ioaddr, 34);
29441
29442 return val_out;
29443 }
29444 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
29445 if (!lp->mii)
29446 return;
29447
29448 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29449 - lp->a.write_bcr(ioaddr, 34, val);
29450 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29451 + lp->a->write_bcr(ioaddr, 34, val);
29452 }
29453
29454 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29455 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
29456 curr_link = mii_link_ok(&lp->mii_if);
29457 } else {
29458 ulong ioaddr = dev->base_addr; /* card base I/O address */
29459 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29460 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29461 }
29462 if (!curr_link) {
29463 if (prev_link || verbose) {
29464 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
29465 (ecmd.duplex == DUPLEX_FULL)
29466 ? "full" : "half");
29467 }
29468 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29469 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29470 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29471 if (lp->mii_if.full_duplex)
29472 bcr9 |= (1 << 0);
29473 else
29474 bcr9 &= ~(1 << 0);
29475 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29476 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29477 }
29478 } else {
29479 netif_info(lp, link, dev, "link up\n");
29480 diff -urNp linux-3.0.3/drivers/net/ppp_generic.c linux-3.0.3/drivers/net/ppp_generic.c
29481 --- linux-3.0.3/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
29482 +++ linux-3.0.3/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
29483 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29484 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29485 struct ppp_stats stats;
29486 struct ppp_comp_stats cstats;
29487 - char *vers;
29488
29489 switch (cmd) {
29490 case SIOCGPPPSTATS:
29491 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29492 break;
29493
29494 case SIOCGPPPVER:
29495 - vers = PPP_VERSION;
29496 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29497 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29498 break;
29499 err = 0;
29500 break;
29501 diff -urNp linux-3.0.3/drivers/net/r8169.c linux-3.0.3/drivers/net/r8169.c
29502 --- linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:44:40.000000000 -0400
29503 +++ linux-3.0.3/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
29504 @@ -645,12 +645,12 @@ struct rtl8169_private {
29505 struct mdio_ops {
29506 void (*write)(void __iomem *, int, int);
29507 int (*read)(void __iomem *, int);
29508 - } mdio_ops;
29509 + } __no_const mdio_ops;
29510
29511 struct pll_power_ops {
29512 void (*down)(struct rtl8169_private *);
29513 void (*up)(struct rtl8169_private *);
29514 - } pll_power_ops;
29515 + } __no_const pll_power_ops;
29516
29517 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29518 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29519 diff -urNp linux-3.0.3/drivers/net/tg3.h linux-3.0.3/drivers/net/tg3.h
29520 --- linux-3.0.3/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
29521 +++ linux-3.0.3/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
29522 @@ -134,6 +134,7 @@
29523 #define CHIPREV_ID_5750_A0 0x4000
29524 #define CHIPREV_ID_5750_A1 0x4001
29525 #define CHIPREV_ID_5750_A3 0x4003
29526 +#define CHIPREV_ID_5750_C1 0x4201
29527 #define CHIPREV_ID_5750_C2 0x4202
29528 #define CHIPREV_ID_5752_A0_HW 0x5000
29529 #define CHIPREV_ID_5752_A0 0x6000
29530 diff -urNp linux-3.0.3/drivers/net/tokenring/abyss.c linux-3.0.3/drivers/net/tokenring/abyss.c
29531 --- linux-3.0.3/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
29532 +++ linux-3.0.3/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
29533 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29534
29535 static int __init abyss_init (void)
29536 {
29537 - abyss_netdev_ops = tms380tr_netdev_ops;
29538 + pax_open_kernel();
29539 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29540
29541 - abyss_netdev_ops.ndo_open = abyss_open;
29542 - abyss_netdev_ops.ndo_stop = abyss_close;
29543 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29544 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29545 + pax_close_kernel();
29546
29547 return pci_register_driver(&abyss_driver);
29548 }
29549 diff -urNp linux-3.0.3/drivers/net/tokenring/madgemc.c linux-3.0.3/drivers/net/tokenring/madgemc.c
29550 --- linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
29551 +++ linux-3.0.3/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
29552 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29553
29554 static int __init madgemc_init (void)
29555 {
29556 - madgemc_netdev_ops = tms380tr_netdev_ops;
29557 - madgemc_netdev_ops.ndo_open = madgemc_open;
29558 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29559 + pax_open_kernel();
29560 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29561 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29562 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29563 + pax_close_kernel();
29564
29565 return mca_register_driver (&madgemc_driver);
29566 }
29567 diff -urNp linux-3.0.3/drivers/net/tokenring/proteon.c linux-3.0.3/drivers/net/tokenring/proteon.c
29568 --- linux-3.0.3/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
29569 +++ linux-3.0.3/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
29570 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29571 struct platform_device *pdev;
29572 int i, num = 0, err = 0;
29573
29574 - proteon_netdev_ops = tms380tr_netdev_ops;
29575 - proteon_netdev_ops.ndo_open = proteon_open;
29576 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29577 + pax_open_kernel();
29578 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29579 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29580 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29581 + pax_close_kernel();
29582
29583 err = platform_driver_register(&proteon_driver);
29584 if (err)
29585 diff -urNp linux-3.0.3/drivers/net/tokenring/skisa.c linux-3.0.3/drivers/net/tokenring/skisa.c
29586 --- linux-3.0.3/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
29587 +++ linux-3.0.3/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
29588 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29589 struct platform_device *pdev;
29590 int i, num = 0, err = 0;
29591
29592 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29593 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29594 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29595 + pax_open_kernel();
29596 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29597 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29598 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29599 + pax_close_kernel();
29600
29601 err = platform_driver_register(&sk_isa_driver);
29602 if (err)
29603 diff -urNp linux-3.0.3/drivers/net/tulip/de2104x.c linux-3.0.3/drivers/net/tulip/de2104x.c
29604 --- linux-3.0.3/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
29605 +++ linux-3.0.3/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
29606 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
29607 struct de_srom_info_leaf *il;
29608 void *bufp;
29609
29610 + pax_track_stack();
29611 +
29612 /* download entire eeprom */
29613 for (i = 0; i < DE_EEPROM_WORDS; i++)
29614 ((__le16 *)ee_data)[i] =
29615 diff -urNp linux-3.0.3/drivers/net/tulip/de4x5.c linux-3.0.3/drivers/net/tulip/de4x5.c
29616 --- linux-3.0.3/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
29617 +++ linux-3.0.3/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
29618 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29619 for (i=0; i<ETH_ALEN; i++) {
29620 tmp.addr[i] = dev->dev_addr[i];
29621 }
29622 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29623 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29624 break;
29625
29626 case DE4X5_SET_HWADDR: /* Set the hardware address */
29627 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29628 spin_lock_irqsave(&lp->lock, flags);
29629 memcpy(&statbuf, &lp->pktStats, ioc->len);
29630 spin_unlock_irqrestore(&lp->lock, flags);
29631 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29632 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29633 return -EFAULT;
29634 break;
29635 }
29636 diff -urNp linux-3.0.3/drivers/net/usb/hso.c linux-3.0.3/drivers/net/usb/hso.c
29637 --- linux-3.0.3/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
29638 +++ linux-3.0.3/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
29639 @@ -71,7 +71,7 @@
29640 #include <asm/byteorder.h>
29641 #include <linux/serial_core.h>
29642 #include <linux/serial.h>
29643 -
29644 +#include <asm/local.h>
29645
29646 #define MOD_AUTHOR "Option Wireless"
29647 #define MOD_DESCRIPTION "USB High Speed Option driver"
29648 @@ -257,7 +257,7 @@ struct hso_serial {
29649
29650 /* from usb_serial_port */
29651 struct tty_struct *tty;
29652 - int open_count;
29653 + local_t open_count;
29654 spinlock_t serial_lock;
29655
29656 int (*write_data) (struct hso_serial *serial);
29657 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29658 struct urb *urb;
29659
29660 urb = serial->rx_urb[0];
29661 - if (serial->open_count > 0) {
29662 + if (local_read(&serial->open_count) > 0) {
29663 count = put_rxbuf_data(urb, serial);
29664 if (count == -1)
29665 return;
29666 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29667 DUMP1(urb->transfer_buffer, urb->actual_length);
29668
29669 /* Anyone listening? */
29670 - if (serial->open_count == 0)
29671 + if (local_read(&serial->open_count) == 0)
29672 return;
29673
29674 if (status == 0) {
29675 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29676 spin_unlock_irq(&serial->serial_lock);
29677
29678 /* check for port already opened, if not set the termios */
29679 - serial->open_count++;
29680 - if (serial->open_count == 1) {
29681 + if (local_inc_return(&serial->open_count) == 1) {
29682 serial->rx_state = RX_IDLE;
29683 /* Force default termio settings */
29684 _hso_serial_set_termios(tty, NULL);
29685 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29686 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29687 if (result) {
29688 hso_stop_serial_device(serial->parent);
29689 - serial->open_count--;
29690 + local_dec(&serial->open_count);
29691 kref_put(&serial->parent->ref, hso_serial_ref_free);
29692 }
29693 } else {
29694 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29695
29696 /* reset the rts and dtr */
29697 /* do the actual close */
29698 - serial->open_count--;
29699 + local_dec(&serial->open_count);
29700
29701 - if (serial->open_count <= 0) {
29702 - serial->open_count = 0;
29703 + if (local_read(&serial->open_count) <= 0) {
29704 + local_set(&serial->open_count, 0);
29705 spin_lock_irq(&serial->serial_lock);
29706 if (serial->tty == tty) {
29707 serial->tty->driver_data = NULL;
29708 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29709
29710 /* the actual setup */
29711 spin_lock_irqsave(&serial->serial_lock, flags);
29712 - if (serial->open_count)
29713 + if (local_read(&serial->open_count))
29714 _hso_serial_set_termios(tty, old);
29715 else
29716 tty->termios = old;
29717 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29718 D1("Pending read interrupt on port %d\n", i);
29719 spin_lock(&serial->serial_lock);
29720 if (serial->rx_state == RX_IDLE &&
29721 - serial->open_count > 0) {
29722 + local_read(&serial->open_count) > 0) {
29723 /* Setup and send a ctrl req read on
29724 * port i */
29725 if (!serial->rx_urb_filled[0]) {
29726 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
29727 /* Start all serial ports */
29728 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29729 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29730 - if (dev2ser(serial_table[i])->open_count) {
29731 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29732 result =
29733 hso_start_serial_device(serial_table[i], GFP_NOIO);
29734 hso_kick_transmit(dev2ser(serial_table[i]));
29735 diff -urNp linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c
29736 --- linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29737 +++ linux-3.0.3/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
29738 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
29739 * Return with error code if any of the queue indices
29740 * is out of range
29741 */
29742 - if (p->ring_index[i] < 0 ||
29743 - p->ring_index[i] >= adapter->num_rx_queues)
29744 + if (p->ring_index[i] >= adapter->num_rx_queues)
29745 return -EINVAL;
29746 }
29747
29748 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-config.h linux-3.0.3/drivers/net/vxge/vxge-config.h
29749 --- linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
29750 +++ linux-3.0.3/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
29751 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
29752 void (*link_down)(struct __vxge_hw_device *devh);
29753 void (*crit_err)(struct __vxge_hw_device *devh,
29754 enum vxge_hw_event type, u64 ext_data);
29755 -};
29756 +} __no_const;
29757
29758 /*
29759 * struct __vxge_hw_blockpool_entry - Block private data structure
29760 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-main.c linux-3.0.3/drivers/net/vxge/vxge-main.c
29761 --- linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
29762 +++ linux-3.0.3/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
29763 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29764 struct sk_buff *completed[NR_SKB_COMPLETED];
29765 int more;
29766
29767 + pax_track_stack();
29768 +
29769 do {
29770 more = 0;
29771 skb_ptr = completed;
29772 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
29773 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29774 int index;
29775
29776 + pax_track_stack();
29777 +
29778 /*
29779 * Filling
29780 * - itable with bucket numbers
29781 diff -urNp linux-3.0.3/drivers/net/vxge/vxge-traffic.h linux-3.0.3/drivers/net/vxge/vxge-traffic.h
29782 --- linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
29783 +++ linux-3.0.3/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
29784 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29785 struct vxge_hw_mempool_dma *dma_object,
29786 u32 index,
29787 u32 is_last);
29788 -};
29789 +} __no_const;
29790
29791 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29792 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29793 diff -urNp linux-3.0.3/drivers/net/wan/cycx_x25.c linux-3.0.3/drivers/net/wan/cycx_x25.c
29794 --- linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
29795 +++ linux-3.0.3/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
29796 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29797 unsigned char hex[1024],
29798 * phex = hex;
29799
29800 + pax_track_stack();
29801 +
29802 if (len >= (sizeof(hex) / 2))
29803 len = (sizeof(hex) / 2) - 1;
29804
29805 diff -urNp linux-3.0.3/drivers/net/wan/hdlc_x25.c linux-3.0.3/drivers/net/wan/hdlc_x25.c
29806 --- linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
29807 +++ linux-3.0.3/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
29808 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29809
29810 static int x25_open(struct net_device *dev)
29811 {
29812 - struct lapb_register_struct cb;
29813 + static struct lapb_register_struct cb = {
29814 + .connect_confirmation = x25_connected,
29815 + .connect_indication = x25_connected,
29816 + .disconnect_confirmation = x25_disconnected,
29817 + .disconnect_indication = x25_disconnected,
29818 + .data_indication = x25_data_indication,
29819 + .data_transmit = x25_data_transmit
29820 + };
29821 int result;
29822
29823 - cb.connect_confirmation = x25_connected;
29824 - cb.connect_indication = x25_connected;
29825 - cb.disconnect_confirmation = x25_disconnected;
29826 - cb.disconnect_indication = x25_disconnected;
29827 - cb.data_indication = x25_data_indication;
29828 - cb.data_transmit = x25_data_transmit;
29829 -
29830 result = lapb_register(dev, &cb);
29831 if (result != LAPB_OK)
29832 return result;
29833 diff -urNp linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c
29834 --- linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
29835 +++ linux-3.0.3/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
29836 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29837 int do_autopm = 1;
29838 DECLARE_COMPLETION_ONSTACK(notif_completion);
29839
29840 + pax_track_stack();
29841 +
29842 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
29843 i2400m, ack, ack_size);
29844 BUG_ON(_ack == i2400m->bm_ack_buf);
29845 diff -urNp linux-3.0.3/drivers/net/wireless/airo.c linux-3.0.3/drivers/net/wireless/airo.c
29846 --- linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:44:40.000000000 -0400
29847 +++ linux-3.0.3/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
29848 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
29849 BSSListElement * loop_net;
29850 BSSListElement * tmp_net;
29851
29852 + pax_track_stack();
29853 +
29854 /* Blow away current list of scan results */
29855 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
29856 list_move_tail (&loop_net->list, &ai->network_free_list);
29857 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
29858 WepKeyRid wkr;
29859 int rc;
29860
29861 + pax_track_stack();
29862 +
29863 memset( &mySsid, 0, sizeof( mySsid ) );
29864 kfree (ai->flash);
29865 ai->flash = NULL;
29866 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
29867 __le32 *vals = stats.vals;
29868 int len;
29869
29870 + pax_track_stack();
29871 +
29872 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29873 return -ENOMEM;
29874 data = file->private_data;
29875 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
29876 /* If doLoseSync is not 1, we won't do a Lose Sync */
29877 int doLoseSync = -1;
29878
29879 + pax_track_stack();
29880 +
29881 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
29882 return -ENOMEM;
29883 data = file->private_data;
29884 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
29885 int i;
29886 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
29887
29888 + pax_track_stack();
29889 +
29890 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
29891 if (!qual)
29892 return -ENOMEM;
29893 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
29894 CapabilityRid cap_rid;
29895 __le32 *vals = stats_rid.vals;
29896
29897 + pax_track_stack();
29898 +
29899 /* Get stats out of the card */
29900 clear_bit(JOB_WSTATS, &local->jobs);
29901 if (local->power.event) {
29902 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c
29903 --- linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
29904 +++ linux-3.0.3/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
29905 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
29906 unsigned int v;
29907 u64 tsf;
29908
29909 + pax_track_stack();
29910 +
29911 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
29912 len += snprintf(buf+len, sizeof(buf)-len,
29913 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
29914 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
29915 unsigned int len = 0;
29916 unsigned int i;
29917
29918 + pax_track_stack();
29919 +
29920 len += snprintf(buf+len, sizeof(buf)-len,
29921 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
29922
29923 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
29924 unsigned int i;
29925 unsigned int v;
29926
29927 + pax_track_stack();
29928 +
29929 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
29930 sc->ah->ah_ant_mode);
29931 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
29932 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
29933 unsigned int len = 0;
29934 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
29935
29936 + pax_track_stack();
29937 +
29938 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
29939 sc->bssidmask);
29940 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
29941 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
29942 unsigned int len = 0;
29943 int i;
29944
29945 + pax_track_stack();
29946 +
29947 len += snprintf(buf+len, sizeof(buf)-len,
29948 "RX\n---------------------\n");
29949 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
29950 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
29951 char buf[700];
29952 unsigned int len = 0;
29953
29954 + pax_track_stack();
29955 +
29956 len += snprintf(buf+len, sizeof(buf)-len,
29957 "HW has PHY error counters:\t%s\n",
29958 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
29959 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
29960 struct ath5k_buf *bf, *bf0;
29961 int i, n;
29962
29963 + pax_track_stack();
29964 +
29965 len += snprintf(buf+len, sizeof(buf)-len,
29966 "available txbuffers: %d\n", sc->txbuf_len);
29967
29968 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c
29969 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
29970 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
29971 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
29972 int i, im, j;
29973 int nmeasurement;
29974
29975 + pax_track_stack();
29976 +
29977 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
29978 if (ah->txchainmask & (1 << i))
29979 num_chains++;
29980 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
29981 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
29982 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
29983 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
29984 int theta_low_bin = 0;
29985 int i;
29986
29987 + pax_track_stack();
29988 +
29989 /* disregard any bin that contains <= 16 samples */
29990 thresh_accum_cnt = 16;
29991 scale_factor = 5;
29992 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c
29993 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
29994 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
29995 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
29996 char buf[512];
29997 unsigned int len = 0;
29998
29999 + pax_track_stack();
30000 +
30001 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30002 len += snprintf(buf + len, sizeof(buf) - len,
30003 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30004 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30005 u8 addr[ETH_ALEN];
30006 u32 tmp;
30007
30008 + pax_track_stack();
30009 +
30010 len += snprintf(buf + len, sizeof(buf) - len,
30011 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30012 wiphy_name(sc->hw->wiphy),
30013 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30014 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30015 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30016 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30017 unsigned int len = 0;
30018 int ret = 0;
30019
30020 + pax_track_stack();
30021 +
30022 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30023
30024 ath9k_htc_ps_wakeup(priv);
30025 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30026 unsigned int len = 0;
30027 int ret = 0;
30028
30029 + pax_track_stack();
30030 +
30031 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30032
30033 ath9k_htc_ps_wakeup(priv);
30034 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30035 unsigned int len = 0;
30036 int ret = 0;
30037
30038 + pax_track_stack();
30039 +
30040 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30041
30042 ath9k_htc_ps_wakeup(priv);
30043 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30044 char buf[512];
30045 unsigned int len = 0;
30046
30047 + pax_track_stack();
30048 +
30049 len += snprintf(buf + len, sizeof(buf) - len,
30050 "%20s : %10u\n", "Buffers queued",
30051 priv->debug.tx_stats.buf_queued);
30052 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30053 char buf[512];
30054 unsigned int len = 0;
30055
30056 + pax_track_stack();
30057 +
30058 spin_lock_bh(&priv->tx.tx_lock);
30059
30060 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30061 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30062 char buf[512];
30063 unsigned int len = 0;
30064
30065 + pax_track_stack();
30066 +
30067 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30068 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30069
30070 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h
30071 --- linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:44:40.000000000 -0400
30072 +++ linux-3.0.3/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30073 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30074
30075 /* ANI */
30076 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30077 -};
30078 +} __no_const;
30079
30080 /**
30081 * struct ath_hw_ops - callbacks used by hardware code and driver code
30082 @@ -637,7 +637,7 @@ struct ath_hw_ops {
30083 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30084 struct ath_hw_antcomb_conf *antconf);
30085
30086 -};
30087 +} __no_const;
30088
30089 struct ath_nf_limits {
30090 s16 max;
30091 @@ -650,7 +650,7 @@ struct ath_nf_limits {
30092 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30093
30094 struct ath_hw {
30095 - struct ath_ops reg_ops;
30096 + ath_ops_no_const reg_ops;
30097
30098 struct ieee80211_hw *hw;
30099 struct ath_common common;
30100 diff -urNp linux-3.0.3/drivers/net/wireless/ath/ath.h linux-3.0.3/drivers/net/wireless/ath/ath.h
30101 --- linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30102 +++ linux-3.0.3/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30103 @@ -121,6 +121,7 @@ struct ath_ops {
30104 void (*write_flush) (void *);
30105 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30106 };
30107 +typedef struct ath_ops __no_const ath_ops_no_const;
30108
30109 struct ath_common;
30110 struct ath_bus_ops;
30111 diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c
30112 --- linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30113 +++ linux-3.0.3/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30114 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30115 int err;
30116 DECLARE_SSID_BUF(ssid);
30117
30118 + pax_track_stack();
30119 +
30120 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30121
30122 if (ssid_len)
30123 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30124 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30125 int err;
30126
30127 + pax_track_stack();
30128 +
30129 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30130 idx, keylen, len);
30131
30132 diff -urNp linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c
30133 --- linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30134 +++ linux-3.0.3/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30135 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30136 unsigned long flags;
30137 DECLARE_SSID_BUF(ssid);
30138
30139 + pax_track_stack();
30140 +
30141 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30142 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30143 print_ssid(ssid, info_element->data, info_element->len),
30144 diff -urNp linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c
30145 --- linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30146 +++ linux-3.0.3/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30147 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30148 */
30149 if (iwl3945_mod_params.disable_hw_scan) {
30150 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30151 - iwl3945_hw_ops.hw_scan = NULL;
30152 + pax_open_kernel();
30153 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30154 + pax_close_kernel();
30155 }
30156
30157 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30158 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30159 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30160 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30161 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30162 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30163 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30164
30165 + pax_track_stack();
30166 +
30167 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30168
30169 /* Treat uninitialized rate scaling data same as non-existing. */
30170 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30171 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30172 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30173
30174 + pax_track_stack();
30175 +
30176 /* Override starting rate (index 0) if needed for debug purposes */
30177 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30178
30179 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30180 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30181 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30182 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30183 int pos = 0;
30184 const size_t bufsz = sizeof(buf);
30185
30186 + pax_track_stack();
30187 +
30188 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30189 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30190 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30191 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30192 char buf[256 * NUM_IWL_RXON_CTX];
30193 const size_t bufsz = sizeof(buf);
30194
30195 + pax_track_stack();
30196 +
30197 for_each_context(priv, ctx) {
30198 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30199 ctx->ctxid);
30200 diff -urNp linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h
30201 --- linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30202 +++ linux-3.0.3/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30203 @@ -68,8 +68,8 @@ do {
30204 } while (0)
30205
30206 #else
30207 -#define IWL_DEBUG(__priv, level, fmt, args...)
30208 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30209 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30210 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30211 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30212 const void *p, u32 len)
30213 {}
30214 diff -urNp linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c
30215 --- linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30216 +++ linux-3.0.3/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30217 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30218 int buf_len = 512;
30219 size_t len = 0;
30220
30221 + pax_track_stack();
30222 +
30223 if (*ppos != 0)
30224 return 0;
30225 if (count < sizeof(buf))
30226 diff -urNp linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c
30227 --- linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30228 +++ linux-3.0.3/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30229 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30230 return -EINVAL;
30231
30232 if (fake_hw_scan) {
30233 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30234 - mac80211_hwsim_ops.sw_scan_start = NULL;
30235 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30236 + pax_open_kernel();
30237 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30238 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30239 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30240 + pax_close_kernel();
30241 }
30242
30243 spin_lock_init(&hwsim_radio_lock);
30244 diff -urNp linux-3.0.3/drivers/net/wireless/rndis_wlan.c linux-3.0.3/drivers/net/wireless/rndis_wlan.c
30245 --- linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30246 +++ linux-3.0.3/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30247 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30248
30249 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30250
30251 - if (rts_threshold < 0 || rts_threshold > 2347)
30252 + if (rts_threshold > 2347)
30253 rts_threshold = 2347;
30254
30255 tmp = cpu_to_le32(rts_threshold);
30256 diff -urNp linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30257 --- linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30258 +++ linux-3.0.3/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30259 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30260 u8 rfpath;
30261 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30262
30263 + pax_track_stack();
30264 +
30265 precommoncmdcnt = 0;
30266 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30267 MAX_PRECMD_CNT,
30268 diff -urNp linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h
30269 --- linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30270 +++ linux-3.0.3/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30271 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
30272 void (*reset)(struct wl1251 *wl);
30273 void (*enable_irq)(struct wl1251 *wl);
30274 void (*disable_irq)(struct wl1251 *wl);
30275 -};
30276 +} __no_const;
30277
30278 struct wl1251 {
30279 struct ieee80211_hw *hw;
30280 diff -urNp linux-3.0.3/drivers/net/wireless/wl12xx/spi.c linux-3.0.3/drivers/net/wireless/wl12xx/spi.c
30281 --- linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30282 +++ linux-3.0.3/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30283 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30284 u32 chunk_len;
30285 int i;
30286
30287 + pax_track_stack();
30288 +
30289 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30290
30291 spi_message_init(&m);
30292 diff -urNp linux-3.0.3/drivers/oprofile/buffer_sync.c linux-3.0.3/drivers/oprofile/buffer_sync.c
30293 --- linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30294 +++ linux-3.0.3/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30295 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30296 if (cookie == NO_COOKIE)
30297 offset = pc;
30298 if (cookie == INVALID_COOKIE) {
30299 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30300 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30301 offset = pc;
30302 }
30303 if (cookie != last_cookie) {
30304 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30305 /* add userspace sample */
30306
30307 if (!mm) {
30308 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30309 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30310 return 0;
30311 }
30312
30313 cookie = lookup_dcookie(mm, s->eip, &offset);
30314
30315 if (cookie == INVALID_COOKIE) {
30316 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30317 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30318 return 0;
30319 }
30320
30321 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30322 /* ignore backtraces if failed to add a sample */
30323 if (state == sb_bt_start) {
30324 state = sb_bt_ignore;
30325 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30326 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30327 }
30328 }
30329 release_mm(mm);
30330 diff -urNp linux-3.0.3/drivers/oprofile/event_buffer.c linux-3.0.3/drivers/oprofile/event_buffer.c
30331 --- linux-3.0.3/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30332 +++ linux-3.0.3/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30333 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30334 }
30335
30336 if (buffer_pos == buffer_size) {
30337 - atomic_inc(&oprofile_stats.event_lost_overflow);
30338 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30339 return;
30340 }
30341
30342 diff -urNp linux-3.0.3/drivers/oprofile/oprof.c linux-3.0.3/drivers/oprofile/oprof.c
30343 --- linux-3.0.3/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30344 +++ linux-3.0.3/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30345 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30346 if (oprofile_ops.switch_events())
30347 return;
30348
30349 - atomic_inc(&oprofile_stats.multiplex_counter);
30350 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30351 start_switch_worker();
30352 }
30353
30354 diff -urNp linux-3.0.3/drivers/oprofile/oprofilefs.c linux-3.0.3/drivers/oprofile/oprofilefs.c
30355 --- linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30356 +++ linux-3.0.3/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30357 @@ -186,7 +186,7 @@ static const struct file_operations atom
30358
30359
30360 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30361 - char const *name, atomic_t *val)
30362 + char const *name, atomic_unchecked_t *val)
30363 {
30364 return __oprofilefs_create_file(sb, root, name,
30365 &atomic_ro_fops, 0444, val);
30366 diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.c linux-3.0.3/drivers/oprofile/oprofile_stats.c
30367 --- linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30368 +++ linux-3.0.3/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30369 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30370 cpu_buf->sample_invalid_eip = 0;
30371 }
30372
30373 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30374 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30375 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30376 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30377 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30378 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30379 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30380 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30381 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30382 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30383 }
30384
30385
30386 diff -urNp linux-3.0.3/drivers/oprofile/oprofile_stats.h linux-3.0.3/drivers/oprofile/oprofile_stats.h
30387 --- linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30388 +++ linux-3.0.3/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30389 @@ -13,11 +13,11 @@
30390 #include <asm/atomic.h>
30391
30392 struct oprofile_stat_struct {
30393 - atomic_t sample_lost_no_mm;
30394 - atomic_t sample_lost_no_mapping;
30395 - atomic_t bt_lost_no_mapping;
30396 - atomic_t event_lost_overflow;
30397 - atomic_t multiplex_counter;
30398 + atomic_unchecked_t sample_lost_no_mm;
30399 + atomic_unchecked_t sample_lost_no_mapping;
30400 + atomic_unchecked_t bt_lost_no_mapping;
30401 + atomic_unchecked_t event_lost_overflow;
30402 + atomic_unchecked_t multiplex_counter;
30403 };
30404
30405 extern struct oprofile_stat_struct oprofile_stats;
30406 diff -urNp linux-3.0.3/drivers/parport/procfs.c linux-3.0.3/drivers/parport/procfs.c
30407 --- linux-3.0.3/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30408 +++ linux-3.0.3/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30409 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30410
30411 *ppos += len;
30412
30413 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30414 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30415 }
30416
30417 #ifdef CONFIG_PARPORT_1284
30418 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30419
30420 *ppos += len;
30421
30422 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30423 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30424 }
30425 #endif /* IEEE1284.3 support. */
30426
30427 diff -urNp linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h
30428 --- linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30429 +++ linux-3.0.3/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30430 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30431 int (*hardware_test) (struct slot* slot, u32 value);
30432 u8 (*get_power) (struct slot* slot);
30433 int (*set_power) (struct slot* slot, int value);
30434 -};
30435 +} __no_const;
30436
30437 struct cpci_hp_controller {
30438 unsigned int irq;
30439 diff -urNp linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c
30440 --- linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
30441 +++ linux-3.0.3/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
30442 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30443
30444 void compaq_nvram_init (void __iomem *rom_start)
30445 {
30446 +
30447 +#ifndef CONFIG_PAX_KERNEXEC
30448 if (rom_start) {
30449 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30450 }
30451 +#endif
30452 +
30453 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30454
30455 /* initialize our int15 lock */
30456 diff -urNp linux-3.0.3/drivers/pci/pcie/aspm.c linux-3.0.3/drivers/pci/pcie/aspm.c
30457 --- linux-3.0.3/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
30458 +++ linux-3.0.3/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
30459 @@ -27,9 +27,9 @@
30460 #define MODULE_PARAM_PREFIX "pcie_aspm."
30461
30462 /* Note: those are not register definitions */
30463 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30464 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30465 -#define ASPM_STATE_L1 (4) /* L1 state */
30466 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30467 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30468 +#define ASPM_STATE_L1 (4U) /* L1 state */
30469 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30470 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30471
30472 diff -urNp linux-3.0.3/drivers/pci/probe.c linux-3.0.3/drivers/pci/probe.c
30473 --- linux-3.0.3/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
30474 +++ linux-3.0.3/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
30475 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
30476 u32 l, sz, mask;
30477 u16 orig_cmd;
30478
30479 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30480 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30481
30482 if (!dev->mmio_always_on) {
30483 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30484 diff -urNp linux-3.0.3/drivers/pci/proc.c linux-3.0.3/drivers/pci/proc.c
30485 --- linux-3.0.3/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
30486 +++ linux-3.0.3/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
30487 @@ -476,7 +476,16 @@ static const struct file_operations proc
30488 static int __init pci_proc_init(void)
30489 {
30490 struct pci_dev *dev = NULL;
30491 +
30492 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30493 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30494 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30495 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30496 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30497 +#endif
30498 +#else
30499 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30500 +#endif
30501 proc_create("devices", 0, proc_bus_pci_dir,
30502 &proc_bus_pci_dev_operations);
30503 proc_initialized = 1;
30504 diff -urNp linux-3.0.3/drivers/pci/xen-pcifront.c linux-3.0.3/drivers/pci/xen-pcifront.c
30505 --- linux-3.0.3/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
30506 +++ linux-3.0.3/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
30507 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30508 struct pcifront_sd *sd = bus->sysdata;
30509 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30510
30511 + pax_track_stack();
30512 +
30513 if (verbose_request)
30514 dev_info(&pdev->xdev->dev,
30515 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30516 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30517 struct pcifront_sd *sd = bus->sysdata;
30518 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30519
30520 + pax_track_stack();
30521 +
30522 if (verbose_request)
30523 dev_info(&pdev->xdev->dev,
30524 "write dev=%04x:%02x:%02x.%01x - "
30525 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30526 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30527 struct msi_desc *entry;
30528
30529 + pax_track_stack();
30530 +
30531 if (nvec > SH_INFO_MAX_VEC) {
30532 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30533 " Increase SH_INFO_MAX_VEC.\n", nvec);
30534 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30535 struct pcifront_sd *sd = dev->bus->sysdata;
30536 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30537
30538 + pax_track_stack();
30539 +
30540 err = do_pci_op(pdev, &op);
30541
30542 /* What should do for error ? */
30543 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30544 struct pcifront_sd *sd = dev->bus->sysdata;
30545 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30546
30547 + pax_track_stack();
30548 +
30549 err = do_pci_op(pdev, &op);
30550 if (likely(!err)) {
30551 vector[0] = op.value;
30552 diff -urNp linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c
30553 --- linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
30554 +++ linux-3.0.3/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
30555 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
30556 return 0;
30557 }
30558
30559 -void static hotkey_mask_warn_incomplete_mask(void)
30560 +static void hotkey_mask_warn_incomplete_mask(void)
30561 {
30562 /* log only what the user can fix... */
30563 const u32 wantedmask = hotkey_driver_mask &
30564 diff -urNp linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c
30565 --- linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
30566 +++ linux-3.0.3/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
30567 @@ -59,7 +59,7 @@ do { \
30568 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30569 } while(0)
30570
30571 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30572 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30573 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30574
30575 /*
30576 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30577
30578 cpu = get_cpu();
30579 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30580 +
30581 + pax_open_kernel();
30582 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30583 + pax_close_kernel();
30584
30585 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30586 spin_lock_irqsave(&pnp_bios_lock, flags);
30587 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30588 :"memory");
30589 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30590
30591 + pax_open_kernel();
30592 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30593 + pax_close_kernel();
30594 +
30595 put_cpu();
30596
30597 /* If we get here and this is set then the PnP BIOS faulted on us. */
30598 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30599 return status;
30600 }
30601
30602 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30603 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30604 {
30605 int i;
30606
30607 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30608 pnp_bios_callpoint.offset = header->fields.pm16offset;
30609 pnp_bios_callpoint.segment = PNP_CS16;
30610
30611 + pax_open_kernel();
30612 +
30613 for_each_possible_cpu(i) {
30614 struct desc_struct *gdt = get_cpu_gdt_table(i);
30615 if (!gdt)
30616 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30617 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30618 (unsigned long)__va(header->fields.pm16dseg));
30619 }
30620 +
30621 + pax_close_kernel();
30622 }
30623 diff -urNp linux-3.0.3/drivers/pnp/resource.c linux-3.0.3/drivers/pnp/resource.c
30624 --- linux-3.0.3/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
30625 +++ linux-3.0.3/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
30626 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30627 return 1;
30628
30629 /* check if the resource is valid */
30630 - if (*irq < 0 || *irq > 15)
30631 + if (*irq > 15)
30632 return 0;
30633
30634 /* check if the resource is reserved */
30635 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30636 return 1;
30637
30638 /* check if the resource is valid */
30639 - if (*dma < 0 || *dma == 4 || *dma > 7)
30640 + if (*dma == 4 || *dma > 7)
30641 return 0;
30642
30643 /* check if the resource is reserved */
30644 diff -urNp linux-3.0.3/drivers/power/bq27x00_battery.c linux-3.0.3/drivers/power/bq27x00_battery.c
30645 --- linux-3.0.3/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
30646 +++ linux-3.0.3/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
30647 @@ -67,7 +67,7 @@
30648 struct bq27x00_device_info;
30649 struct bq27x00_access_methods {
30650 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30651 -};
30652 +} __no_const;
30653
30654 enum bq27x00_chip { BQ27000, BQ27500 };
30655
30656 diff -urNp linux-3.0.3/drivers/regulator/max8660.c linux-3.0.3/drivers/regulator/max8660.c
30657 --- linux-3.0.3/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
30658 +++ linux-3.0.3/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
30659 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30660 max8660->shadow_regs[MAX8660_OVER1] = 5;
30661 } else {
30662 /* Otherwise devices can be toggled via software */
30663 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30664 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30665 + pax_open_kernel();
30666 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30667 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30668 + pax_close_kernel();
30669 }
30670
30671 /*
30672 diff -urNp linux-3.0.3/drivers/regulator/mc13892-regulator.c linux-3.0.3/drivers/regulator/mc13892-regulator.c
30673 --- linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
30674 +++ linux-3.0.3/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
30675 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
30676 }
30677 mc13xxx_unlock(mc13892);
30678
30679 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30680 + pax_open_kernel();
30681 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30682 = mc13892_vcam_set_mode;
30683 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30684 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30685 = mc13892_vcam_get_mode;
30686 + pax_close_kernel();
30687 for (i = 0; i < pdata->num_regulators; i++) {
30688 init_data = &pdata->regulators[i];
30689 priv->regulators[i] = regulator_register(
30690 diff -urNp linux-3.0.3/drivers/rtc/rtc-dev.c linux-3.0.3/drivers/rtc/rtc-dev.c
30691 --- linux-3.0.3/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
30692 +++ linux-3.0.3/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
30693 @@ -14,6 +14,7 @@
30694 #include <linux/module.h>
30695 #include <linux/rtc.h>
30696 #include <linux/sched.h>
30697 +#include <linux/grsecurity.h>
30698 #include "rtc-core.h"
30699
30700 static dev_t rtc_devt;
30701 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30702 if (copy_from_user(&tm, uarg, sizeof(tm)))
30703 return -EFAULT;
30704
30705 + gr_log_timechange();
30706 +
30707 return rtc_set_time(rtc, &tm);
30708
30709 case RTC_PIE_ON:
30710 diff -urNp linux-3.0.3/drivers/scsi/aacraid/aacraid.h linux-3.0.3/drivers/scsi/aacraid/aacraid.h
30711 --- linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
30712 +++ linux-3.0.3/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
30713 @@ -492,7 +492,7 @@ struct adapter_ops
30714 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30715 /* Administrative operations */
30716 int (*adapter_comm)(struct aac_dev * dev, int comm);
30717 -};
30718 +} __no_const;
30719
30720 /*
30721 * Define which interrupt handler needs to be installed
30722 diff -urNp linux-3.0.3/drivers/scsi/aacraid/commctrl.c linux-3.0.3/drivers/scsi/aacraid/commctrl.c
30723 --- linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
30724 +++ linux-3.0.3/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
30725 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30726 u32 actual_fibsize64, actual_fibsize = 0;
30727 int i;
30728
30729 + pax_track_stack();
30730
30731 if (dev->in_reset) {
30732 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30733 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfad.c linux-3.0.3/drivers/scsi/bfa/bfad.c
30734 --- linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
30735 +++ linux-3.0.3/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
30736 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30737 struct bfad_vport_s *vport, *vport_new;
30738 struct bfa_fcs_driver_info_s driver_info;
30739
30740 + pax_track_stack();
30741 +
30742 /* Fill the driver_info info to fcs*/
30743 memset(&driver_info, 0, sizeof(driver_info));
30744 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30745 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c
30746 --- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
30747 +++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
30748 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30749 u16 len, count;
30750 u16 templen;
30751
30752 + pax_track_stack();
30753 +
30754 /*
30755 * get hba attributes
30756 */
30757 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30758 u8 count = 0;
30759 u16 templen;
30760
30761 + pax_track_stack();
30762 +
30763 /*
30764 * get port attributes
30765 */
30766 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c
30767 --- linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
30768 +++ linux-3.0.3/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
30769 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30770 struct fc_rpsc_speed_info_s speeds;
30771 struct bfa_port_attr_s pport_attr;
30772
30773 + pax_track_stack();
30774 +
30775 bfa_trc(port->fcs, rx_fchs->s_id);
30776 bfa_trc(port->fcs, rx_fchs->d_id);
30777
30778 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa.h linux-3.0.3/drivers/scsi/bfa/bfa.h
30779 --- linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
30780 +++ linux-3.0.3/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
30781 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30782 u32 *nvecs, u32 *maxvec);
30783 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30784 u32 *end);
30785 -};
30786 +} __no_const;
30787 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30788
30789 struct bfa_iocfc_s {
30790 diff -urNp linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h
30791 --- linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
30792 +++ linux-3.0.3/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
30793 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30794 bfa_ioc_disable_cbfn_t disable_cbfn;
30795 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30796 bfa_ioc_reset_cbfn_t reset_cbfn;
30797 -};
30798 +} __no_const;
30799
30800 /*
30801 * Heartbeat failure notification queue element.
30802 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
30803 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30804 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30805 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30806 -};
30807 +} __no_const;
30808
30809 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30810 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30811 diff -urNp linux-3.0.3/drivers/scsi/BusLogic.c linux-3.0.3/drivers/scsi/BusLogic.c
30812 --- linux-3.0.3/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
30813 +++ linux-3.0.3/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
30814 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30815 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30816 *PrototypeHostAdapter)
30817 {
30818 + pax_track_stack();
30819 +
30820 /*
30821 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30822 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30823 diff -urNp linux-3.0.3/drivers/scsi/dpt_i2o.c linux-3.0.3/drivers/scsi/dpt_i2o.c
30824 --- linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
30825 +++ linux-3.0.3/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
30826 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30827 dma_addr_t addr;
30828 ulong flags = 0;
30829
30830 + pax_track_stack();
30831 +
30832 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30833 // get user msg size in u32s
30834 if(get_user(size, &user_msg[0])){
30835 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
30836 s32 rcode;
30837 dma_addr_t addr;
30838
30839 + pax_track_stack();
30840 +
30841 memset(msg, 0 , sizeof(msg));
30842 len = scsi_bufflen(cmd);
30843 direction = 0x00000000;
30844 diff -urNp linux-3.0.3/drivers/scsi/eata.c linux-3.0.3/drivers/scsi/eata.c
30845 --- linux-3.0.3/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
30846 +++ linux-3.0.3/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
30847 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
30848 struct hostdata *ha;
30849 char name[16];
30850
30851 + pax_track_stack();
30852 +
30853 sprintf(name, "%s%d", driver_name, j);
30854
30855 if (!request_region(port_base, REGION_SIZE, driver_name)) {
30856 diff -urNp linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c
30857 --- linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
30858 +++ linux-3.0.3/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
30859 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
30860 } buf;
30861 int rc;
30862
30863 + pax_track_stack();
30864 +
30865 fiph = (struct fip_header *)skb->data;
30866 sub = fiph->fip_subcode;
30867
30868 diff -urNp linux-3.0.3/drivers/scsi/gdth.c linux-3.0.3/drivers/scsi/gdth.c
30869 --- linux-3.0.3/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
30870 +++ linux-3.0.3/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
30871 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
30872 unsigned long flags;
30873 gdth_ha_str *ha;
30874
30875 + pax_track_stack();
30876 +
30877 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
30878 return -EFAULT;
30879 ha = gdth_find_ha(ldrv.ionode);
30880 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
30881 gdth_ha_str *ha;
30882 int rval;
30883
30884 + pax_track_stack();
30885 +
30886 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
30887 res.number >= MAX_HDRIVES)
30888 return -EFAULT;
30889 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
30890 gdth_ha_str *ha;
30891 int rval;
30892
30893 + pax_track_stack();
30894 +
30895 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
30896 return -EFAULT;
30897 ha = gdth_find_ha(gen.ionode);
30898 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
30899 int i;
30900 gdth_cmd_str gdtcmd;
30901 char cmnd[MAX_COMMAND_SIZE];
30902 +
30903 + pax_track_stack();
30904 +
30905 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
30906
30907 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
30908 diff -urNp linux-3.0.3/drivers/scsi/gdth_proc.c linux-3.0.3/drivers/scsi/gdth_proc.c
30909 --- linux-3.0.3/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
30910 +++ linux-3.0.3/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
30911 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
30912 u64 paddr;
30913
30914 char cmnd[MAX_COMMAND_SIZE];
30915 +
30916 + pax_track_stack();
30917 +
30918 memset(cmnd, 0xff, 12);
30919 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
30920
30921 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
30922 gdth_hget_str *phg;
30923 char cmnd[MAX_COMMAND_SIZE];
30924
30925 + pax_track_stack();
30926 +
30927 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
30928 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
30929 if (!gdtcmd || !estr)
30930 diff -urNp linux-3.0.3/drivers/scsi/hosts.c linux-3.0.3/drivers/scsi/hosts.c
30931 --- linux-3.0.3/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
30932 +++ linux-3.0.3/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
30933 @@ -42,7 +42,7 @@
30934 #include "scsi_logging.h"
30935
30936
30937 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
30938 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
30939
30940
30941 static void scsi_host_cls_release(struct device *dev)
30942 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
30943 * subtract one because we increment first then return, but we need to
30944 * know what the next host number was before increment
30945 */
30946 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
30947 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
30948 shost->dma_channel = 0xff;
30949
30950 /* These three are default values which can be overridden */
30951 diff -urNp linux-3.0.3/drivers/scsi/hpsa.c linux-3.0.3/drivers/scsi/hpsa.c
30952 --- linux-3.0.3/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
30953 +++ linux-3.0.3/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
30954 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
30955 u32 a;
30956
30957 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
30958 - return h->access.command_completed(h);
30959 + return h->access->command_completed(h);
30960
30961 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
30962 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
30963 @@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
30964 while (!list_empty(&h->reqQ)) {
30965 c = list_entry(h->reqQ.next, struct CommandList, list);
30966 /* can't do anything if fifo is full */
30967 - if ((h->access.fifo_full(h))) {
30968 + if ((h->access->fifo_full(h))) {
30969 dev_warn(&h->pdev->dev, "fifo full\n");
30970 break;
30971 }
30972 @@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
30973 h->Qdepth--;
30974
30975 /* Tell the controller execute command */
30976 - h->access.submit_command(h, c);
30977 + h->access->submit_command(h, c);
30978
30979 /* Put job onto the completed Q */
30980 addQ(&h->cmpQ, c);
30981 @@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
30982
30983 static inline unsigned long get_next_completion(struct ctlr_info *h)
30984 {
30985 - return h->access.command_completed(h);
30986 + return h->access->command_completed(h);
30987 }
30988
30989 static inline bool interrupt_pending(struct ctlr_info *h)
30990 {
30991 - return h->access.intr_pending(h);
30992 + return h->access->intr_pending(h);
30993 }
30994
30995 static inline long interrupt_not_for_us(struct ctlr_info *h)
30996 {
30997 - return (h->access.intr_pending(h) == 0) ||
30998 + return (h->access->intr_pending(h) == 0) ||
30999 (h->interrupts_enabled == 0);
31000 }
31001
31002 @@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31003 if (prod_index < 0)
31004 return -ENODEV;
31005 h->product_name = products[prod_index].product_name;
31006 - h->access = *(products[prod_index].access);
31007 + h->access = products[prod_index].access;
31008
31009 if (hpsa_board_disabled(h->pdev)) {
31010 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31011 @@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31012 }
31013
31014 /* make sure the board interrupts are off */
31015 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31016 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31017
31018 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31019 goto clean2;
31020 @@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31021 * fake ones to scoop up any residual completions.
31022 */
31023 spin_lock_irqsave(&h->lock, flags);
31024 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31025 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31026 spin_unlock_irqrestore(&h->lock, flags);
31027 free_irq(h->intr[h->intr_mode], h);
31028 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31029 @@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31030 dev_info(&h->pdev->dev, "Board READY.\n");
31031 dev_info(&h->pdev->dev,
31032 "Waiting for stale completions to drain.\n");
31033 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31034 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31035 msleep(10000);
31036 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31037 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31038
31039 rc = controller_reset_failed(h->cfgtable);
31040 if (rc)
31041 @@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31042 }
31043
31044 /* Turn the interrupts on so we can service requests */
31045 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31046 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31047
31048 hpsa_hba_inquiry(h);
31049 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31050 @@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31051 * To write all data in the battery backed cache to disks
31052 */
31053 hpsa_flush_cache(h);
31054 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31055 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31056 free_irq(h->intr[h->intr_mode], h);
31057 #ifdef CONFIG_PCI_MSI
31058 if (h->msix_vector)
31059 @@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31060 return;
31061 }
31062 /* Change the access methods to the performant access methods */
31063 - h->access = SA5_performant_access;
31064 + h->access = &SA5_performant_access;
31065 h->transMethod = CFGTBL_Trans_Performant;
31066 }
31067
31068 diff -urNp linux-3.0.3/drivers/scsi/hpsa.h linux-3.0.3/drivers/scsi/hpsa.h
31069 --- linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:44:40.000000000 -0400
31070 +++ linux-3.0.3/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31071 @@ -73,7 +73,7 @@ struct ctlr_info {
31072 unsigned int msix_vector;
31073 unsigned int msi_vector;
31074 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31075 - struct access_method access;
31076 + struct access_method *access;
31077
31078 /* queue and queue Info */
31079 struct list_head reqQ;
31080 diff -urNp linux-3.0.3/drivers/scsi/ips.h linux-3.0.3/drivers/scsi/ips.h
31081 --- linux-3.0.3/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31082 +++ linux-3.0.3/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31083 @@ -1027,7 +1027,7 @@ typedef struct {
31084 int (*intr)(struct ips_ha *);
31085 void (*enableint)(struct ips_ha *);
31086 uint32_t (*statupd)(struct ips_ha *);
31087 -} ips_hw_func_t;
31088 +} __no_const ips_hw_func_t;
31089
31090 typedef struct ips_ha {
31091 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31092 diff -urNp linux-3.0.3/drivers/scsi/libfc/fc_exch.c linux-3.0.3/drivers/scsi/libfc/fc_exch.c
31093 --- linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31094 +++ linux-3.0.3/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31095 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31096 * all together if not used XXX
31097 */
31098 struct {
31099 - atomic_t no_free_exch;
31100 - atomic_t no_free_exch_xid;
31101 - atomic_t xid_not_found;
31102 - atomic_t xid_busy;
31103 - atomic_t seq_not_found;
31104 - atomic_t non_bls_resp;
31105 + atomic_unchecked_t no_free_exch;
31106 + atomic_unchecked_t no_free_exch_xid;
31107 + atomic_unchecked_t xid_not_found;
31108 + atomic_unchecked_t xid_busy;
31109 + atomic_unchecked_t seq_not_found;
31110 + atomic_unchecked_t non_bls_resp;
31111 } stats;
31112 };
31113
31114 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31115 /* allocate memory for exchange */
31116 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31117 if (!ep) {
31118 - atomic_inc(&mp->stats.no_free_exch);
31119 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31120 goto out;
31121 }
31122 memset(ep, 0, sizeof(*ep));
31123 @@ -761,7 +761,7 @@ out:
31124 return ep;
31125 err:
31126 spin_unlock_bh(&pool->lock);
31127 - atomic_inc(&mp->stats.no_free_exch_xid);
31128 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31129 mempool_free(ep, mp->ep_pool);
31130 return NULL;
31131 }
31132 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31133 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31134 ep = fc_exch_find(mp, xid);
31135 if (!ep) {
31136 - atomic_inc(&mp->stats.xid_not_found);
31137 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31138 reject = FC_RJT_OX_ID;
31139 goto out;
31140 }
31141 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31142 ep = fc_exch_find(mp, xid);
31143 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31144 if (ep) {
31145 - atomic_inc(&mp->stats.xid_busy);
31146 + atomic_inc_unchecked(&mp->stats.xid_busy);
31147 reject = FC_RJT_RX_ID;
31148 goto rel;
31149 }
31150 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31151 }
31152 xid = ep->xid; /* get our XID */
31153 } else if (!ep) {
31154 - atomic_inc(&mp->stats.xid_not_found);
31155 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31156 reject = FC_RJT_RX_ID; /* XID not found */
31157 goto out;
31158 }
31159 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31160 } else {
31161 sp = &ep->seq;
31162 if (sp->id != fh->fh_seq_id) {
31163 - atomic_inc(&mp->stats.seq_not_found);
31164 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31165 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31166 goto rel;
31167 }
31168 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31169
31170 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31171 if (!ep) {
31172 - atomic_inc(&mp->stats.xid_not_found);
31173 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31174 goto out;
31175 }
31176 if (ep->esb_stat & ESB_ST_COMPLETE) {
31177 - atomic_inc(&mp->stats.xid_not_found);
31178 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31179 goto rel;
31180 }
31181 if (ep->rxid == FC_XID_UNKNOWN)
31182 ep->rxid = ntohs(fh->fh_rx_id);
31183 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31184 - atomic_inc(&mp->stats.xid_not_found);
31185 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31186 goto rel;
31187 }
31188 if (ep->did != ntoh24(fh->fh_s_id) &&
31189 ep->did != FC_FID_FLOGI) {
31190 - atomic_inc(&mp->stats.xid_not_found);
31191 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31192 goto rel;
31193 }
31194 sof = fr_sof(fp);
31195 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31196 sp->ssb_stat |= SSB_ST_RESP;
31197 sp->id = fh->fh_seq_id;
31198 } else if (sp->id != fh->fh_seq_id) {
31199 - atomic_inc(&mp->stats.seq_not_found);
31200 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31201 goto rel;
31202 }
31203
31204 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31205 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31206
31207 if (!sp)
31208 - atomic_inc(&mp->stats.xid_not_found);
31209 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31210 else
31211 - atomic_inc(&mp->stats.non_bls_resp);
31212 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31213
31214 fc_frame_free(fp);
31215 }
31216 diff -urNp linux-3.0.3/drivers/scsi/libsas/sas_ata.c linux-3.0.3/drivers/scsi/libsas/sas_ata.c
31217 --- linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31218 +++ linux-3.0.3/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31219 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31220 .postreset = ata_std_postreset,
31221 .error_handler = ata_std_error_handler,
31222 .post_internal_cmd = sas_ata_post_internal,
31223 - .qc_defer = ata_std_qc_defer,
31224 + .qc_defer = ata_std_qc_defer,
31225 .qc_prep = ata_noop_qc_prep,
31226 .qc_issue = sas_ata_qc_issue,
31227 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31228 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c
31229 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31230 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31231 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31232
31233 #include <linux/debugfs.h>
31234
31235 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31236 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31237 static unsigned long lpfc_debugfs_start_time = 0L;
31238
31239 /* iDiag */
31240 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31241 lpfc_debugfs_enable = 0;
31242
31243 len = 0;
31244 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31245 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31246 (lpfc_debugfs_max_disc_trc - 1);
31247 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31248 dtp = vport->disc_trc + i;
31249 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31250 lpfc_debugfs_enable = 0;
31251
31252 len = 0;
31253 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31254 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31255 (lpfc_debugfs_max_slow_ring_trc - 1);
31256 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31257 dtp = phba->slow_ring_trc + i;
31258 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31259 uint32_t *ptr;
31260 char buffer[1024];
31261
31262 + pax_track_stack();
31263 +
31264 off = 0;
31265 spin_lock_irq(&phba->hbalock);
31266
31267 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31268 !vport || !vport->disc_trc)
31269 return;
31270
31271 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31272 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31273 (lpfc_debugfs_max_disc_trc - 1);
31274 dtp = vport->disc_trc + index;
31275 dtp->fmt = fmt;
31276 dtp->data1 = data1;
31277 dtp->data2 = data2;
31278 dtp->data3 = data3;
31279 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31280 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31281 dtp->jif = jiffies;
31282 #endif
31283 return;
31284 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31285 !phba || !phba->slow_ring_trc)
31286 return;
31287
31288 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31289 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31290 (lpfc_debugfs_max_slow_ring_trc - 1);
31291 dtp = phba->slow_ring_trc + index;
31292 dtp->fmt = fmt;
31293 dtp->data1 = data1;
31294 dtp->data2 = data2;
31295 dtp->data3 = data3;
31296 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31297 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31298 dtp->jif = jiffies;
31299 #endif
31300 return;
31301 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31302 "slow_ring buffer\n");
31303 goto debug_failed;
31304 }
31305 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31306 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31307 memset(phba->slow_ring_trc, 0,
31308 (sizeof(struct lpfc_debugfs_trc) *
31309 lpfc_debugfs_max_slow_ring_trc));
31310 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31311 "buffer\n");
31312 goto debug_failed;
31313 }
31314 - atomic_set(&vport->disc_trc_cnt, 0);
31315 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31316
31317 snprintf(name, sizeof(name), "discovery_trace");
31318 vport->debug_disc_trc =
31319 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc.h linux-3.0.3/drivers/scsi/lpfc/lpfc.h
31320 --- linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31321 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31322 @@ -420,7 +420,7 @@ struct lpfc_vport {
31323 struct dentry *debug_nodelist;
31324 struct dentry *vport_debugfs_root;
31325 struct lpfc_debugfs_trc *disc_trc;
31326 - atomic_t disc_trc_cnt;
31327 + atomic_unchecked_t disc_trc_cnt;
31328 #endif
31329 uint8_t stat_data_enabled;
31330 uint8_t stat_data_blocked;
31331 @@ -826,8 +826,8 @@ struct lpfc_hba {
31332 struct timer_list fabric_block_timer;
31333 unsigned long bit_flags;
31334 #define FABRIC_COMANDS_BLOCKED 0
31335 - atomic_t num_rsrc_err;
31336 - atomic_t num_cmd_success;
31337 + atomic_unchecked_t num_rsrc_err;
31338 + atomic_unchecked_t num_cmd_success;
31339 unsigned long last_rsrc_error_time;
31340 unsigned long last_ramp_down_time;
31341 unsigned long last_ramp_up_time;
31342 @@ -841,7 +841,7 @@ struct lpfc_hba {
31343 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31344 struct dentry *debug_slow_ring_trc;
31345 struct lpfc_debugfs_trc *slow_ring_trc;
31346 - atomic_t slow_ring_trc_cnt;
31347 + atomic_unchecked_t slow_ring_trc_cnt;
31348 /* iDiag debugfs sub-directory */
31349 struct dentry *idiag_root;
31350 struct dentry *idiag_pci_cfg;
31351 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c
31352 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31353 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31354 @@ -9923,8 +9923,10 @@ lpfc_init(void)
31355 printk(LPFC_COPYRIGHT "\n");
31356
31357 if (lpfc_enable_npiv) {
31358 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31359 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31360 + pax_open_kernel();
31361 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31362 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31363 + pax_close_kernel();
31364 }
31365 lpfc_transport_template =
31366 fc_attach_transport(&lpfc_transport_functions);
31367 diff -urNp linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c
31368 --- linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31369 +++ linux-3.0.3/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31370 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31371 uint32_t evt_posted;
31372
31373 spin_lock_irqsave(&phba->hbalock, flags);
31374 - atomic_inc(&phba->num_rsrc_err);
31375 + atomic_inc_unchecked(&phba->num_rsrc_err);
31376 phba->last_rsrc_error_time = jiffies;
31377
31378 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31379 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31380 unsigned long flags;
31381 struct lpfc_hba *phba = vport->phba;
31382 uint32_t evt_posted;
31383 - atomic_inc(&phba->num_cmd_success);
31384 + atomic_inc_unchecked(&phba->num_cmd_success);
31385
31386 if (vport->cfg_lun_queue_depth <= queue_depth)
31387 return;
31388 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31389 unsigned long num_rsrc_err, num_cmd_success;
31390 int i;
31391
31392 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31393 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31394 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31395 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31396
31397 vports = lpfc_create_vport_work_array(phba);
31398 if (vports != NULL)
31399 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31400 }
31401 }
31402 lpfc_destroy_vport_work_array(phba, vports);
31403 - atomic_set(&phba->num_rsrc_err, 0);
31404 - atomic_set(&phba->num_cmd_success, 0);
31405 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31406 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31407 }
31408
31409 /**
31410 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31411 }
31412 }
31413 lpfc_destroy_vport_work_array(phba, vports);
31414 - atomic_set(&phba->num_rsrc_err, 0);
31415 - atomic_set(&phba->num_cmd_success, 0);
31416 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31417 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31418 }
31419
31420 /**
31421 diff -urNp linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c
31422 --- linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31423 +++ linux-3.0.3/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31424 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31425 int rval;
31426 int i;
31427
31428 + pax_track_stack();
31429 +
31430 // Allocate memory for the base list of scb for management module.
31431 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31432
31433 diff -urNp linux-3.0.3/drivers/scsi/osd/osd_initiator.c linux-3.0.3/drivers/scsi/osd/osd_initiator.c
31434 --- linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31435 +++ linux-3.0.3/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31436 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31437 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31438 int ret;
31439
31440 + pax_track_stack();
31441 +
31442 or = osd_start_request(od, GFP_KERNEL);
31443 if (!or)
31444 return -ENOMEM;
31445 diff -urNp linux-3.0.3/drivers/scsi/pmcraid.c linux-3.0.3/drivers/scsi/pmcraid.c
31446 --- linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:44:40.000000000 -0400
31447 +++ linux-3.0.3/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
31448 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31449 res->scsi_dev = scsi_dev;
31450 scsi_dev->hostdata = res;
31451 res->change_detected = 0;
31452 - atomic_set(&res->read_failures, 0);
31453 - atomic_set(&res->write_failures, 0);
31454 + atomic_set_unchecked(&res->read_failures, 0);
31455 + atomic_set_unchecked(&res->write_failures, 0);
31456 rc = 0;
31457 }
31458 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31459 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31460
31461 /* If this was a SCSI read/write command keep count of errors */
31462 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31463 - atomic_inc(&res->read_failures);
31464 + atomic_inc_unchecked(&res->read_failures);
31465 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31466 - atomic_inc(&res->write_failures);
31467 + atomic_inc_unchecked(&res->write_failures);
31468
31469 if (!RES_IS_GSCSI(res->cfg_entry) &&
31470 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31471 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31472 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31473 * hrrq_id assigned here in queuecommand
31474 */
31475 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31476 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31477 pinstance->num_hrrq;
31478 cmd->cmd_done = pmcraid_io_done;
31479
31480 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31481 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31482 * hrrq_id assigned here in queuecommand
31483 */
31484 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31485 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31486 pinstance->num_hrrq;
31487
31488 if (request_size) {
31489 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
31490
31491 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31492 /* add resources only after host is added into system */
31493 - if (!atomic_read(&pinstance->expose_resources))
31494 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31495 return;
31496
31497 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31498 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
31499 init_waitqueue_head(&pinstance->reset_wait_q);
31500
31501 atomic_set(&pinstance->outstanding_cmds, 0);
31502 - atomic_set(&pinstance->last_message_id, 0);
31503 - atomic_set(&pinstance->expose_resources, 0);
31504 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31505 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31506
31507 INIT_LIST_HEAD(&pinstance->free_res_q);
31508 INIT_LIST_HEAD(&pinstance->used_res_q);
31509 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
31510 /* Schedule worker thread to handle CCN and take care of adding and
31511 * removing devices to OS
31512 */
31513 - atomic_set(&pinstance->expose_resources, 1);
31514 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31515 schedule_work(&pinstance->worker_q);
31516 return rc;
31517
31518 diff -urNp linux-3.0.3/drivers/scsi/pmcraid.h linux-3.0.3/drivers/scsi/pmcraid.h
31519 --- linux-3.0.3/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
31520 +++ linux-3.0.3/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
31521 @@ -749,7 +749,7 @@ struct pmcraid_instance {
31522 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31523
31524 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31525 - atomic_t last_message_id;
31526 + atomic_unchecked_t last_message_id;
31527
31528 /* configuration table */
31529 struct pmcraid_config_table *cfg_table;
31530 @@ -778,7 +778,7 @@ struct pmcraid_instance {
31531 atomic_t outstanding_cmds;
31532
31533 /* should add/delete resources to mid-layer now ?*/
31534 - atomic_t expose_resources;
31535 + atomic_unchecked_t expose_resources;
31536
31537
31538
31539 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
31540 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31541 };
31542 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31543 - atomic_t read_failures; /* count of failed READ commands */
31544 - atomic_t write_failures; /* count of failed WRITE commands */
31545 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31546 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31547
31548 /* To indicate add/delete/modify during CCN */
31549 u8 change_detected;
31550 diff -urNp linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h
31551 --- linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
31552 +++ linux-3.0.3/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
31553 @@ -2244,7 +2244,7 @@ struct isp_operations {
31554 int (*get_flash_version) (struct scsi_qla_host *, void *);
31555 int (*start_scsi) (srb_t *);
31556 int (*abort_isp) (struct scsi_qla_host *);
31557 -};
31558 +} __no_const;
31559
31560 /* MSI-X Support *************************************************************/
31561
31562 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h
31563 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
31564 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
31565 @@ -256,7 +256,7 @@ struct ddb_entry {
31566 atomic_t retry_relogin_timer; /* Min Time between relogins
31567 * (4000 only) */
31568 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31569 - atomic_t relogin_retry_count; /* Num of times relogin has been
31570 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31571 * retried */
31572
31573 uint16_t port;
31574 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c
31575 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
31576 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
31577 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31578 ddb_entry->fw_ddb_index = fw_ddb_index;
31579 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31580 atomic_set(&ddb_entry->relogin_timer, 0);
31581 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31582 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31583 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31584 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31585 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31586 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31587 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31588 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31589 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31590 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31591 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31592 atomic_set(&ddb_entry->relogin_timer, 0);
31593 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31594 iscsi_unblock_session(ddb_entry->sess);
31595 diff -urNp linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c
31596 --- linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
31597 +++ linux-3.0.3/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
31598 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
31599 ddb_entry->fw_ddb_device_state ==
31600 DDB_DS_SESSION_FAILED) {
31601 /* Reset retry relogin timer */
31602 - atomic_inc(&ddb_entry->relogin_retry_count);
31603 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31604 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31605 " timed out-retrying"
31606 " relogin (%d)\n",
31607 ha->host_no,
31608 ddb_entry->fw_ddb_index,
31609 - atomic_read(&ddb_entry->
31610 + atomic_read_unchecked(&ddb_entry->
31611 relogin_retry_count))
31612 );
31613 start_dpc++;
31614 diff -urNp linux-3.0.3/drivers/scsi/scsi.c linux-3.0.3/drivers/scsi/scsi.c
31615 --- linux-3.0.3/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
31616 +++ linux-3.0.3/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
31617 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31618 unsigned long timeout;
31619 int rtn = 0;
31620
31621 - atomic_inc(&cmd->device->iorequest_cnt);
31622 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31623
31624 /* check if the device is still usable */
31625 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31626 diff -urNp linux-3.0.3/drivers/scsi/scsi_debug.c linux-3.0.3/drivers/scsi/scsi_debug.c
31627 --- linux-3.0.3/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
31628 +++ linux-3.0.3/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
31629 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31630 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31631 unsigned char *cmd = (unsigned char *)scp->cmnd;
31632
31633 + pax_track_stack();
31634 +
31635 if ((errsts = check_readiness(scp, 1, devip)))
31636 return errsts;
31637 memset(arr, 0, sizeof(arr));
31638 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31639 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31640 unsigned char *cmd = (unsigned char *)scp->cmnd;
31641
31642 + pax_track_stack();
31643 +
31644 if ((errsts = check_readiness(scp, 1, devip)))
31645 return errsts;
31646 memset(arr, 0, sizeof(arr));
31647 diff -urNp linux-3.0.3/drivers/scsi/scsi_lib.c linux-3.0.3/drivers/scsi/scsi_lib.c
31648 --- linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:44:40.000000000 -0400
31649 +++ linux-3.0.3/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
31650 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
31651 shost = sdev->host;
31652 scsi_init_cmd_errh(cmd);
31653 cmd->result = DID_NO_CONNECT << 16;
31654 - atomic_inc(&cmd->device->iorequest_cnt);
31655 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31656
31657 /*
31658 * SCSI request completion path will do scsi_device_unbusy(),
31659 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
31660
31661 INIT_LIST_HEAD(&cmd->eh_entry);
31662
31663 - atomic_inc(&cmd->device->iodone_cnt);
31664 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31665 if (cmd->result)
31666 - atomic_inc(&cmd->device->ioerr_cnt);
31667 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31668
31669 disposition = scsi_decide_disposition(cmd);
31670 if (disposition != SUCCESS &&
31671 diff -urNp linux-3.0.3/drivers/scsi/scsi_sysfs.c linux-3.0.3/drivers/scsi/scsi_sysfs.c
31672 --- linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
31673 +++ linux-3.0.3/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
31674 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31675 char *buf) \
31676 { \
31677 struct scsi_device *sdev = to_scsi_device(dev); \
31678 - unsigned long long count = atomic_read(&sdev->field); \
31679 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31680 return snprintf(buf, 20, "0x%llx\n", count); \
31681 } \
31682 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31683 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_fc.c linux-3.0.3/drivers/scsi/scsi_transport_fc.c
31684 --- linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
31685 +++ linux-3.0.3/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
31686 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31687 * Netlink Infrastructure
31688 */
31689
31690 -static atomic_t fc_event_seq;
31691 +static atomic_unchecked_t fc_event_seq;
31692
31693 /**
31694 * fc_get_event_number - Obtain the next sequential FC event number
31695 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
31696 u32
31697 fc_get_event_number(void)
31698 {
31699 - return atomic_add_return(1, &fc_event_seq);
31700 + return atomic_add_return_unchecked(1, &fc_event_seq);
31701 }
31702 EXPORT_SYMBOL(fc_get_event_number);
31703
31704 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
31705 {
31706 int error;
31707
31708 - atomic_set(&fc_event_seq, 0);
31709 + atomic_set_unchecked(&fc_event_seq, 0);
31710
31711 error = transport_class_register(&fc_host_class);
31712 if (error)
31713 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
31714 char *cp;
31715
31716 *val = simple_strtoul(buf, &cp, 0);
31717 - if ((*cp && (*cp != '\n')) || (*val < 0))
31718 + if (*cp && (*cp != '\n'))
31719 return -EINVAL;
31720 /*
31721 * Check for overflow; dev_loss_tmo is u32
31722 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c
31723 --- linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
31724 +++ linux-3.0.3/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
31725 @@ -83,7 +83,7 @@ struct iscsi_internal {
31726 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31727 };
31728
31729 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31730 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31731 static struct workqueue_struct *iscsi_eh_timer_workq;
31732
31733 /*
31734 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31735 int err;
31736
31737 ihost = shost->shost_data;
31738 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31739 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31740
31741 if (id == ISCSI_MAX_TARGET) {
31742 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31743 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31744 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31745 ISCSI_TRANSPORT_VERSION);
31746
31747 - atomic_set(&iscsi_session_nr, 0);
31748 + atomic_set_unchecked(&iscsi_session_nr, 0);
31749
31750 err = class_register(&iscsi_transport_class);
31751 if (err)
31752 diff -urNp linux-3.0.3/drivers/scsi/scsi_transport_srp.c linux-3.0.3/drivers/scsi/scsi_transport_srp.c
31753 --- linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
31754 +++ linux-3.0.3/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
31755 @@ -33,7 +33,7 @@
31756 #include "scsi_transport_srp_internal.h"
31757
31758 struct srp_host_attrs {
31759 - atomic_t next_port_id;
31760 + atomic_unchecked_t next_port_id;
31761 };
31762 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31763
31764 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31765 struct Scsi_Host *shost = dev_to_shost(dev);
31766 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31767
31768 - atomic_set(&srp_host->next_port_id, 0);
31769 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31770 return 0;
31771 }
31772
31773 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31774 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31775 rport->roles = ids->roles;
31776
31777 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31778 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31779 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31780
31781 transport_setup_device(&rport->dev);
31782 diff -urNp linux-3.0.3/drivers/scsi/sg.c linux-3.0.3/drivers/scsi/sg.c
31783 --- linux-3.0.3/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
31784 +++ linux-3.0.3/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
31785 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31786 const struct file_operations * fops;
31787 };
31788
31789 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31790 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31791 {"allow_dio", &adio_fops},
31792 {"debug", &debug_fops},
31793 {"def_reserved_size", &dressz_fops},
31794 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31795 {
31796 int k, mask;
31797 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31798 - struct sg_proc_leaf * leaf;
31799 + const struct sg_proc_leaf * leaf;
31800
31801 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31802 if (!sg_proc_sgp)
31803 diff -urNp linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c
31804 --- linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
31805 +++ linux-3.0.3/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
31806 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31807 int do_iounmap = 0;
31808 int do_disable_device = 1;
31809
31810 + pax_track_stack();
31811 +
31812 memset(&sym_dev, 0, sizeof(sym_dev));
31813 memset(&nvram, 0, sizeof(nvram));
31814 sym_dev.pdev = pdev;
31815 diff -urNp linux-3.0.3/drivers/scsi/vmw_pvscsi.c linux-3.0.3/drivers/scsi/vmw_pvscsi.c
31816 --- linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
31817 +++ linux-3.0.3/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
31818 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31819 dma_addr_t base;
31820 unsigned i;
31821
31822 + pax_track_stack();
31823 +
31824 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31825 cmd.reqRingNumPages = adapter->req_pages;
31826 cmd.cmpRingNumPages = adapter->cmp_pages;
31827 diff -urNp linux-3.0.3/drivers/spi/spi.c linux-3.0.3/drivers/spi/spi.c
31828 --- linux-3.0.3/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
31829 +++ linux-3.0.3/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
31830 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31831 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31832
31833 /* portable code must never pass more than 32 bytes */
31834 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31835 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
31836
31837 static u8 *buf;
31838
31839 diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c
31840 --- linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:44:40.000000000 -0400
31841 +++ linux-3.0.3/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
31842 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
31843 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
31844
31845
31846 -static struct net_device_ops ar6000_netdev_ops = {
31847 +static net_device_ops_no_const ar6000_netdev_ops = {
31848 .ndo_init = NULL,
31849 .ndo_open = ar6000_open,
31850 .ndo_stop = ar6000_close,
31851 diff -urNp linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
31852 --- linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
31853 +++ linux-3.0.3/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
31854 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
31855 typedef struct ar6k_pal_config_s
31856 {
31857 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
31858 -}ar6k_pal_config_t;
31859 +} __no_const ar6k_pal_config_t;
31860
31861 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
31862 #endif /* _AR6K_PAL_H_ */
31863 diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31864 --- linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
31865 +++ linux-3.0.3/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
31866 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31867 free_netdev(ifp->net);
31868 }
31869 /* Allocate etherdev, including space for private structure */
31870 - ifp->net = alloc_etherdev(sizeof(dhd));
31871 + ifp->net = alloc_etherdev(sizeof(*dhd));
31872 if (!ifp->net) {
31873 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31874 ret = -ENOMEM;
31875 }
31876 if (ret == 0) {
31877 strcpy(ifp->net->name, ifp->name);
31878 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31879 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31880 err = dhd_net_attach(&dhd->pub, ifp->idx);
31881 if (err != 0) {
31882 DHD_ERROR(("%s: dhd_net_attach failed, "
31883 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31884 strcpy(nv_path, nvram_path);
31885
31886 /* Allocate etherdev, including space for private structure */
31887 - net = alloc_etherdev(sizeof(dhd));
31888 + net = alloc_etherdev(sizeof(*dhd));
31889 if (!net) {
31890 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31891 goto fail;
31892 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31893 /*
31894 * Save the dhd_info into the priv
31895 */
31896 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31897 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31898
31899 /* Set network interface name if it was provided as module parameter */
31900 if (iface_name[0]) {
31901 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
31902 /*
31903 * Save the dhd_info into the priv
31904 */
31905 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
31906 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
31907
31908 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
31909 g_bus = bus;
31910 diff -urNp linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
31911 --- linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
31912 +++ linux-3.0.3/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
31913 @@ -593,7 +593,7 @@ struct phy_func_ptr {
31914 initfn_t carrsuppr;
31915 rxsigpwrfn_t rxsigpwr;
31916 detachfn_t detach;
31917 -};
31918 +} __no_const;
31919 typedef struct phy_func_ptr phy_func_ptr_t;
31920
31921 struct phy_info {
31922 diff -urNp linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h
31923 --- linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
31924 +++ linux-3.0.3/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
31925 @@ -185,7 +185,7 @@ typedef struct {
31926 u16 func, uint bustype, void *regsva, void *param);
31927 /* detach from device */
31928 void (*detach) (void *ch);
31929 -} bcmsdh_driver_t;
31930 +} __no_const bcmsdh_driver_t;
31931
31932 /* platform specific/high level functions */
31933 extern int bcmsdh_register(bcmsdh_driver_t *driver);
31934 diff -urNp linux-3.0.3/drivers/staging/et131x/et1310_tx.c linux-3.0.3/drivers/staging/et131x/et1310_tx.c
31935 --- linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
31936 +++ linux-3.0.3/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
31937 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
31938 struct net_device_stats *stats = &etdev->net_stats;
31939
31940 if (tcb->flags & fMP_DEST_BROAD)
31941 - atomic_inc(&etdev->Stats.brdcstxmt);
31942 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
31943 else if (tcb->flags & fMP_DEST_MULTI)
31944 - atomic_inc(&etdev->Stats.multixmt);
31945 + atomic_inc_unchecked(&etdev->Stats.multixmt);
31946 else
31947 - atomic_inc(&etdev->Stats.unixmt);
31948 + atomic_inc_unchecked(&etdev->Stats.unixmt);
31949
31950 if (tcb->skb) {
31951 stats->tx_bytes += tcb->skb->len;
31952 diff -urNp linux-3.0.3/drivers/staging/et131x/et131x_adapter.h linux-3.0.3/drivers/staging/et131x/et131x_adapter.h
31953 --- linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
31954 +++ linux-3.0.3/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
31955 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
31956 * operations
31957 */
31958 u32 unircv; /* # multicast packets received */
31959 - atomic_t unixmt; /* # multicast packets for Tx */
31960 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
31961 u32 multircv; /* # multicast packets received */
31962 - atomic_t multixmt; /* # multicast packets for Tx */
31963 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
31964 u32 brdcstrcv; /* # broadcast packets received */
31965 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
31966 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
31967 u32 norcvbuf; /* # Rx packets discarded */
31968 u32 noxmtbuf; /* # Tx packets discarded */
31969
31970 diff -urNp linux-3.0.3/drivers/staging/hv/channel.c linux-3.0.3/drivers/staging/hv/channel.c
31971 --- linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:44:40.000000000 -0400
31972 +++ linux-3.0.3/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
31973 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
31974 int ret = 0;
31975 int t;
31976
31977 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31978 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31979 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31980 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31981
31982 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31983 if (ret)
31984 diff -urNp linux-3.0.3/drivers/staging/hv/hv.c linux-3.0.3/drivers/staging/hv/hv.c
31985 --- linux-3.0.3/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
31986 +++ linux-3.0.3/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
31987 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
31988 u64 output_address = (output) ? virt_to_phys(output) : 0;
31989 u32 output_address_hi = output_address >> 32;
31990 u32 output_address_lo = output_address & 0xFFFFFFFF;
31991 - volatile void *hypercall_page = hv_context.hypercall_page;
31992 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31993
31994 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31995 "=a"(hv_status_lo) : "d" (control_hi),
31996 diff -urNp linux-3.0.3/drivers/staging/hv/hv_mouse.c linux-3.0.3/drivers/staging/hv/hv_mouse.c
31997 --- linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
31998 +++ linux-3.0.3/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
31999 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32000 if (hid_dev) {
32001 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32002
32003 - hid_dev->ll_driver->open = mousevsc_hid_open;
32004 - hid_dev->ll_driver->close = mousevsc_hid_close;
32005 + pax_open_kernel();
32006 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32007 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32008 + pax_close_kernel();
32009
32010 hid_dev->bus = BUS_VIRTUAL;
32011 hid_dev->vendor = input_device_ctx->device_info.vendor;
32012 diff -urNp linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h
32013 --- linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32014 +++ linux-3.0.3/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32015 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
32016 struct vmbus_connection {
32017 enum vmbus_connect_state conn_state;
32018
32019 - atomic_t next_gpadl_handle;
32020 + atomic_unchecked_t next_gpadl_handle;
32021
32022 /*
32023 * Represents channel interrupts. Each bit position represents a
32024 diff -urNp linux-3.0.3/drivers/staging/hv/rndis_filter.c linux-3.0.3/drivers/staging/hv/rndis_filter.c
32025 --- linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:44:40.000000000 -0400
32026 +++ linux-3.0.3/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32027 @@ -43,7 +43,7 @@ struct rndis_device {
32028
32029 enum rndis_device_state state;
32030 u32 link_stat;
32031 - atomic_t new_req_id;
32032 + atomic_unchecked_t new_req_id;
32033
32034 spinlock_t request_lock;
32035 struct list_head req_list;
32036 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32037 * template
32038 */
32039 set = &rndis_msg->msg.set_req;
32040 - set->req_id = atomic_inc_return(&dev->new_req_id);
32041 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32042
32043 /* Add to the request list */
32044 spin_lock_irqsave(&dev->request_lock, flags);
32045 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32046
32047 /* Setup the rndis set */
32048 halt = &request->request_msg.msg.halt_req;
32049 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32050 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32051
32052 /* Ignore return since this msg is optional. */
32053 rndis_filter_send_request(dev, request);
32054 diff -urNp linux-3.0.3/drivers/staging/hv/vmbus_drv.c linux-3.0.3/drivers/staging/hv/vmbus_drv.c
32055 --- linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32056 +++ linux-3.0.3/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32057 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32058 {
32059 int ret = 0;
32060
32061 - static atomic_t device_num = ATOMIC_INIT(0);
32062 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32063
32064 /* Set the device name. Otherwise, device_register() will fail. */
32065 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32066 - atomic_inc_return(&device_num));
32067 + atomic_inc_return_unchecked(&device_num));
32068
32069 /* The new device belongs to this bus */
32070 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32071 diff -urNp linux-3.0.3/drivers/staging/iio/ring_generic.h linux-3.0.3/drivers/staging/iio/ring_generic.h
32072 --- linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32073 +++ linux-3.0.3/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32074 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32075
32076 int (*is_enabled)(struct iio_ring_buffer *ring);
32077 int (*enable)(struct iio_ring_buffer *ring);
32078 -};
32079 +} __no_const;
32080
32081 struct iio_ring_setup_ops {
32082 int (*preenable)(struct iio_dev *);
32083 diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet.c linux-3.0.3/drivers/staging/octeon/ethernet.c
32084 --- linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32085 +++ linux-3.0.3/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32086 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32087 * since the RX tasklet also increments it.
32088 */
32089 #ifdef CONFIG_64BIT
32090 - atomic64_add(rx_status.dropped_packets,
32091 - (atomic64_t *)&priv->stats.rx_dropped);
32092 + atomic64_add_unchecked(rx_status.dropped_packets,
32093 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32094 #else
32095 - atomic_add(rx_status.dropped_packets,
32096 - (atomic_t *)&priv->stats.rx_dropped);
32097 + atomic_add_unchecked(rx_status.dropped_packets,
32098 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32099 #endif
32100 }
32101
32102 diff -urNp linux-3.0.3/drivers/staging/octeon/ethernet-rx.c linux-3.0.3/drivers/staging/octeon/ethernet-rx.c
32103 --- linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32104 +++ linux-3.0.3/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32105 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32106 /* Increment RX stats for virtual ports */
32107 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32108 #ifdef CONFIG_64BIT
32109 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32110 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32111 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32112 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32113 #else
32114 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32115 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32116 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32117 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32118 #endif
32119 }
32120 netif_receive_skb(skb);
32121 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32122 dev->name);
32123 */
32124 #ifdef CONFIG_64BIT
32125 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32126 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32127 #else
32128 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32129 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32130 #endif
32131 dev_kfree_skb_irq(skb);
32132 }
32133 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/inode.c linux-3.0.3/drivers/staging/pohmelfs/inode.c
32134 --- linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32135 +++ linux-3.0.3/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32136 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32137 mutex_init(&psb->mcache_lock);
32138 psb->mcache_root = RB_ROOT;
32139 psb->mcache_timeout = msecs_to_jiffies(5000);
32140 - atomic_long_set(&psb->mcache_gen, 0);
32141 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32142
32143 psb->trans_max_pages = 100;
32144
32145 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32146 INIT_LIST_HEAD(&psb->crypto_ready_list);
32147 INIT_LIST_HEAD(&psb->crypto_active_list);
32148
32149 - atomic_set(&psb->trans_gen, 1);
32150 + atomic_set_unchecked(&psb->trans_gen, 1);
32151 atomic_long_set(&psb->total_inodes, 0);
32152
32153 mutex_init(&psb->state_lock);
32154 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/mcache.c linux-3.0.3/drivers/staging/pohmelfs/mcache.c
32155 --- linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32156 +++ linux-3.0.3/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32157 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32158 m->data = data;
32159 m->start = start;
32160 m->size = size;
32161 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32162 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32163
32164 mutex_lock(&psb->mcache_lock);
32165 err = pohmelfs_mcache_insert(psb, m);
32166 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/netfs.h linux-3.0.3/drivers/staging/pohmelfs/netfs.h
32167 --- linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32168 +++ linux-3.0.3/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32169 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32170 struct pohmelfs_sb {
32171 struct rb_root mcache_root;
32172 struct mutex mcache_lock;
32173 - atomic_long_t mcache_gen;
32174 + atomic_long_unchecked_t mcache_gen;
32175 unsigned long mcache_timeout;
32176
32177 unsigned int idx;
32178
32179 unsigned int trans_retries;
32180
32181 - atomic_t trans_gen;
32182 + atomic_unchecked_t trans_gen;
32183
32184 unsigned int crypto_attached_size;
32185 unsigned int crypto_align_size;
32186 diff -urNp linux-3.0.3/drivers/staging/pohmelfs/trans.c linux-3.0.3/drivers/staging/pohmelfs/trans.c
32187 --- linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32188 +++ linux-3.0.3/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32189 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32190 int err;
32191 struct netfs_cmd *cmd = t->iovec.iov_base;
32192
32193 - t->gen = atomic_inc_return(&psb->trans_gen);
32194 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32195
32196 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32197 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32198 diff -urNp linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h
32199 --- linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32200 +++ linux-3.0.3/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32201 @@ -83,7 +83,7 @@ struct _io_ops {
32202 u8 *pmem);
32203 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32204 u8 *pmem);
32205 -};
32206 +} __no_const;
32207
32208 struct io_req {
32209 struct list_head list;
32210 diff -urNp linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c
32211 --- linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32212 +++ linux-3.0.3/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32213 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32214 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32215
32216 if (rlen)
32217 - if (copy_to_user(data, &resp, rlen))
32218 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32219 return -EFAULT;
32220
32221 return 0;
32222 diff -urNp linux-3.0.3/drivers/staging/tty/stallion.c linux-3.0.3/drivers/staging/tty/stallion.c
32223 --- linux-3.0.3/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32224 +++ linux-3.0.3/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32225 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32226 struct stlport stl_dummyport;
32227 struct stlport *portp;
32228
32229 + pax_track_stack();
32230 +
32231 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32232 return -EFAULT;
32233 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32234 diff -urNp linux-3.0.3/drivers/staging/usbip/usbip_common.h linux-3.0.3/drivers/staging/usbip/usbip_common.h
32235 --- linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32236 +++ linux-3.0.3/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32237 @@ -315,7 +315,7 @@ struct usbip_device {
32238 void (*shutdown)(struct usbip_device *);
32239 void (*reset)(struct usbip_device *);
32240 void (*unusable)(struct usbip_device *);
32241 - } eh_ops;
32242 + } __no_const eh_ops;
32243 };
32244
32245 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32246 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci.h linux-3.0.3/drivers/staging/usbip/vhci.h
32247 --- linux-3.0.3/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32248 +++ linux-3.0.3/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32249 @@ -94,7 +94,7 @@ struct vhci_hcd {
32250 unsigned resuming:1;
32251 unsigned long re_timeout;
32252
32253 - atomic_t seqnum;
32254 + atomic_unchecked_t seqnum;
32255
32256 /*
32257 * NOTE:
32258 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_hcd.c linux-3.0.3/drivers/staging/usbip/vhci_hcd.c
32259 --- linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:44:40.000000000 -0400
32260 +++ linux-3.0.3/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32261 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32262 return;
32263 }
32264
32265 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32266 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32267 if (priv->seqnum == 0xffff)
32268 dev_info(&urb->dev->dev, "seqnum max\n");
32269
32270 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32271 return -ENOMEM;
32272 }
32273
32274 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32275 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32276 if (unlink->seqnum == 0xffff)
32277 pr_info("seqnum max\n");
32278
32279 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32280 vdev->rhport = rhport;
32281 }
32282
32283 - atomic_set(&vhci->seqnum, 0);
32284 + atomic_set_unchecked(&vhci->seqnum, 0);
32285 spin_lock_init(&vhci->lock);
32286
32287 hcd->power_budget = 0; /* no limit */
32288 diff -urNp linux-3.0.3/drivers/staging/usbip/vhci_rx.c linux-3.0.3/drivers/staging/usbip/vhci_rx.c
32289 --- linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32290 +++ linux-3.0.3/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32291 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32292 if (!urb) {
32293 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32294 pr_info("max seqnum %d\n",
32295 - atomic_read(&the_controller->seqnum));
32296 + atomic_read_unchecked(&the_controller->seqnum));
32297 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32298 return;
32299 }
32300 diff -urNp linux-3.0.3/drivers/staging/vt6655/hostap.c linux-3.0.3/drivers/staging/vt6655/hostap.c
32301 --- linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32302 +++ linux-3.0.3/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32303 @@ -79,14 +79,13 @@ static int msglevel
32304 *
32305 */
32306
32307 +static net_device_ops_no_const apdev_netdev_ops;
32308 +
32309 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32310 {
32311 PSDevice apdev_priv;
32312 struct net_device *dev = pDevice->dev;
32313 int ret;
32314 - const struct net_device_ops apdev_netdev_ops = {
32315 - .ndo_start_xmit = pDevice->tx_80211,
32316 - };
32317
32318 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32319
32320 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32321 *apdev_priv = *pDevice;
32322 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32323
32324 + /* only half broken now */
32325 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32326 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32327
32328 pDevice->apdev->type = ARPHRD_IEEE80211;
32329 diff -urNp linux-3.0.3/drivers/staging/vt6656/hostap.c linux-3.0.3/drivers/staging/vt6656/hostap.c
32330 --- linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32331 +++ linux-3.0.3/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32332 @@ -80,14 +80,13 @@ static int msglevel
32333 *
32334 */
32335
32336 +static net_device_ops_no_const apdev_netdev_ops;
32337 +
32338 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32339 {
32340 PSDevice apdev_priv;
32341 struct net_device *dev = pDevice->dev;
32342 int ret;
32343 - const struct net_device_ops apdev_netdev_ops = {
32344 - .ndo_start_xmit = pDevice->tx_80211,
32345 - };
32346
32347 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32348
32349 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32350 *apdev_priv = *pDevice;
32351 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32352
32353 + /* only half broken now */
32354 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32355 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32356
32357 pDevice->apdev->type = ARPHRD_IEEE80211;
32358 diff -urNp linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c
32359 --- linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32360 +++ linux-3.0.3/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32361 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32362
32363 struct usbctlx_completor {
32364 int (*complete) (struct usbctlx_completor *);
32365 -};
32366 +} __no_const;
32367
32368 static int
32369 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32370 diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.c linux-3.0.3/drivers/staging/zcache/tmem.c
32371 --- linux-3.0.3/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32372 +++ linux-3.0.3/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32373 @@ -39,7 +39,7 @@
32374 * A tmem host implementation must use this function to register callbacks
32375 * for memory allocation.
32376 */
32377 -static struct tmem_hostops tmem_hostops;
32378 +static tmem_hostops_no_const tmem_hostops;
32379
32380 static void tmem_objnode_tree_init(void);
32381
32382 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32383 * A tmem host implementation must use this function to register
32384 * callbacks for a page-accessible memory (PAM) implementation
32385 */
32386 -static struct tmem_pamops tmem_pamops;
32387 +static tmem_pamops_no_const tmem_pamops;
32388
32389 void tmem_register_pamops(struct tmem_pamops *m)
32390 {
32391 diff -urNp linux-3.0.3/drivers/staging/zcache/tmem.h linux-3.0.3/drivers/staging/zcache/tmem.h
32392 --- linux-3.0.3/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32393 +++ linux-3.0.3/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32394 @@ -171,6 +171,7 @@ struct tmem_pamops {
32395 int (*get_data)(struct page *, void *, struct tmem_pool *);
32396 void (*free)(void *, struct tmem_pool *);
32397 };
32398 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32399 extern void tmem_register_pamops(struct tmem_pamops *m);
32400
32401 /* memory allocation methods provided by the host implementation */
32402 @@ -180,6 +181,7 @@ struct tmem_hostops {
32403 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32404 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32405 };
32406 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32407 extern void tmem_register_hostops(struct tmem_hostops *m);
32408
32409 /* core tmem accessor functions */
32410 diff -urNp linux-3.0.3/drivers/target/target_core_alua.c linux-3.0.3/drivers/target/target_core_alua.c
32411 --- linux-3.0.3/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32412 +++ linux-3.0.3/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32413 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32414 char path[ALUA_METADATA_PATH_LEN];
32415 int len;
32416
32417 + pax_track_stack();
32418 +
32419 memset(path, 0, ALUA_METADATA_PATH_LEN);
32420
32421 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32422 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32423 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32424 int len;
32425
32426 + pax_track_stack();
32427 +
32428 memset(path, 0, ALUA_METADATA_PATH_LEN);
32429 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32430
32431 diff -urNp linux-3.0.3/drivers/target/target_core_cdb.c linux-3.0.3/drivers/target/target_core_cdb.c
32432 --- linux-3.0.3/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32433 +++ linux-3.0.3/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32434 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32435 int length = 0;
32436 unsigned char buf[SE_MODE_PAGE_BUF];
32437
32438 + pax_track_stack();
32439 +
32440 memset(buf, 0, SE_MODE_PAGE_BUF);
32441
32442 switch (cdb[2] & 0x3f) {
32443 diff -urNp linux-3.0.3/drivers/target/target_core_configfs.c linux-3.0.3/drivers/target/target_core_configfs.c
32444 --- linux-3.0.3/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
32445 +++ linux-3.0.3/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
32446 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
32447 ssize_t len = 0;
32448 int reg_count = 0, prf_isid;
32449
32450 + pax_track_stack();
32451 +
32452 if (!(su_dev->se_dev_ptr))
32453 return -ENODEV;
32454
32455 diff -urNp linux-3.0.3/drivers/target/target_core_pr.c linux-3.0.3/drivers/target/target_core_pr.c
32456 --- linux-3.0.3/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
32457 +++ linux-3.0.3/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
32458 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32459 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32460 u16 tpgt;
32461
32462 + pax_track_stack();
32463 +
32464 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32465 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32466 /*
32467 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32468 ssize_t len = 0;
32469 int reg_count = 0;
32470
32471 + pax_track_stack();
32472 +
32473 memset(buf, 0, pr_aptpl_buf_len);
32474 /*
32475 * Called to clear metadata once APTPL has been deactivated.
32476 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32477 char path[512];
32478 int ret;
32479
32480 + pax_track_stack();
32481 +
32482 memset(iov, 0, sizeof(struct iovec));
32483 memset(path, 0, 512);
32484
32485 diff -urNp linux-3.0.3/drivers/target/target_core_tmr.c linux-3.0.3/drivers/target/target_core_tmr.c
32486 --- linux-3.0.3/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
32487 +++ linux-3.0.3/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
32488 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
32489 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32490 T_TASK(cmd)->t_task_cdbs,
32491 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32492 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32493 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32494 atomic_read(&T_TASK(cmd)->t_transport_active),
32495 atomic_read(&T_TASK(cmd)->t_transport_stop),
32496 atomic_read(&T_TASK(cmd)->t_transport_sent));
32497 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
32498 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32499 " task: %p, t_fe_count: %d dev: %p\n", task,
32500 fe_count, dev);
32501 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32502 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32503 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32504 flags);
32505 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32506 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
32507 }
32508 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32509 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32510 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32511 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32512 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32513 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32514
32515 diff -urNp linux-3.0.3/drivers/target/target_core_transport.c linux-3.0.3/drivers/target/target_core_transport.c
32516 --- linux-3.0.3/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
32517 +++ linux-3.0.3/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
32518 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32519
32520 dev->queue_depth = dev_limits->queue_depth;
32521 atomic_set(&dev->depth_left, dev->queue_depth);
32522 - atomic_set(&dev->dev_ordered_id, 0);
32523 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32524
32525 se_dev_set_default_attribs(dev, dev_limits);
32526
32527 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32528 * Used to determine when ORDERED commands should go from
32529 * Dormant to Active status.
32530 */
32531 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32532 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32533 smp_mb__after_atomic_inc();
32534 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32535 cmd->se_ordered_id, cmd->sam_task_attr,
32536 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32537 " t_transport_active: %d t_transport_stop: %d"
32538 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32539 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32540 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32541 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32542 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32543 atomic_read(&T_TASK(cmd)->t_transport_active),
32544 atomic_read(&T_TASK(cmd)->t_transport_stop),
32545 @@ -2673,9 +2673,9 @@ check_depth:
32546 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32547 atomic_set(&task->task_active, 1);
32548 atomic_set(&task->task_sent, 1);
32549 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32550 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32551
32552 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32553 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32554 T_TASK(cmd)->t_task_cdbs)
32555 atomic_set(&cmd->transport_sent, 1);
32556
32557 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32558 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32559 }
32560 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32561 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32562 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32563 goto remove;
32564
32565 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32566 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32567 {
32568 int ret = 0;
32569
32570 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32571 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32572 if (!(send_status) ||
32573 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32574 return 1;
32575 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32576 */
32577 if (cmd->data_direction == DMA_TO_DEVICE) {
32578 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32579 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32580 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32581 smp_mb__after_atomic_inc();
32582 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32583 transport_new_cmd_failure(cmd);
32584 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32585 CMD_TFO(cmd)->get_task_tag(cmd),
32586 T_TASK(cmd)->t_task_cdbs,
32587 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32588 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32589 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32590 atomic_read(&T_TASK(cmd)->t_transport_active),
32591 atomic_read(&T_TASK(cmd)->t_transport_stop),
32592 atomic_read(&T_TASK(cmd)->t_transport_sent));
32593 diff -urNp linux-3.0.3/drivers/telephony/ixj.c linux-3.0.3/drivers/telephony/ixj.c
32594 --- linux-3.0.3/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
32595 +++ linux-3.0.3/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
32596 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32597 bool mContinue;
32598 char *pIn, *pOut;
32599
32600 + pax_track_stack();
32601 +
32602 if (!SCI_Prepare(j))
32603 return 0;
32604
32605 diff -urNp linux-3.0.3/drivers/tty/hvc/hvcs.c linux-3.0.3/drivers/tty/hvc/hvcs.c
32606 --- linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
32607 +++ linux-3.0.3/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
32608 @@ -83,6 +83,7 @@
32609 #include <asm/hvcserver.h>
32610 #include <asm/uaccess.h>
32611 #include <asm/vio.h>
32612 +#include <asm/local.h>
32613
32614 /*
32615 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32616 @@ -270,7 +271,7 @@ struct hvcs_struct {
32617 unsigned int index;
32618
32619 struct tty_struct *tty;
32620 - int open_count;
32621 + local_t open_count;
32622
32623 /*
32624 * Used to tell the driver kernel_thread what operations need to take
32625 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32626
32627 spin_lock_irqsave(&hvcsd->lock, flags);
32628
32629 - if (hvcsd->open_count > 0) {
32630 + if (local_read(&hvcsd->open_count) > 0) {
32631 spin_unlock_irqrestore(&hvcsd->lock, flags);
32632 printk(KERN_INFO "HVCS: vterm state unchanged. "
32633 "The hvcs device node is still in use.\n");
32634 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32635 if ((retval = hvcs_partner_connect(hvcsd)))
32636 goto error_release;
32637
32638 - hvcsd->open_count = 1;
32639 + local_set(&hvcsd->open_count, 1);
32640 hvcsd->tty = tty;
32641 tty->driver_data = hvcsd;
32642
32643 @@ -1179,7 +1180,7 @@ fast_open:
32644
32645 spin_lock_irqsave(&hvcsd->lock, flags);
32646 kref_get(&hvcsd->kref);
32647 - hvcsd->open_count++;
32648 + local_inc(&hvcsd->open_count);
32649 hvcsd->todo_mask |= HVCS_SCHED_READ;
32650 spin_unlock_irqrestore(&hvcsd->lock, flags);
32651
32652 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32653 hvcsd = tty->driver_data;
32654
32655 spin_lock_irqsave(&hvcsd->lock, flags);
32656 - if (--hvcsd->open_count == 0) {
32657 + if (local_dec_and_test(&hvcsd->open_count)) {
32658
32659 vio_disable_interrupts(hvcsd->vdev);
32660
32661 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32662 free_irq(irq, hvcsd);
32663 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32664 return;
32665 - } else if (hvcsd->open_count < 0) {
32666 + } else if (local_read(&hvcsd->open_count) < 0) {
32667 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32668 " is missmanaged.\n",
32669 - hvcsd->vdev->unit_address, hvcsd->open_count);
32670 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32671 }
32672
32673 spin_unlock_irqrestore(&hvcsd->lock, flags);
32674 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32675
32676 spin_lock_irqsave(&hvcsd->lock, flags);
32677 /* Preserve this so that we know how many kref refs to put */
32678 - temp_open_count = hvcsd->open_count;
32679 + temp_open_count = local_read(&hvcsd->open_count);
32680
32681 /*
32682 * Don't kref put inside the spinlock because the destruction
32683 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32684 hvcsd->tty->driver_data = NULL;
32685 hvcsd->tty = NULL;
32686
32687 - hvcsd->open_count = 0;
32688 + local_set(&hvcsd->open_count, 0);
32689
32690 /* This will drop any buffered data on the floor which is OK in a hangup
32691 * scenario. */
32692 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32693 * the middle of a write operation? This is a crummy place to do this
32694 * but we want to keep it all in the spinlock.
32695 */
32696 - if (hvcsd->open_count <= 0) {
32697 + if (local_read(&hvcsd->open_count) <= 0) {
32698 spin_unlock_irqrestore(&hvcsd->lock, flags);
32699 return -ENODEV;
32700 }
32701 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32702 {
32703 struct hvcs_struct *hvcsd = tty->driver_data;
32704
32705 - if (!hvcsd || hvcsd->open_count <= 0)
32706 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32707 return 0;
32708
32709 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32710 diff -urNp linux-3.0.3/drivers/tty/ipwireless/tty.c linux-3.0.3/drivers/tty/ipwireless/tty.c
32711 --- linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
32712 +++ linux-3.0.3/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
32713 @@ -29,6 +29,7 @@
32714 #include <linux/tty_driver.h>
32715 #include <linux/tty_flip.h>
32716 #include <linux/uaccess.h>
32717 +#include <asm/local.h>
32718
32719 #include "tty.h"
32720 #include "network.h"
32721 @@ -51,7 +52,7 @@ struct ipw_tty {
32722 int tty_type;
32723 struct ipw_network *network;
32724 struct tty_struct *linux_tty;
32725 - int open_count;
32726 + local_t open_count;
32727 unsigned int control_lines;
32728 struct mutex ipw_tty_mutex;
32729 int tx_bytes_queued;
32730 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32731 mutex_unlock(&tty->ipw_tty_mutex);
32732 return -ENODEV;
32733 }
32734 - if (tty->open_count == 0)
32735 + if (local_read(&tty->open_count) == 0)
32736 tty->tx_bytes_queued = 0;
32737
32738 - tty->open_count++;
32739 + local_inc(&tty->open_count);
32740
32741 tty->linux_tty = linux_tty;
32742 linux_tty->driver_data = tty;
32743 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32744
32745 static void do_ipw_close(struct ipw_tty *tty)
32746 {
32747 - tty->open_count--;
32748 -
32749 - if (tty->open_count == 0) {
32750 + if (local_dec_return(&tty->open_count) == 0) {
32751 struct tty_struct *linux_tty = tty->linux_tty;
32752
32753 if (linux_tty != NULL) {
32754 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32755 return;
32756
32757 mutex_lock(&tty->ipw_tty_mutex);
32758 - if (tty->open_count == 0) {
32759 + if (local_read(&tty->open_count) == 0) {
32760 mutex_unlock(&tty->ipw_tty_mutex);
32761 return;
32762 }
32763 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32764 return;
32765 }
32766
32767 - if (!tty->open_count) {
32768 + if (!local_read(&tty->open_count)) {
32769 mutex_unlock(&tty->ipw_tty_mutex);
32770 return;
32771 }
32772 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32773 return -ENODEV;
32774
32775 mutex_lock(&tty->ipw_tty_mutex);
32776 - if (!tty->open_count) {
32777 + if (!local_read(&tty->open_count)) {
32778 mutex_unlock(&tty->ipw_tty_mutex);
32779 return -EINVAL;
32780 }
32781 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32782 if (!tty)
32783 return -ENODEV;
32784
32785 - if (!tty->open_count)
32786 + if (!local_read(&tty->open_count))
32787 return -EINVAL;
32788
32789 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32790 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32791 if (!tty)
32792 return 0;
32793
32794 - if (!tty->open_count)
32795 + if (!local_read(&tty->open_count))
32796 return 0;
32797
32798 return tty->tx_bytes_queued;
32799 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32800 if (!tty)
32801 return -ENODEV;
32802
32803 - if (!tty->open_count)
32804 + if (!local_read(&tty->open_count))
32805 return -EINVAL;
32806
32807 return get_control_lines(tty);
32808 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32809 if (!tty)
32810 return -ENODEV;
32811
32812 - if (!tty->open_count)
32813 + if (!local_read(&tty->open_count))
32814 return -EINVAL;
32815
32816 return set_control_lines(tty, set, clear);
32817 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32818 if (!tty)
32819 return -ENODEV;
32820
32821 - if (!tty->open_count)
32822 + if (!local_read(&tty->open_count))
32823 return -EINVAL;
32824
32825 /* FIXME: Exactly how is the tty object locked here .. */
32826 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32827 against a parallel ioctl etc */
32828 mutex_lock(&ttyj->ipw_tty_mutex);
32829 }
32830 - while (ttyj->open_count)
32831 + while (local_read(&ttyj->open_count))
32832 do_ipw_close(ttyj);
32833 ipwireless_disassociate_network_ttys(network,
32834 ttyj->channel_idx);
32835 diff -urNp linux-3.0.3/drivers/tty/n_gsm.c linux-3.0.3/drivers/tty/n_gsm.c
32836 --- linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:44:40.000000000 -0400
32837 +++ linux-3.0.3/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
32838 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32839 return NULL;
32840 spin_lock_init(&dlci->lock);
32841 dlci->fifo = &dlci->_fifo;
32842 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32843 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32844 kfree(dlci);
32845 return NULL;
32846 }
32847 diff -urNp linux-3.0.3/drivers/tty/n_tty.c linux-3.0.3/drivers/tty/n_tty.c
32848 --- linux-3.0.3/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
32849 +++ linux-3.0.3/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
32850 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32851 {
32852 *ops = tty_ldisc_N_TTY;
32853 ops->owner = NULL;
32854 - ops->refcount = ops->flags = 0;
32855 + atomic_set(&ops->refcount, 0);
32856 + ops->flags = 0;
32857 }
32858 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32859 diff -urNp linux-3.0.3/drivers/tty/pty.c linux-3.0.3/drivers/tty/pty.c
32860 --- linux-3.0.3/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
32861 +++ linux-3.0.3/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
32862 @@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
32863 register_sysctl_table(pty_root_table);
32864
32865 /* Now create the /dev/ptmx special device */
32866 + pax_open_kernel();
32867 tty_default_fops(&ptmx_fops);
32868 - ptmx_fops.open = ptmx_open;
32869 + *(void **)&ptmx_fops.open = ptmx_open;
32870 + pax_close_kernel();
32871
32872 cdev_init(&ptmx_cdev, &ptmx_fops);
32873 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32874 diff -urNp linux-3.0.3/drivers/tty/rocket.c linux-3.0.3/drivers/tty/rocket.c
32875 --- linux-3.0.3/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
32876 +++ linux-3.0.3/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
32877 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32878 struct rocket_ports tmp;
32879 int board;
32880
32881 + pax_track_stack();
32882 +
32883 if (!retports)
32884 return -EFAULT;
32885 memset(&tmp, 0, sizeof (tmp));
32886 diff -urNp linux-3.0.3/drivers/tty/serial/kgdboc.c linux-3.0.3/drivers/tty/serial/kgdboc.c
32887 --- linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
32888 +++ linux-3.0.3/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
32889 @@ -23,8 +23,9 @@
32890 #define MAX_CONFIG_LEN 40
32891
32892 static struct kgdb_io kgdboc_io_ops;
32893 +static struct kgdb_io kgdboc_io_ops_console;
32894
32895 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32896 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32897 static int configured = -1;
32898
32899 static char config[MAX_CONFIG_LEN];
32900 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32901 kgdboc_unregister_kbd();
32902 if (configured == 1)
32903 kgdb_unregister_io_module(&kgdboc_io_ops);
32904 + else if (configured == 2)
32905 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
32906 }
32907
32908 static int configure_kgdboc(void)
32909 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32910 int err;
32911 char *cptr = config;
32912 struct console *cons;
32913 + int is_console = 0;
32914
32915 err = kgdboc_option_setup(config);
32916 if (err || !strlen(config) || isspace(config[0]))
32917 goto noconfig;
32918
32919 err = -ENODEV;
32920 - kgdboc_io_ops.is_console = 0;
32921 kgdb_tty_driver = NULL;
32922
32923 kgdboc_use_kms = 0;
32924 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32925 int idx;
32926 if (cons->device && cons->device(cons, &idx) == p &&
32927 idx == tty_line) {
32928 - kgdboc_io_ops.is_console = 1;
32929 + is_console = 1;
32930 break;
32931 }
32932 cons = cons->next;
32933 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
32934 kgdb_tty_line = tty_line;
32935
32936 do_register:
32937 - err = kgdb_register_io_module(&kgdboc_io_ops);
32938 + if (is_console) {
32939 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
32940 + configured = 2;
32941 + } else {
32942 + err = kgdb_register_io_module(&kgdboc_io_ops);
32943 + configured = 1;
32944 + }
32945 if (err)
32946 goto noconfig;
32947
32948 - configured = 1;
32949 -
32950 return 0;
32951
32952 noconfig:
32953 @@ -212,7 +219,7 @@ noconfig:
32954 static int __init init_kgdboc(void)
32955 {
32956 /* Already configured? */
32957 - if (configured == 1)
32958 + if (configured >= 1)
32959 return 0;
32960
32961 return configure_kgdboc();
32962 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
32963 if (config[len - 1] == '\n')
32964 config[len - 1] = '\0';
32965
32966 - if (configured == 1)
32967 + if (configured >= 1)
32968 cleanup_kgdboc();
32969
32970 /* Go and configure with the new params. */
32971 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
32972 .post_exception = kgdboc_post_exp_handler,
32973 };
32974
32975 +static struct kgdb_io kgdboc_io_ops_console = {
32976 + .name = "kgdboc",
32977 + .read_char = kgdboc_get_char,
32978 + .write_char = kgdboc_put_char,
32979 + .pre_exception = kgdboc_pre_exp_handler,
32980 + .post_exception = kgdboc_post_exp_handler,
32981 + .is_console = 1
32982 +};
32983 +
32984 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
32985 /* This is only available if kgdboc is a built in for early debugging */
32986 static int __init kgdboc_early_init(char *opt)
32987 diff -urNp linux-3.0.3/drivers/tty/serial/mrst_max3110.c linux-3.0.3/drivers/tty/serial/mrst_max3110.c
32988 --- linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
32989 +++ linux-3.0.3/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
32990 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
32991 int loop = 1, num, total = 0;
32992 u8 recv_buf[512], *pbuf;
32993
32994 + pax_track_stack();
32995 +
32996 pbuf = recv_buf;
32997 do {
32998 num = max3110_read_multi(max, pbuf);
32999 diff -urNp linux-3.0.3/drivers/tty/tty_io.c linux-3.0.3/drivers/tty/tty_io.c
33000 --- linux-3.0.3/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33001 +++ linux-3.0.3/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33002 @@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33003
33004 void tty_default_fops(struct file_operations *fops)
33005 {
33006 - *fops = tty_fops;
33007 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33008 }
33009
33010 /*
33011 diff -urNp linux-3.0.3/drivers/tty/tty_ldisc.c linux-3.0.3/drivers/tty/tty_ldisc.c
33012 --- linux-3.0.3/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33013 +++ linux-3.0.3/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33014 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33015 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33016 struct tty_ldisc_ops *ldo = ld->ops;
33017
33018 - ldo->refcount--;
33019 + atomic_dec(&ldo->refcount);
33020 module_put(ldo->owner);
33021 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33022
33023 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33024 spin_lock_irqsave(&tty_ldisc_lock, flags);
33025 tty_ldiscs[disc] = new_ldisc;
33026 new_ldisc->num = disc;
33027 - new_ldisc->refcount = 0;
33028 + atomic_set(&new_ldisc->refcount, 0);
33029 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33030
33031 return ret;
33032 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33033 return -EINVAL;
33034
33035 spin_lock_irqsave(&tty_ldisc_lock, flags);
33036 - if (tty_ldiscs[disc]->refcount)
33037 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33038 ret = -EBUSY;
33039 else
33040 tty_ldiscs[disc] = NULL;
33041 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33042 if (ldops) {
33043 ret = ERR_PTR(-EAGAIN);
33044 if (try_module_get(ldops->owner)) {
33045 - ldops->refcount++;
33046 + atomic_inc(&ldops->refcount);
33047 ret = ldops;
33048 }
33049 }
33050 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33051 unsigned long flags;
33052
33053 spin_lock_irqsave(&tty_ldisc_lock, flags);
33054 - ldops->refcount--;
33055 + atomic_dec(&ldops->refcount);
33056 module_put(ldops->owner);
33057 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33058 }
33059 diff -urNp linux-3.0.3/drivers/tty/vt/keyboard.c linux-3.0.3/drivers/tty/vt/keyboard.c
33060 --- linux-3.0.3/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33061 +++ linux-3.0.3/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33062 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33063 kbd->kbdmode == VC_OFF) &&
33064 value != KVAL(K_SAK))
33065 return; /* SAK is allowed even in raw mode */
33066 +
33067 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33068 + {
33069 + void *func = fn_handler[value];
33070 + if (func == fn_show_state || func == fn_show_ptregs ||
33071 + func == fn_show_mem)
33072 + return;
33073 + }
33074 +#endif
33075 +
33076 fn_handler[value](vc);
33077 }
33078
33079 diff -urNp linux-3.0.3/drivers/tty/vt/vt.c linux-3.0.3/drivers/tty/vt/vt.c
33080 --- linux-3.0.3/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33081 +++ linux-3.0.3/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33082 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33083
33084 static void notify_write(struct vc_data *vc, unsigned int unicode)
33085 {
33086 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33087 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33088 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33089 }
33090
33091 diff -urNp linux-3.0.3/drivers/tty/vt/vt_ioctl.c linux-3.0.3/drivers/tty/vt/vt_ioctl.c
33092 --- linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33093 +++ linux-3.0.3/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33094 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33095 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33096 return -EFAULT;
33097
33098 - if (!capable(CAP_SYS_TTY_CONFIG))
33099 - perm = 0;
33100 -
33101 switch (cmd) {
33102 case KDGKBENT:
33103 key_map = key_maps[s];
33104 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33105 val = (i ? K_HOLE : K_NOSUCHMAP);
33106 return put_user(val, &user_kbe->kb_value);
33107 case KDSKBENT:
33108 + if (!capable(CAP_SYS_TTY_CONFIG))
33109 + perm = 0;
33110 +
33111 if (!perm)
33112 return -EPERM;
33113 if (!i && v == K_NOSUCHMAP) {
33114 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33115 int i, j, k;
33116 int ret;
33117
33118 - if (!capable(CAP_SYS_TTY_CONFIG))
33119 - perm = 0;
33120 -
33121 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33122 if (!kbs) {
33123 ret = -ENOMEM;
33124 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33125 kfree(kbs);
33126 return ((p && *p) ? -EOVERFLOW : 0);
33127 case KDSKBSENT:
33128 + if (!capable(CAP_SYS_TTY_CONFIG))
33129 + perm = 0;
33130 +
33131 if (!perm) {
33132 ret = -EPERM;
33133 goto reterr;
33134 diff -urNp linux-3.0.3/drivers/uio/uio.c linux-3.0.3/drivers/uio/uio.c
33135 --- linux-3.0.3/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33136 +++ linux-3.0.3/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33137 @@ -25,6 +25,7 @@
33138 #include <linux/kobject.h>
33139 #include <linux/cdev.h>
33140 #include <linux/uio_driver.h>
33141 +#include <asm/local.h>
33142
33143 #define UIO_MAX_DEVICES (1U << MINORBITS)
33144
33145 @@ -32,10 +33,10 @@ struct uio_device {
33146 struct module *owner;
33147 struct device *dev;
33148 int minor;
33149 - atomic_t event;
33150 + atomic_unchecked_t event;
33151 struct fasync_struct *async_queue;
33152 wait_queue_head_t wait;
33153 - int vma_count;
33154 + local_t vma_count;
33155 struct uio_info *info;
33156 struct kobject *map_dir;
33157 struct kobject *portio_dir;
33158 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33159 struct device_attribute *attr, char *buf)
33160 {
33161 struct uio_device *idev = dev_get_drvdata(dev);
33162 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33163 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33164 }
33165
33166 static struct device_attribute uio_class_attributes[] = {
33167 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33168 {
33169 struct uio_device *idev = info->uio_dev;
33170
33171 - atomic_inc(&idev->event);
33172 + atomic_inc_unchecked(&idev->event);
33173 wake_up_interruptible(&idev->wait);
33174 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33175 }
33176 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33177 }
33178
33179 listener->dev = idev;
33180 - listener->event_count = atomic_read(&idev->event);
33181 + listener->event_count = atomic_read_unchecked(&idev->event);
33182 filep->private_data = listener;
33183
33184 if (idev->info->open) {
33185 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33186 return -EIO;
33187
33188 poll_wait(filep, &idev->wait, wait);
33189 - if (listener->event_count != atomic_read(&idev->event))
33190 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33191 return POLLIN | POLLRDNORM;
33192 return 0;
33193 }
33194 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33195 do {
33196 set_current_state(TASK_INTERRUPTIBLE);
33197
33198 - event_count = atomic_read(&idev->event);
33199 + event_count = atomic_read_unchecked(&idev->event);
33200 if (event_count != listener->event_count) {
33201 if (copy_to_user(buf, &event_count, count))
33202 retval = -EFAULT;
33203 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33204 static void uio_vma_open(struct vm_area_struct *vma)
33205 {
33206 struct uio_device *idev = vma->vm_private_data;
33207 - idev->vma_count++;
33208 + local_inc(&idev->vma_count);
33209 }
33210
33211 static void uio_vma_close(struct vm_area_struct *vma)
33212 {
33213 struct uio_device *idev = vma->vm_private_data;
33214 - idev->vma_count--;
33215 + local_dec(&idev->vma_count);
33216 }
33217
33218 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33219 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
33220 idev->owner = owner;
33221 idev->info = info;
33222 init_waitqueue_head(&idev->wait);
33223 - atomic_set(&idev->event, 0);
33224 + atomic_set_unchecked(&idev->event, 0);
33225
33226 ret = uio_get_minor(idev);
33227 if (ret)
33228 diff -urNp linux-3.0.3/drivers/usb/atm/cxacru.c linux-3.0.3/drivers/usb/atm/cxacru.c
33229 --- linux-3.0.3/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33230 +++ linux-3.0.3/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33231 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33232 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33233 if (ret < 2)
33234 return -EINVAL;
33235 - if (index < 0 || index > 0x7f)
33236 + if (index > 0x7f)
33237 return -EINVAL;
33238 pos += tmp;
33239
33240 diff -urNp linux-3.0.3/drivers/usb/atm/usbatm.c linux-3.0.3/drivers/usb/atm/usbatm.c
33241 --- linux-3.0.3/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33242 +++ linux-3.0.3/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33243 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33244 if (printk_ratelimit())
33245 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33246 __func__, vpi, vci);
33247 - atomic_inc(&vcc->stats->rx_err);
33248 + atomic_inc_unchecked(&vcc->stats->rx_err);
33249 return;
33250 }
33251
33252 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33253 if (length > ATM_MAX_AAL5_PDU) {
33254 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33255 __func__, length, vcc);
33256 - atomic_inc(&vcc->stats->rx_err);
33257 + atomic_inc_unchecked(&vcc->stats->rx_err);
33258 goto out;
33259 }
33260
33261 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33262 if (sarb->len < pdu_length) {
33263 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33264 __func__, pdu_length, sarb->len, vcc);
33265 - atomic_inc(&vcc->stats->rx_err);
33266 + atomic_inc_unchecked(&vcc->stats->rx_err);
33267 goto out;
33268 }
33269
33270 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33271 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33272 __func__, vcc);
33273 - atomic_inc(&vcc->stats->rx_err);
33274 + atomic_inc_unchecked(&vcc->stats->rx_err);
33275 goto out;
33276 }
33277
33278 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33279 if (printk_ratelimit())
33280 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33281 __func__, length);
33282 - atomic_inc(&vcc->stats->rx_drop);
33283 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33284 goto out;
33285 }
33286
33287 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33288
33289 vcc->push(vcc, skb);
33290
33291 - atomic_inc(&vcc->stats->rx);
33292 + atomic_inc_unchecked(&vcc->stats->rx);
33293 out:
33294 skb_trim(sarb, 0);
33295 }
33296 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33297 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33298
33299 usbatm_pop(vcc, skb);
33300 - atomic_inc(&vcc->stats->tx);
33301 + atomic_inc_unchecked(&vcc->stats->tx);
33302
33303 skb = skb_dequeue(&instance->sndqueue);
33304 }
33305 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33306 if (!left--)
33307 return sprintf(page,
33308 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33309 - atomic_read(&atm_dev->stats.aal5.tx),
33310 - atomic_read(&atm_dev->stats.aal5.tx_err),
33311 - atomic_read(&atm_dev->stats.aal5.rx),
33312 - atomic_read(&atm_dev->stats.aal5.rx_err),
33313 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33314 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33315 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33316 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33317 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33318 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33319
33320 if (!left--) {
33321 if (instance->disconnected)
33322 diff -urNp linux-3.0.3/drivers/usb/core/devices.c linux-3.0.3/drivers/usb/core/devices.c
33323 --- linux-3.0.3/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33324 +++ linux-3.0.3/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33325 @@ -126,7 +126,7 @@ static const char format_endpt[] =
33326 * time it gets called.
33327 */
33328 static struct device_connect_event {
33329 - atomic_t count;
33330 + atomic_unchecked_t count;
33331 wait_queue_head_t wait;
33332 } device_event = {
33333 .count = ATOMIC_INIT(1),
33334 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33335
33336 void usbfs_conn_disc_event(void)
33337 {
33338 - atomic_add(2, &device_event.count);
33339 + atomic_add_unchecked(2, &device_event.count);
33340 wake_up(&device_event.wait);
33341 }
33342
33343 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33344
33345 poll_wait(file, &device_event.wait, wait);
33346
33347 - event_count = atomic_read(&device_event.count);
33348 + event_count = atomic_read_unchecked(&device_event.count);
33349 if (file->f_version != event_count) {
33350 file->f_version = event_count;
33351 return POLLIN | POLLRDNORM;
33352 diff -urNp linux-3.0.3/drivers/usb/core/message.c linux-3.0.3/drivers/usb/core/message.c
33353 --- linux-3.0.3/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33354 +++ linux-3.0.3/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33355 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33356 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33357 if (buf) {
33358 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33359 - if (len > 0) {
33360 - smallbuf = kmalloc(++len, GFP_NOIO);
33361 + if (len++ > 0) {
33362 + smallbuf = kmalloc(len, GFP_NOIO);
33363 if (!smallbuf)
33364 return buf;
33365 memcpy(smallbuf, buf, len);
33366 diff -urNp linux-3.0.3/drivers/usb/early/ehci-dbgp.c linux-3.0.3/drivers/usb/early/ehci-dbgp.c
33367 --- linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33368 +++ linux-3.0.3/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33369 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33370
33371 #ifdef CONFIG_KGDB
33372 static struct kgdb_io kgdbdbgp_io_ops;
33373 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33374 +static struct kgdb_io kgdbdbgp_io_ops_console;
33375 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33376 #else
33377 #define dbgp_kgdb_mode (0)
33378 #endif
33379 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33380 .write_char = kgdbdbgp_write_char,
33381 };
33382
33383 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33384 + .name = "kgdbdbgp",
33385 + .read_char = kgdbdbgp_read_char,
33386 + .write_char = kgdbdbgp_write_char,
33387 + .is_console = 1
33388 +};
33389 +
33390 static int kgdbdbgp_wait_time;
33391
33392 static int __init kgdbdbgp_parse_config(char *str)
33393 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33394 ptr++;
33395 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33396 }
33397 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33398 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33399 + if (early_dbgp_console.index != -1)
33400 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33401 + else
33402 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33403
33404 return 0;
33405 }
33406 diff -urNp linux-3.0.3/drivers/usb/host/xhci-mem.c linux-3.0.3/drivers/usb/host/xhci-mem.c
33407 --- linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33408 +++ linux-3.0.3/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33409 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33410 unsigned int num_tests;
33411 int i, ret;
33412
33413 + pax_track_stack();
33414 +
33415 num_tests = ARRAY_SIZE(simple_test_vector);
33416 for (i = 0; i < num_tests; i++) {
33417 ret = xhci_test_trb_in_td(xhci,
33418 diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-hc.h linux-3.0.3/drivers/usb/wusbcore/wa-hc.h
33419 --- linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33420 +++ linux-3.0.3/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33421 @@ -192,7 +192,7 @@ struct wahc {
33422 struct list_head xfer_delayed_list;
33423 spinlock_t xfer_list_lock;
33424 struct work_struct xfer_work;
33425 - atomic_t xfer_id_count;
33426 + atomic_unchecked_t xfer_id_count;
33427 };
33428
33429
33430 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33431 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33432 spin_lock_init(&wa->xfer_list_lock);
33433 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33434 - atomic_set(&wa->xfer_id_count, 1);
33435 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33436 }
33437
33438 /**
33439 diff -urNp linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c
33440 --- linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
33441 +++ linux-3.0.3/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
33442 @@ -294,7 +294,7 @@ out:
33443 */
33444 static void wa_xfer_id_init(struct wa_xfer *xfer)
33445 {
33446 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33447 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33448 }
33449
33450 /*
33451 diff -urNp linux-3.0.3/drivers/vhost/vhost.c linux-3.0.3/drivers/vhost/vhost.c
33452 --- linux-3.0.3/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
33453 +++ linux-3.0.3/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
33454 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
33455 return get_user(vq->last_used_idx, &used->idx);
33456 }
33457
33458 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33459 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33460 {
33461 struct file *eventfp, *filep = NULL,
33462 *pollstart = NULL, *pollstop = NULL;
33463 diff -urNp linux-3.0.3/drivers/video/fbcmap.c linux-3.0.3/drivers/video/fbcmap.c
33464 --- linux-3.0.3/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
33465 +++ linux-3.0.3/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
33466 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33467 rc = -ENODEV;
33468 goto out;
33469 }
33470 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33471 - !info->fbops->fb_setcmap)) {
33472 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33473 rc = -EINVAL;
33474 goto out1;
33475 }
33476 diff -urNp linux-3.0.3/drivers/video/fbmem.c linux-3.0.3/drivers/video/fbmem.c
33477 --- linux-3.0.3/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
33478 +++ linux-3.0.3/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
33479 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33480 image->dx += image->width + 8;
33481 }
33482 } else if (rotate == FB_ROTATE_UD) {
33483 - for (x = 0; x < num && image->dx >= 0; x++) {
33484 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33485 info->fbops->fb_imageblit(info, image);
33486 image->dx -= image->width + 8;
33487 }
33488 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33489 image->dy += image->height + 8;
33490 }
33491 } else if (rotate == FB_ROTATE_CCW) {
33492 - for (x = 0; x < num && image->dy >= 0; x++) {
33493 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33494 info->fbops->fb_imageblit(info, image);
33495 image->dy -= image->height + 8;
33496 }
33497 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33498 int flags = info->flags;
33499 int ret = 0;
33500
33501 + pax_track_stack();
33502 +
33503 if (var->activate & FB_ACTIVATE_INV_MODE) {
33504 struct fb_videomode mode1, mode2;
33505
33506 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33507 void __user *argp = (void __user *)arg;
33508 long ret = 0;
33509
33510 + pax_track_stack();
33511 +
33512 switch (cmd) {
33513 case FBIOGET_VSCREENINFO:
33514 if (!lock_fb_info(info))
33515 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33516 return -EFAULT;
33517 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33518 return -EINVAL;
33519 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33520 + if (con2fb.framebuffer >= FB_MAX)
33521 return -EINVAL;
33522 if (!registered_fb[con2fb.framebuffer])
33523 request_module("fb%d", con2fb.framebuffer);
33524 diff -urNp linux-3.0.3/drivers/video/i810/i810_accel.c linux-3.0.3/drivers/video/i810/i810_accel.c
33525 --- linux-3.0.3/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
33526 +++ linux-3.0.3/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
33527 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33528 }
33529 }
33530 printk("ringbuffer lockup!!!\n");
33531 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33532 i810_report_error(mmio);
33533 par->dev_flags |= LOCKUP;
33534 info->pixmap.scan_align = 1;
33535 diff -urNp linux-3.0.3/drivers/video/udlfb.c linux-3.0.3/drivers/video/udlfb.c
33536 --- linux-3.0.3/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
33537 +++ linux-3.0.3/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
33538 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
33539 dlfb_urb_completion(urb);
33540
33541 error:
33542 - atomic_add(bytes_sent, &dev->bytes_sent);
33543 - atomic_add(bytes_identical, &dev->bytes_identical);
33544 - atomic_add(width*height*2, &dev->bytes_rendered);
33545 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33546 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33547 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33548 end_cycles = get_cycles();
33549 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33550 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33551 >> 10)), /* Kcycles */
33552 &dev->cpu_kcycles_used);
33553
33554 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
33555 dlfb_urb_completion(urb);
33556
33557 error:
33558 - atomic_add(bytes_sent, &dev->bytes_sent);
33559 - atomic_add(bytes_identical, &dev->bytes_identical);
33560 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33561 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33562 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33563 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33564 end_cycles = get_cycles();
33565 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33566 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33567 >> 10)), /* Kcycles */
33568 &dev->cpu_kcycles_used);
33569 }
33570 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
33571 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33572 struct dlfb_data *dev = fb_info->par;
33573 return snprintf(buf, PAGE_SIZE, "%u\n",
33574 - atomic_read(&dev->bytes_rendered));
33575 + atomic_read_unchecked(&dev->bytes_rendered));
33576 }
33577
33578 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33579 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
33580 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33581 struct dlfb_data *dev = fb_info->par;
33582 return snprintf(buf, PAGE_SIZE, "%u\n",
33583 - atomic_read(&dev->bytes_identical));
33584 + atomic_read_unchecked(&dev->bytes_identical));
33585 }
33586
33587 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33588 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
33589 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33590 struct dlfb_data *dev = fb_info->par;
33591 return snprintf(buf, PAGE_SIZE, "%u\n",
33592 - atomic_read(&dev->bytes_sent));
33593 + atomic_read_unchecked(&dev->bytes_sent));
33594 }
33595
33596 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33597 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
33598 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33599 struct dlfb_data *dev = fb_info->par;
33600 return snprintf(buf, PAGE_SIZE, "%u\n",
33601 - atomic_read(&dev->cpu_kcycles_used));
33602 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33603 }
33604
33605 static ssize_t edid_show(
33606 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
33607 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33608 struct dlfb_data *dev = fb_info->par;
33609
33610 - atomic_set(&dev->bytes_rendered, 0);
33611 - atomic_set(&dev->bytes_identical, 0);
33612 - atomic_set(&dev->bytes_sent, 0);
33613 - atomic_set(&dev->cpu_kcycles_used, 0);
33614 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33615 + atomic_set_unchecked(&dev->bytes_identical, 0);
33616 + atomic_set_unchecked(&dev->bytes_sent, 0);
33617 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33618
33619 return count;
33620 }
33621 diff -urNp linux-3.0.3/drivers/video/uvesafb.c linux-3.0.3/drivers/video/uvesafb.c
33622 --- linux-3.0.3/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
33623 +++ linux-3.0.3/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
33624 @@ -19,6 +19,7 @@
33625 #include <linux/io.h>
33626 #include <linux/mutex.h>
33627 #include <linux/slab.h>
33628 +#include <linux/moduleloader.h>
33629 #include <video/edid.h>
33630 #include <video/uvesafb.h>
33631 #ifdef CONFIG_X86
33632 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33633 NULL,
33634 };
33635
33636 - return call_usermodehelper(v86d_path, argv, envp, 1);
33637 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33638 }
33639
33640 /*
33641 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33642 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33643 par->pmi_setpal = par->ypan = 0;
33644 } else {
33645 +
33646 +#ifdef CONFIG_PAX_KERNEXEC
33647 +#ifdef CONFIG_MODULES
33648 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33649 +#endif
33650 + if (!par->pmi_code) {
33651 + par->pmi_setpal = par->ypan = 0;
33652 + return 0;
33653 + }
33654 +#endif
33655 +
33656 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33657 + task->t.regs.edi);
33658 +
33659 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33660 + pax_open_kernel();
33661 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33662 + pax_close_kernel();
33663 +
33664 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33665 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33666 +#else
33667 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33668 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33669 +#endif
33670 +
33671 printk(KERN_INFO "uvesafb: protected mode interface info at "
33672 "%04x:%04x\n",
33673 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33674 @@ -1821,6 +1844,11 @@ out:
33675 if (par->vbe_modes)
33676 kfree(par->vbe_modes);
33677
33678 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33679 + if (par->pmi_code)
33680 + module_free_exec(NULL, par->pmi_code);
33681 +#endif
33682 +
33683 framebuffer_release(info);
33684 return err;
33685 }
33686 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33687 kfree(par->vbe_state_orig);
33688 if (par->vbe_state_saved)
33689 kfree(par->vbe_state_saved);
33690 +
33691 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33692 + if (par->pmi_code)
33693 + module_free_exec(NULL, par->pmi_code);
33694 +#endif
33695 +
33696 }
33697
33698 framebuffer_release(info);
33699 diff -urNp linux-3.0.3/drivers/video/vesafb.c linux-3.0.3/drivers/video/vesafb.c
33700 --- linux-3.0.3/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
33701 +++ linux-3.0.3/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
33702 @@ -9,6 +9,7 @@
33703 */
33704
33705 #include <linux/module.h>
33706 +#include <linux/moduleloader.h>
33707 #include <linux/kernel.h>
33708 #include <linux/errno.h>
33709 #include <linux/string.h>
33710 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33711 static int vram_total __initdata; /* Set total amount of memory */
33712 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33713 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33714 -static void (*pmi_start)(void) __read_mostly;
33715 -static void (*pmi_pal) (void) __read_mostly;
33716 +static void (*pmi_start)(void) __read_only;
33717 +static void (*pmi_pal) (void) __read_only;
33718 static int depth __read_mostly;
33719 static int vga_compat __read_mostly;
33720 /* --------------------------------------------------------------------- */
33721 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
33722 unsigned int size_vmode;
33723 unsigned int size_remap;
33724 unsigned int size_total;
33725 + void *pmi_code = NULL;
33726
33727 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33728 return -ENODEV;
33729 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
33730 size_remap = size_total;
33731 vesafb_fix.smem_len = size_remap;
33732
33733 -#ifndef __i386__
33734 - screen_info.vesapm_seg = 0;
33735 -#endif
33736 -
33737 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33738 printk(KERN_WARNING
33739 "vesafb: cannot reserve video memory at 0x%lx\n",
33740 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
33741 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33742 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33743
33744 +#ifdef __i386__
33745 +
33746 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33747 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33748 + if (!pmi_code)
33749 +#elif !defined(CONFIG_PAX_KERNEXEC)
33750 + if (0)
33751 +#endif
33752 +
33753 +#endif
33754 + screen_info.vesapm_seg = 0;
33755 +
33756 if (screen_info.vesapm_seg) {
33757 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33758 - screen_info.vesapm_seg,screen_info.vesapm_off);
33759 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33760 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33761 }
33762
33763 if (screen_info.vesapm_seg < 0xc000)
33764 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
33765
33766 if (ypan || pmi_setpal) {
33767 unsigned short *pmi_base;
33768 +
33769 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33770 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33771 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33772 +
33773 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33774 + pax_open_kernel();
33775 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33776 +#else
33777 + pmi_code = pmi_base;
33778 +#endif
33779 +
33780 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33781 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33782 +
33783 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33784 + pmi_start = ktva_ktla(pmi_start);
33785 + pmi_pal = ktva_ktla(pmi_pal);
33786 + pax_close_kernel();
33787 +#endif
33788 +
33789 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33790 if (pmi_base[3]) {
33791 printk(KERN_INFO "vesafb: pmi: ports = ");
33792 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
33793 info->node, info->fix.id);
33794 return 0;
33795 err:
33796 +
33797 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33798 + module_free_exec(NULL, pmi_code);
33799 +#endif
33800 +
33801 if (info->screen_base)
33802 iounmap(info->screen_base);
33803 framebuffer_release(info);
33804 diff -urNp linux-3.0.3/drivers/video/via/via_clock.h linux-3.0.3/drivers/video/via/via_clock.h
33805 --- linux-3.0.3/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
33806 +++ linux-3.0.3/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
33807 @@ -56,7 +56,7 @@ struct via_clock {
33808
33809 void (*set_engine_pll_state)(u8 state);
33810 void (*set_engine_pll)(struct via_pll_config config);
33811 -};
33812 +} __no_const;
33813
33814
33815 static inline u32 get_pll_internal_frequency(u32 ref_freq,
33816 diff -urNp linux-3.0.3/drivers/virtio/virtio_balloon.c linux-3.0.3/drivers/virtio/virtio_balloon.c
33817 --- linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
33818 +++ linux-3.0.3/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
33819 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
33820 struct sysinfo i;
33821 int idx = 0;
33822
33823 + pax_track_stack();
33824 +
33825 all_vm_events(events);
33826 si_meminfo(&i);
33827
33828 diff -urNp linux-3.0.3/fs/9p/vfs_inode.c linux-3.0.3/fs/9p/vfs_inode.c
33829 --- linux-3.0.3/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
33830 +++ linux-3.0.3/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
33831 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33832 void
33833 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33834 {
33835 - char *s = nd_get_link(nd);
33836 + const char *s = nd_get_link(nd);
33837
33838 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33839 IS_ERR(s) ? "<error>" : s);
33840 diff -urNp linux-3.0.3/fs/aio.c linux-3.0.3/fs/aio.c
33841 --- linux-3.0.3/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
33842 +++ linux-3.0.3/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
33843 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33844 size += sizeof(struct io_event) * nr_events;
33845 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33846
33847 - if (nr_pages < 0)
33848 + if (nr_pages <= 0)
33849 return -EINVAL;
33850
33851 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33852 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33853 struct aio_timeout to;
33854 int retry = 0;
33855
33856 + pax_track_stack();
33857 +
33858 /* needed to zero any padding within an entry (there shouldn't be
33859 * any, but C is fun!
33860 */
33861 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33862 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33863 {
33864 ssize_t ret;
33865 + struct iovec iovstack;
33866
33867 #ifdef CONFIG_COMPAT
33868 if (compat)
33869 ret = compat_rw_copy_check_uvector(type,
33870 (struct compat_iovec __user *)kiocb->ki_buf,
33871 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33872 + kiocb->ki_nbytes, 1, &iovstack,
33873 &kiocb->ki_iovec);
33874 else
33875 #endif
33876 ret = rw_copy_check_uvector(type,
33877 (struct iovec __user *)kiocb->ki_buf,
33878 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33879 + kiocb->ki_nbytes, 1, &iovstack,
33880 &kiocb->ki_iovec);
33881 if (ret < 0)
33882 goto out;
33883
33884 + if (kiocb->ki_iovec == &iovstack) {
33885 + kiocb->ki_inline_vec = iovstack;
33886 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
33887 + }
33888 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33889 kiocb->ki_cur_seg = 0;
33890 /* ki_nbytes/left now reflect bytes instead of segs */
33891 diff -urNp linux-3.0.3/fs/attr.c linux-3.0.3/fs/attr.c
33892 --- linux-3.0.3/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
33893 +++ linux-3.0.3/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
33894 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33895 unsigned long limit;
33896
33897 limit = rlimit(RLIMIT_FSIZE);
33898 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33899 if (limit != RLIM_INFINITY && offset > limit)
33900 goto out_sig;
33901 if (offset > inode->i_sb->s_maxbytes)
33902 diff -urNp linux-3.0.3/fs/befs/linuxvfs.c linux-3.0.3/fs/befs/linuxvfs.c
33903 --- linux-3.0.3/fs/befs/linuxvfs.c 2011-07-21 22:17:23.000000000 -0400
33904 +++ linux-3.0.3/fs/befs/linuxvfs.c 2011-08-23 21:47:56.000000000 -0400
33905 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
33906 {
33907 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33908 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33909 - char *link = nd_get_link(nd);
33910 + const char *link = nd_get_link(nd);
33911 if (!IS_ERR(link))
33912 kfree(link);
33913 }
33914 diff -urNp linux-3.0.3/fs/binfmt_aout.c linux-3.0.3/fs/binfmt_aout.c
33915 --- linux-3.0.3/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
33916 +++ linux-3.0.3/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
33917 @@ -16,6 +16,7 @@
33918 #include <linux/string.h>
33919 #include <linux/fs.h>
33920 #include <linux/file.h>
33921 +#include <linux/security.h>
33922 #include <linux/stat.h>
33923 #include <linux/fcntl.h>
33924 #include <linux/ptrace.h>
33925 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33926 #endif
33927 # define START_STACK(u) ((void __user *)u.start_stack)
33928
33929 + memset(&dump, 0, sizeof(dump));
33930 +
33931 fs = get_fs();
33932 set_fs(KERNEL_DS);
33933 has_dumped = 1;
33934 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33935
33936 /* If the size of the dump file exceeds the rlimit, then see what would happen
33937 if we wrote the stack, but not the data area. */
33938 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33939 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33940 dump.u_dsize = 0;
33941
33942 /* Make sure we have enough room to write the stack and data areas. */
33943 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33944 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
33945 dump.u_ssize = 0;
33946
33947 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
33948 rlim = rlimit(RLIMIT_DATA);
33949 if (rlim >= RLIM_INFINITY)
33950 rlim = ~0;
33951 +
33952 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
33953 if (ex.a_data + ex.a_bss > rlim)
33954 return -ENOMEM;
33955
33956 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
33957 install_exec_creds(bprm);
33958 current->flags &= ~PF_FORKNOEXEC;
33959
33960 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
33961 + current->mm->pax_flags = 0UL;
33962 +#endif
33963 +
33964 +#ifdef CONFIG_PAX_PAGEEXEC
33965 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
33966 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
33967 +
33968 +#ifdef CONFIG_PAX_EMUTRAMP
33969 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
33970 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
33971 +#endif
33972 +
33973 +#ifdef CONFIG_PAX_MPROTECT
33974 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
33975 + current->mm->pax_flags |= MF_PAX_MPROTECT;
33976 +#endif
33977 +
33978 + }
33979 +#endif
33980 +
33981 if (N_MAGIC(ex) == OMAGIC) {
33982 unsigned long text_addr, map_size;
33983 loff_t pos;
33984 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
33985
33986 down_write(&current->mm->mmap_sem);
33987 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
33988 - PROT_READ | PROT_WRITE | PROT_EXEC,
33989 + PROT_READ | PROT_WRITE,
33990 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
33991 fd_offset + ex.a_text);
33992 up_write(&current->mm->mmap_sem);
33993 diff -urNp linux-3.0.3/fs/binfmt_elf.c linux-3.0.3/fs/binfmt_elf.c
33994 --- linux-3.0.3/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
33995 +++ linux-3.0.3/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
33996 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
33997 #define elf_core_dump NULL
33998 #endif
33999
34000 +#ifdef CONFIG_PAX_MPROTECT
34001 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34002 +#endif
34003 +
34004 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34005 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34006 #else
34007 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34008 .load_binary = load_elf_binary,
34009 .load_shlib = load_elf_library,
34010 .core_dump = elf_core_dump,
34011 +
34012 +#ifdef CONFIG_PAX_MPROTECT
34013 + .handle_mprotect= elf_handle_mprotect,
34014 +#endif
34015 +
34016 .min_coredump = ELF_EXEC_PAGESIZE,
34017 };
34018
34019 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34020
34021 static int set_brk(unsigned long start, unsigned long end)
34022 {
34023 + unsigned long e = end;
34024 +
34025 start = ELF_PAGEALIGN(start);
34026 end = ELF_PAGEALIGN(end);
34027 if (end > start) {
34028 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34029 if (BAD_ADDR(addr))
34030 return addr;
34031 }
34032 - current->mm->start_brk = current->mm->brk = end;
34033 + current->mm->start_brk = current->mm->brk = e;
34034 return 0;
34035 }
34036
34037 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34038 elf_addr_t __user *u_rand_bytes;
34039 const char *k_platform = ELF_PLATFORM;
34040 const char *k_base_platform = ELF_BASE_PLATFORM;
34041 - unsigned char k_rand_bytes[16];
34042 + u32 k_rand_bytes[4];
34043 int items;
34044 elf_addr_t *elf_info;
34045 int ei_index = 0;
34046 const struct cred *cred = current_cred();
34047 struct vm_area_struct *vma;
34048 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34049 +
34050 + pax_track_stack();
34051
34052 /*
34053 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34054 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34055 * Generate 16 random bytes for userspace PRNG seeding.
34056 */
34057 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34058 - u_rand_bytes = (elf_addr_t __user *)
34059 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34060 + srandom32(k_rand_bytes[0] ^ random32());
34061 + srandom32(k_rand_bytes[1] ^ random32());
34062 + srandom32(k_rand_bytes[2] ^ random32());
34063 + srandom32(k_rand_bytes[3] ^ random32());
34064 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34065 + u_rand_bytes = (elf_addr_t __user *) p;
34066 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34067 return -EFAULT;
34068
34069 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34070 return -EFAULT;
34071 current->mm->env_end = p;
34072
34073 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34074 +
34075 /* Put the elf_info on the stack in the right place. */
34076 sp = (elf_addr_t __user *)envp + 1;
34077 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34078 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34079 return -EFAULT;
34080 return 0;
34081 }
34082 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34083 {
34084 struct elf_phdr *elf_phdata;
34085 struct elf_phdr *eppnt;
34086 - unsigned long load_addr = 0;
34087 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34088 int load_addr_set = 0;
34089 unsigned long last_bss = 0, elf_bss = 0;
34090 - unsigned long error = ~0UL;
34091 + unsigned long error = -EINVAL;
34092 unsigned long total_size;
34093 int retval, i, size;
34094
34095 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34096 goto out_close;
34097 }
34098
34099 +#ifdef CONFIG_PAX_SEGMEXEC
34100 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34101 + pax_task_size = SEGMEXEC_TASK_SIZE;
34102 +#endif
34103 +
34104 eppnt = elf_phdata;
34105 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34106 if (eppnt->p_type == PT_LOAD) {
34107 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34108 k = load_addr + eppnt->p_vaddr;
34109 if (BAD_ADDR(k) ||
34110 eppnt->p_filesz > eppnt->p_memsz ||
34111 - eppnt->p_memsz > TASK_SIZE ||
34112 - TASK_SIZE - eppnt->p_memsz < k) {
34113 + eppnt->p_memsz > pax_task_size ||
34114 + pax_task_size - eppnt->p_memsz < k) {
34115 error = -ENOMEM;
34116 goto out_close;
34117 }
34118 @@ -528,6 +553,193 @@ out:
34119 return error;
34120 }
34121
34122 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34123 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34124 +{
34125 + unsigned long pax_flags = 0UL;
34126 +
34127 +#ifdef CONFIG_PAX_PAGEEXEC
34128 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34129 + pax_flags |= MF_PAX_PAGEEXEC;
34130 +#endif
34131 +
34132 +#ifdef CONFIG_PAX_SEGMEXEC
34133 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34134 + pax_flags |= MF_PAX_SEGMEXEC;
34135 +#endif
34136 +
34137 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34138 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34139 + if ((__supported_pte_mask & _PAGE_NX))
34140 + pax_flags &= ~MF_PAX_SEGMEXEC;
34141 + else
34142 + pax_flags &= ~MF_PAX_PAGEEXEC;
34143 + }
34144 +#endif
34145 +
34146 +#ifdef CONFIG_PAX_EMUTRAMP
34147 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34148 + pax_flags |= MF_PAX_EMUTRAMP;
34149 +#endif
34150 +
34151 +#ifdef CONFIG_PAX_MPROTECT
34152 + if (elf_phdata->p_flags & PF_MPROTECT)
34153 + pax_flags |= MF_PAX_MPROTECT;
34154 +#endif
34155 +
34156 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34157 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34158 + pax_flags |= MF_PAX_RANDMMAP;
34159 +#endif
34160 +
34161 + return pax_flags;
34162 +}
34163 +#endif
34164 +
34165 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34166 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34167 +{
34168 + unsigned long pax_flags = 0UL;
34169 +
34170 +#ifdef CONFIG_PAX_PAGEEXEC
34171 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34172 + pax_flags |= MF_PAX_PAGEEXEC;
34173 +#endif
34174 +
34175 +#ifdef CONFIG_PAX_SEGMEXEC
34176 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34177 + pax_flags |= MF_PAX_SEGMEXEC;
34178 +#endif
34179 +
34180 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34181 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34182 + if ((__supported_pte_mask & _PAGE_NX))
34183 + pax_flags &= ~MF_PAX_SEGMEXEC;
34184 + else
34185 + pax_flags &= ~MF_PAX_PAGEEXEC;
34186 + }
34187 +#endif
34188 +
34189 +#ifdef CONFIG_PAX_EMUTRAMP
34190 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34191 + pax_flags |= MF_PAX_EMUTRAMP;
34192 +#endif
34193 +
34194 +#ifdef CONFIG_PAX_MPROTECT
34195 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34196 + pax_flags |= MF_PAX_MPROTECT;
34197 +#endif
34198 +
34199 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34200 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34201 + pax_flags |= MF_PAX_RANDMMAP;
34202 +#endif
34203 +
34204 + return pax_flags;
34205 +}
34206 +#endif
34207 +
34208 +#ifdef CONFIG_PAX_EI_PAX
34209 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34210 +{
34211 + unsigned long pax_flags = 0UL;
34212 +
34213 +#ifdef CONFIG_PAX_PAGEEXEC
34214 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34215 + pax_flags |= MF_PAX_PAGEEXEC;
34216 +#endif
34217 +
34218 +#ifdef CONFIG_PAX_SEGMEXEC
34219 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34220 + pax_flags |= MF_PAX_SEGMEXEC;
34221 +#endif
34222 +
34223 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34224 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34225 + if ((__supported_pte_mask & _PAGE_NX))
34226 + pax_flags &= ~MF_PAX_SEGMEXEC;
34227 + else
34228 + pax_flags &= ~MF_PAX_PAGEEXEC;
34229 + }
34230 +#endif
34231 +
34232 +#ifdef CONFIG_PAX_EMUTRAMP
34233 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34234 + pax_flags |= MF_PAX_EMUTRAMP;
34235 +#endif
34236 +
34237 +#ifdef CONFIG_PAX_MPROTECT
34238 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34239 + pax_flags |= MF_PAX_MPROTECT;
34240 +#endif
34241 +
34242 +#ifdef CONFIG_PAX_ASLR
34243 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34244 + pax_flags |= MF_PAX_RANDMMAP;
34245 +#endif
34246 +
34247 + return pax_flags;
34248 +}
34249 +#endif
34250 +
34251 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34252 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34253 +{
34254 + unsigned long pax_flags = 0UL;
34255 +
34256 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34257 + unsigned long i;
34258 + int found_flags = 0;
34259 +#endif
34260 +
34261 +#ifdef CONFIG_PAX_EI_PAX
34262 + pax_flags = pax_parse_ei_pax(elf_ex);
34263 +#endif
34264 +
34265 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34266 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34267 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34268 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34269 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34270 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34271 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34272 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34273 + return -EINVAL;
34274 +
34275 +#ifdef CONFIG_PAX_SOFTMODE
34276 + if (pax_softmode)
34277 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34278 + else
34279 +#endif
34280 +
34281 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34282 + found_flags = 1;
34283 + break;
34284 + }
34285 +#endif
34286 +
34287 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34288 + if (found_flags == 0) {
34289 + struct elf_phdr phdr;
34290 + memset(&phdr, 0, sizeof(phdr));
34291 + phdr.p_flags = PF_NOEMUTRAMP;
34292 +#ifdef CONFIG_PAX_SOFTMODE
34293 + if (pax_softmode)
34294 + pax_flags = pax_parse_softmode(&phdr);
34295 + else
34296 +#endif
34297 + pax_flags = pax_parse_hardmode(&phdr);
34298 + }
34299 +#endif
34300 +
34301 + if (0 > pax_check_flags(&pax_flags))
34302 + return -EINVAL;
34303 +
34304 + current->mm->pax_flags = pax_flags;
34305 + return 0;
34306 +}
34307 +#endif
34308 +
34309 /*
34310 * These are the functions used to load ELF style executables and shared
34311 * libraries. There is no binary dependent code anywhere else.
34312 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34313 {
34314 unsigned int random_variable = 0;
34315
34316 +#ifdef CONFIG_PAX_RANDUSTACK
34317 + if (randomize_va_space)
34318 + return stack_top - current->mm->delta_stack;
34319 +#endif
34320 +
34321 if ((current->flags & PF_RANDOMIZE) &&
34322 !(current->personality & ADDR_NO_RANDOMIZE)) {
34323 random_variable = get_random_int() & STACK_RND_MASK;
34324 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34325 unsigned long load_addr = 0, load_bias = 0;
34326 int load_addr_set = 0;
34327 char * elf_interpreter = NULL;
34328 - unsigned long error;
34329 + unsigned long error = 0;
34330 struct elf_phdr *elf_ppnt, *elf_phdata;
34331 unsigned long elf_bss, elf_brk;
34332 int retval, i;
34333 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34334 unsigned long start_code, end_code, start_data, end_data;
34335 unsigned long reloc_func_desc __maybe_unused = 0;
34336 int executable_stack = EXSTACK_DEFAULT;
34337 - unsigned long def_flags = 0;
34338 struct {
34339 struct elfhdr elf_ex;
34340 struct elfhdr interp_elf_ex;
34341 } *loc;
34342 + unsigned long pax_task_size = TASK_SIZE;
34343
34344 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34345 if (!loc) {
34346 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34347
34348 /* OK, This is the point of no return */
34349 current->flags &= ~PF_FORKNOEXEC;
34350 - current->mm->def_flags = def_flags;
34351 +
34352 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34353 + current->mm->pax_flags = 0UL;
34354 +#endif
34355 +
34356 +#ifdef CONFIG_PAX_DLRESOLVE
34357 + current->mm->call_dl_resolve = 0UL;
34358 +#endif
34359 +
34360 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34361 + current->mm->call_syscall = 0UL;
34362 +#endif
34363 +
34364 +#ifdef CONFIG_PAX_ASLR
34365 + current->mm->delta_mmap = 0UL;
34366 + current->mm->delta_stack = 0UL;
34367 +#endif
34368 +
34369 + current->mm->def_flags = 0;
34370 +
34371 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34372 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34373 + send_sig(SIGKILL, current, 0);
34374 + goto out_free_dentry;
34375 + }
34376 +#endif
34377 +
34378 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34379 + pax_set_initial_flags(bprm);
34380 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34381 + if (pax_set_initial_flags_func)
34382 + (pax_set_initial_flags_func)(bprm);
34383 +#endif
34384 +
34385 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34386 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34387 + current->mm->context.user_cs_limit = PAGE_SIZE;
34388 + current->mm->def_flags |= VM_PAGEEXEC;
34389 + }
34390 +#endif
34391 +
34392 +#ifdef CONFIG_PAX_SEGMEXEC
34393 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34394 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34395 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34396 + pax_task_size = SEGMEXEC_TASK_SIZE;
34397 + current->mm->def_flags |= VM_NOHUGEPAGE;
34398 + }
34399 +#endif
34400 +
34401 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34402 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34403 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34404 + put_cpu();
34405 + }
34406 +#endif
34407
34408 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34409 may depend on the personality. */
34410 SET_PERSONALITY(loc->elf_ex);
34411 +
34412 +#ifdef CONFIG_PAX_ASLR
34413 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34414 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34415 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34416 + }
34417 +#endif
34418 +
34419 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34420 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34421 + executable_stack = EXSTACK_DISABLE_X;
34422 + current->personality &= ~READ_IMPLIES_EXEC;
34423 + } else
34424 +#endif
34425 +
34426 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34427 current->personality |= READ_IMPLIES_EXEC;
34428
34429 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34430 #else
34431 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34432 #endif
34433 +
34434 +#ifdef CONFIG_PAX_RANDMMAP
34435 + /* PaX: randomize base address at the default exe base if requested */
34436 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34437 +#ifdef CONFIG_SPARC64
34438 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34439 +#else
34440 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34441 +#endif
34442 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34443 + elf_flags |= MAP_FIXED;
34444 + }
34445 +#endif
34446 +
34447 }
34448
34449 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34450 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34451 * allowed task size. Note that p_filesz must always be
34452 * <= p_memsz so it is only necessary to check p_memsz.
34453 */
34454 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34455 - elf_ppnt->p_memsz > TASK_SIZE ||
34456 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34457 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34458 + elf_ppnt->p_memsz > pax_task_size ||
34459 + pax_task_size - elf_ppnt->p_memsz < k) {
34460 /* set_brk can never work. Avoid overflows. */
34461 send_sig(SIGKILL, current, 0);
34462 retval = -EINVAL;
34463 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34464 start_data += load_bias;
34465 end_data += load_bias;
34466
34467 +#ifdef CONFIG_PAX_RANDMMAP
34468 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34469 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34470 +#endif
34471 +
34472 /* Calling set_brk effectively mmaps the pages that we need
34473 * for the bss and break sections. We must do this before
34474 * mapping in the interpreter, to make sure it doesn't wind
34475 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34476 goto out_free_dentry;
34477 }
34478 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34479 - send_sig(SIGSEGV, current, 0);
34480 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34481 - goto out_free_dentry;
34482 + /*
34483 + * This bss-zeroing can fail if the ELF
34484 + * file specifies odd protections. So
34485 + * we don't check the return value
34486 + */
34487 }
34488
34489 if (elf_interpreter) {
34490 @@ -1090,7 +1398,7 @@ out:
34491 * Decide what to dump of a segment, part, all or none.
34492 */
34493 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34494 - unsigned long mm_flags)
34495 + unsigned long mm_flags, long signr)
34496 {
34497 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34498
34499 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34500 if (vma->vm_file == NULL)
34501 return 0;
34502
34503 - if (FILTER(MAPPED_PRIVATE))
34504 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34505 goto whole;
34506
34507 /*
34508 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34509 {
34510 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34511 int i = 0;
34512 - do
34513 + do {
34514 i += 2;
34515 - while (auxv[i - 2] != AT_NULL);
34516 + } while (auxv[i - 2] != AT_NULL);
34517 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34518 }
34519
34520 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34521 }
34522
34523 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34524 - unsigned long mm_flags)
34525 + struct coredump_params *cprm)
34526 {
34527 struct vm_area_struct *vma;
34528 size_t size = 0;
34529
34530 for (vma = first_vma(current, gate_vma); vma != NULL;
34531 vma = next_vma(vma, gate_vma))
34532 - size += vma_dump_size(vma, mm_flags);
34533 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34534 return size;
34535 }
34536
34537 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34538
34539 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34540
34541 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34542 + offset += elf_core_vma_data_size(gate_vma, cprm);
34543 offset += elf_core_extra_data_size();
34544 e_shoff = offset;
34545
34546 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34547 offset = dataoff;
34548
34549 size += sizeof(*elf);
34550 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34551 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34552 goto end_coredump;
34553
34554 size += sizeof(*phdr4note);
34555 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34556 if (size > cprm->limit
34557 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34558 goto end_coredump;
34559 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34560 phdr.p_offset = offset;
34561 phdr.p_vaddr = vma->vm_start;
34562 phdr.p_paddr = 0;
34563 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34564 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34565 phdr.p_memsz = vma->vm_end - vma->vm_start;
34566 offset += phdr.p_filesz;
34567 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34568 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34569 phdr.p_align = ELF_EXEC_PAGESIZE;
34570
34571 size += sizeof(phdr);
34572 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34573 if (size > cprm->limit
34574 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34575 goto end_coredump;
34576 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34577 unsigned long addr;
34578 unsigned long end;
34579
34580 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34581 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34582
34583 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34584 struct page *page;
34585 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34586 page = get_dump_page(addr);
34587 if (page) {
34588 void *kaddr = kmap(page);
34589 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34590 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34591 !dump_write(cprm->file, kaddr,
34592 PAGE_SIZE);
34593 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34594
34595 if (e_phnum == PN_XNUM) {
34596 size += sizeof(*shdr4extnum);
34597 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34598 if (size > cprm->limit
34599 || !dump_write(cprm->file, shdr4extnum,
34600 sizeof(*shdr4extnum)))
34601 @@ -2067,6 +2380,97 @@ out:
34602
34603 #endif /* CONFIG_ELF_CORE */
34604
34605 +#ifdef CONFIG_PAX_MPROTECT
34606 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34607 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34608 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34609 + *
34610 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34611 + * basis because we want to allow the common case and not the special ones.
34612 + */
34613 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34614 +{
34615 + struct elfhdr elf_h;
34616 + struct elf_phdr elf_p;
34617 + unsigned long i;
34618 + unsigned long oldflags;
34619 + bool is_textrel_rw, is_textrel_rx, is_relro;
34620 +
34621 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34622 + return;
34623 +
34624 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34625 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34626 +
34627 +#ifdef CONFIG_PAX_ELFRELOCS
34628 + /* possible TEXTREL */
34629 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34630 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34631 +#else
34632 + is_textrel_rw = false;
34633 + is_textrel_rx = false;
34634 +#endif
34635 +
34636 + /* possible RELRO */
34637 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34638 +
34639 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34640 + return;
34641 +
34642 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34643 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34644 +
34645 +#ifdef CONFIG_PAX_ETEXECRELOCS
34646 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34647 +#else
34648 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34649 +#endif
34650 +
34651 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34652 + !elf_check_arch(&elf_h) ||
34653 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34654 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34655 + return;
34656 +
34657 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34658 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34659 + return;
34660 + switch (elf_p.p_type) {
34661 + case PT_DYNAMIC:
34662 + if (!is_textrel_rw && !is_textrel_rx)
34663 + continue;
34664 + i = 0UL;
34665 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34666 + elf_dyn dyn;
34667 +
34668 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34669 + return;
34670 + if (dyn.d_tag == DT_NULL)
34671 + return;
34672 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34673 + gr_log_textrel(vma);
34674 + if (is_textrel_rw)
34675 + vma->vm_flags |= VM_MAYWRITE;
34676 + else
34677 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34678 + vma->vm_flags &= ~VM_MAYWRITE;
34679 + return;
34680 + }
34681 + i++;
34682 + }
34683 + return;
34684 +
34685 + case PT_GNU_RELRO:
34686 + if (!is_relro)
34687 + continue;
34688 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34689 + vma->vm_flags &= ~VM_MAYWRITE;
34690 + return;
34691 + }
34692 + }
34693 +}
34694 +#endif
34695 +
34696 static int __init init_elf_binfmt(void)
34697 {
34698 return register_binfmt(&elf_format);
34699 diff -urNp linux-3.0.3/fs/binfmt_flat.c linux-3.0.3/fs/binfmt_flat.c
34700 --- linux-3.0.3/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
34701 +++ linux-3.0.3/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
34702 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34703 realdatastart = (unsigned long) -ENOMEM;
34704 printk("Unable to allocate RAM for process data, errno %d\n",
34705 (int)-realdatastart);
34706 + down_write(&current->mm->mmap_sem);
34707 do_munmap(current->mm, textpos, text_len);
34708 + up_write(&current->mm->mmap_sem);
34709 ret = realdatastart;
34710 goto err;
34711 }
34712 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34713 }
34714 if (IS_ERR_VALUE(result)) {
34715 printk("Unable to read data+bss, errno %d\n", (int)-result);
34716 + down_write(&current->mm->mmap_sem);
34717 do_munmap(current->mm, textpos, text_len);
34718 do_munmap(current->mm, realdatastart, len);
34719 + up_write(&current->mm->mmap_sem);
34720 ret = result;
34721 goto err;
34722 }
34723 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34724 }
34725 if (IS_ERR_VALUE(result)) {
34726 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34727 + down_write(&current->mm->mmap_sem);
34728 do_munmap(current->mm, textpos, text_len + data_len + extra +
34729 MAX_SHARED_LIBS * sizeof(unsigned long));
34730 + up_write(&current->mm->mmap_sem);
34731 ret = result;
34732 goto err;
34733 }
34734 diff -urNp linux-3.0.3/fs/bio.c linux-3.0.3/fs/bio.c
34735 --- linux-3.0.3/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
34736 +++ linux-3.0.3/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
34737 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34738 const int read = bio_data_dir(bio) == READ;
34739 struct bio_map_data *bmd = bio->bi_private;
34740 int i;
34741 - char *p = bmd->sgvecs[0].iov_base;
34742 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34743
34744 __bio_for_each_segment(bvec, bio, i, 0) {
34745 char *addr = page_address(bvec->bv_page);
34746 diff -urNp linux-3.0.3/fs/block_dev.c linux-3.0.3/fs/block_dev.c
34747 --- linux-3.0.3/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
34748 +++ linux-3.0.3/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
34749 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34750 else if (bdev->bd_contains == bdev)
34751 return true; /* is a whole device which isn't held */
34752
34753 - else if (whole->bd_holder == bd_may_claim)
34754 + else if (whole->bd_holder == (void *)bd_may_claim)
34755 return true; /* is a partition of a device that is being partitioned */
34756 else if (whole->bd_holder != NULL)
34757 return false; /* is a partition of a held device */
34758 diff -urNp linux-3.0.3/fs/btrfs/ctree.c linux-3.0.3/fs/btrfs/ctree.c
34759 --- linux-3.0.3/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
34760 +++ linux-3.0.3/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
34761 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
34762 free_extent_buffer(buf);
34763 add_root_to_dirty_list(root);
34764 } else {
34765 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34766 - parent_start = parent->start;
34767 - else
34768 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34769 + if (parent)
34770 + parent_start = parent->start;
34771 + else
34772 + parent_start = 0;
34773 + } else
34774 parent_start = 0;
34775
34776 WARN_ON(trans->transid != btrfs_header_generation(parent));
34777 diff -urNp linux-3.0.3/fs/btrfs/inode.c linux-3.0.3/fs/btrfs/inode.c
34778 --- linux-3.0.3/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34779 +++ linux-3.0.3/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
34780 @@ -6895,7 +6895,7 @@ fail:
34781 return -ENOMEM;
34782 }
34783
34784 -static int btrfs_getattr(struct vfsmount *mnt,
34785 +int btrfs_getattr(struct vfsmount *mnt,
34786 struct dentry *dentry, struct kstat *stat)
34787 {
34788 struct inode *inode = dentry->d_inode;
34789 @@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
34790 return 0;
34791 }
34792
34793 +EXPORT_SYMBOL(btrfs_getattr);
34794 +
34795 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34796 +{
34797 + return BTRFS_I(inode)->root->anon_super.s_dev;
34798 +}
34799 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34800 +
34801 /*
34802 * If a file is moved, it will inherit the cow and compression flags of the new
34803 * directory.
34804 diff -urNp linux-3.0.3/fs/btrfs/ioctl.c linux-3.0.3/fs/btrfs/ioctl.c
34805 --- linux-3.0.3/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
34806 +++ linux-3.0.3/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
34807 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
34808 for (i = 0; i < num_types; i++) {
34809 struct btrfs_space_info *tmp;
34810
34811 + /* Don't copy in more than we allocated */
34812 if (!slot_count)
34813 break;
34814
34815 + slot_count--;
34816 +
34817 info = NULL;
34818 rcu_read_lock();
34819 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34820 @@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
34821 memcpy(dest, &space, sizeof(space));
34822 dest++;
34823 space_args.total_spaces++;
34824 - slot_count--;
34825 }
34826 - if (!slot_count)
34827 - break;
34828 }
34829 up_read(&info->groups_sem);
34830 }
34831 diff -urNp linux-3.0.3/fs/btrfs/relocation.c linux-3.0.3/fs/btrfs/relocation.c
34832 --- linux-3.0.3/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
34833 +++ linux-3.0.3/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
34834 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
34835 }
34836 spin_unlock(&rc->reloc_root_tree.lock);
34837
34838 - BUG_ON((struct btrfs_root *)node->data != root);
34839 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34840
34841 if (!del) {
34842 spin_lock(&rc->reloc_root_tree.lock);
34843 diff -urNp linux-3.0.3/fs/cachefiles/bind.c linux-3.0.3/fs/cachefiles/bind.c
34844 --- linux-3.0.3/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
34845 +++ linux-3.0.3/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
34846 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34847 args);
34848
34849 /* start by checking things over */
34850 - ASSERT(cache->fstop_percent >= 0 &&
34851 - cache->fstop_percent < cache->fcull_percent &&
34852 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
34853 cache->fcull_percent < cache->frun_percent &&
34854 cache->frun_percent < 100);
34855
34856 - ASSERT(cache->bstop_percent >= 0 &&
34857 - cache->bstop_percent < cache->bcull_percent &&
34858 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
34859 cache->bcull_percent < cache->brun_percent &&
34860 cache->brun_percent < 100);
34861
34862 diff -urNp linux-3.0.3/fs/cachefiles/daemon.c linux-3.0.3/fs/cachefiles/daemon.c
34863 --- linux-3.0.3/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
34864 +++ linux-3.0.3/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
34865 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34866 if (n > buflen)
34867 return -EMSGSIZE;
34868
34869 - if (copy_to_user(_buffer, buffer, n) != 0)
34870 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34871 return -EFAULT;
34872
34873 return n;
34874 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34875 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34876 return -EIO;
34877
34878 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
34879 + if (datalen > PAGE_SIZE - 1)
34880 return -EOPNOTSUPP;
34881
34882 /* drag the command string into the kernel so we can parse it */
34883 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34884 if (args[0] != '%' || args[1] != '\0')
34885 return -EINVAL;
34886
34887 - if (fstop < 0 || fstop >= cache->fcull_percent)
34888 + if (fstop >= cache->fcull_percent)
34889 return cachefiles_daemon_range_error(cache, args);
34890
34891 cache->fstop_percent = fstop;
34892 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34893 if (args[0] != '%' || args[1] != '\0')
34894 return -EINVAL;
34895
34896 - if (bstop < 0 || bstop >= cache->bcull_percent)
34897 + if (bstop >= cache->bcull_percent)
34898 return cachefiles_daemon_range_error(cache, args);
34899
34900 cache->bstop_percent = bstop;
34901 diff -urNp linux-3.0.3/fs/cachefiles/internal.h linux-3.0.3/fs/cachefiles/internal.h
34902 --- linux-3.0.3/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
34903 +++ linux-3.0.3/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
34904 @@ -57,7 +57,7 @@ struct cachefiles_cache {
34905 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34906 struct rb_root active_nodes; /* active nodes (can't be culled) */
34907 rwlock_t active_lock; /* lock for active_nodes */
34908 - atomic_t gravecounter; /* graveyard uniquifier */
34909 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34910 unsigned frun_percent; /* when to stop culling (% files) */
34911 unsigned fcull_percent; /* when to start culling (% files) */
34912 unsigned fstop_percent; /* when to stop allocating (% files) */
34913 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34914 * proc.c
34915 */
34916 #ifdef CONFIG_CACHEFILES_HISTOGRAM
34917 -extern atomic_t cachefiles_lookup_histogram[HZ];
34918 -extern atomic_t cachefiles_mkdir_histogram[HZ];
34919 -extern atomic_t cachefiles_create_histogram[HZ];
34920 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34921 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34922 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
34923
34924 extern int __init cachefiles_proc_init(void);
34925 extern void cachefiles_proc_cleanup(void);
34926 static inline
34927 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
34928 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
34929 {
34930 unsigned long jif = jiffies - start_jif;
34931 if (jif >= HZ)
34932 jif = HZ - 1;
34933 - atomic_inc(&histogram[jif]);
34934 + atomic_inc_unchecked(&histogram[jif]);
34935 }
34936
34937 #else
34938 diff -urNp linux-3.0.3/fs/cachefiles/namei.c linux-3.0.3/fs/cachefiles/namei.c
34939 --- linux-3.0.3/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
34940 +++ linux-3.0.3/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
34941 @@ -318,7 +318,7 @@ try_again:
34942 /* first step is to make up a grave dentry in the graveyard */
34943 sprintf(nbuffer, "%08x%08x",
34944 (uint32_t) get_seconds(),
34945 - (uint32_t) atomic_inc_return(&cache->gravecounter));
34946 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
34947
34948 /* do the multiway lock magic */
34949 trap = lock_rename(cache->graveyard, dir);
34950 diff -urNp linux-3.0.3/fs/cachefiles/proc.c linux-3.0.3/fs/cachefiles/proc.c
34951 --- linux-3.0.3/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
34952 +++ linux-3.0.3/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
34953 @@ -14,9 +14,9 @@
34954 #include <linux/seq_file.h>
34955 #include "internal.h"
34956
34957 -atomic_t cachefiles_lookup_histogram[HZ];
34958 -atomic_t cachefiles_mkdir_histogram[HZ];
34959 -atomic_t cachefiles_create_histogram[HZ];
34960 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
34961 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
34962 +atomic_unchecked_t cachefiles_create_histogram[HZ];
34963
34964 /*
34965 * display the latency histogram
34966 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
34967 return 0;
34968 default:
34969 index = (unsigned long) v - 3;
34970 - x = atomic_read(&cachefiles_lookup_histogram[index]);
34971 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
34972 - z = atomic_read(&cachefiles_create_histogram[index]);
34973 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
34974 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
34975 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
34976 if (x == 0 && y == 0 && z == 0)
34977 return 0;
34978
34979 diff -urNp linux-3.0.3/fs/cachefiles/rdwr.c linux-3.0.3/fs/cachefiles/rdwr.c
34980 --- linux-3.0.3/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
34981 +++ linux-3.0.3/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
34982 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
34983 old_fs = get_fs();
34984 set_fs(KERNEL_DS);
34985 ret = file->f_op->write(
34986 - file, (const void __user *) data, len, &pos);
34987 + file, (__force const void __user *) data, len, &pos);
34988 set_fs(old_fs);
34989 kunmap(page);
34990 if (ret != len)
34991 diff -urNp linux-3.0.3/fs/ceph/dir.c linux-3.0.3/fs/ceph/dir.c
34992 --- linux-3.0.3/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
34993 +++ linux-3.0.3/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
34994 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
34995 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
34996 struct ceph_mds_client *mdsc = fsc->mdsc;
34997 unsigned frag = fpos_frag(filp->f_pos);
34998 - int off = fpos_off(filp->f_pos);
34999 + unsigned int off = fpos_off(filp->f_pos);
35000 int err;
35001 u32 ftype;
35002 struct ceph_mds_reply_info_parsed *rinfo;
35003 diff -urNp linux-3.0.3/fs/cifs/cifs_debug.c linux-3.0.3/fs/cifs/cifs_debug.c
35004 --- linux-3.0.3/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
35005 +++ linux-3.0.3/fs/cifs/cifs_debug.c 2011-08-23 21:47:56.000000000 -0400
35006 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35007 tcon = list_entry(tmp3,
35008 struct cifs_tcon,
35009 tcon_list);
35010 - atomic_set(&tcon->num_smbs_sent, 0);
35011 - atomic_set(&tcon->num_writes, 0);
35012 - atomic_set(&tcon->num_reads, 0);
35013 - atomic_set(&tcon->num_oplock_brks, 0);
35014 - atomic_set(&tcon->num_opens, 0);
35015 - atomic_set(&tcon->num_posixopens, 0);
35016 - atomic_set(&tcon->num_posixmkdirs, 0);
35017 - atomic_set(&tcon->num_closes, 0);
35018 - atomic_set(&tcon->num_deletes, 0);
35019 - atomic_set(&tcon->num_mkdirs, 0);
35020 - atomic_set(&tcon->num_rmdirs, 0);
35021 - atomic_set(&tcon->num_renames, 0);
35022 - atomic_set(&tcon->num_t2renames, 0);
35023 - atomic_set(&tcon->num_ffirst, 0);
35024 - atomic_set(&tcon->num_fnext, 0);
35025 - atomic_set(&tcon->num_fclose, 0);
35026 - atomic_set(&tcon->num_hardlinks, 0);
35027 - atomic_set(&tcon->num_symlinks, 0);
35028 - atomic_set(&tcon->num_locks, 0);
35029 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35030 + atomic_set_unchecked(&tcon->num_writes, 0);
35031 + atomic_set_unchecked(&tcon->num_reads, 0);
35032 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35033 + atomic_set_unchecked(&tcon->num_opens, 0);
35034 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35035 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35036 + atomic_set_unchecked(&tcon->num_closes, 0);
35037 + atomic_set_unchecked(&tcon->num_deletes, 0);
35038 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35039 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35040 + atomic_set_unchecked(&tcon->num_renames, 0);
35041 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35042 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35043 + atomic_set_unchecked(&tcon->num_fnext, 0);
35044 + atomic_set_unchecked(&tcon->num_fclose, 0);
35045 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35046 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35047 + atomic_set_unchecked(&tcon->num_locks, 0);
35048 }
35049 }
35050 }
35051 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35052 if (tcon->need_reconnect)
35053 seq_puts(m, "\tDISCONNECTED ");
35054 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35055 - atomic_read(&tcon->num_smbs_sent),
35056 - atomic_read(&tcon->num_oplock_brks));
35057 + atomic_read_unchecked(&tcon->num_smbs_sent),
35058 + atomic_read_unchecked(&tcon->num_oplock_brks));
35059 seq_printf(m, "\nReads: %d Bytes: %lld",
35060 - atomic_read(&tcon->num_reads),
35061 + atomic_read_unchecked(&tcon->num_reads),
35062 (long long)(tcon->bytes_read));
35063 seq_printf(m, "\nWrites: %d Bytes: %lld",
35064 - atomic_read(&tcon->num_writes),
35065 + atomic_read_unchecked(&tcon->num_writes),
35066 (long long)(tcon->bytes_written));
35067 seq_printf(m, "\nFlushes: %d",
35068 - atomic_read(&tcon->num_flushes));
35069 + atomic_read_unchecked(&tcon->num_flushes));
35070 seq_printf(m, "\nLocks: %d HardLinks: %d "
35071 "Symlinks: %d",
35072 - atomic_read(&tcon->num_locks),
35073 - atomic_read(&tcon->num_hardlinks),
35074 - atomic_read(&tcon->num_symlinks));
35075 + atomic_read_unchecked(&tcon->num_locks),
35076 + atomic_read_unchecked(&tcon->num_hardlinks),
35077 + atomic_read_unchecked(&tcon->num_symlinks));
35078 seq_printf(m, "\nOpens: %d Closes: %d "
35079 "Deletes: %d",
35080 - atomic_read(&tcon->num_opens),
35081 - atomic_read(&tcon->num_closes),
35082 - atomic_read(&tcon->num_deletes));
35083 + atomic_read_unchecked(&tcon->num_opens),
35084 + atomic_read_unchecked(&tcon->num_closes),
35085 + atomic_read_unchecked(&tcon->num_deletes));
35086 seq_printf(m, "\nPosix Opens: %d "
35087 "Posix Mkdirs: %d",
35088 - atomic_read(&tcon->num_posixopens),
35089 - atomic_read(&tcon->num_posixmkdirs));
35090 + atomic_read_unchecked(&tcon->num_posixopens),
35091 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35092 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35093 - atomic_read(&tcon->num_mkdirs),
35094 - atomic_read(&tcon->num_rmdirs));
35095 + atomic_read_unchecked(&tcon->num_mkdirs),
35096 + atomic_read_unchecked(&tcon->num_rmdirs));
35097 seq_printf(m, "\nRenames: %d T2 Renames %d",
35098 - atomic_read(&tcon->num_renames),
35099 - atomic_read(&tcon->num_t2renames));
35100 + atomic_read_unchecked(&tcon->num_renames),
35101 + atomic_read_unchecked(&tcon->num_t2renames));
35102 seq_printf(m, "\nFindFirst: %d FNext %d "
35103 "FClose %d",
35104 - atomic_read(&tcon->num_ffirst),
35105 - atomic_read(&tcon->num_fnext),
35106 - atomic_read(&tcon->num_fclose));
35107 + atomic_read_unchecked(&tcon->num_ffirst),
35108 + atomic_read_unchecked(&tcon->num_fnext),
35109 + atomic_read_unchecked(&tcon->num_fclose));
35110 }
35111 }
35112 }
35113 diff -urNp linux-3.0.3/fs/cifs/cifsglob.h linux-3.0.3/fs/cifs/cifsglob.h
35114 --- linux-3.0.3/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
35115 +++ linux-3.0.3/fs/cifs/cifsglob.h 2011-08-23 21:47:56.000000000 -0400
35116 @@ -381,28 +381,28 @@ struct cifs_tcon {
35117 __u16 Flags; /* optional support bits */
35118 enum statusEnum tidStatus;
35119 #ifdef CONFIG_CIFS_STATS
35120 - atomic_t num_smbs_sent;
35121 - atomic_t num_writes;
35122 - atomic_t num_reads;
35123 - atomic_t num_flushes;
35124 - atomic_t num_oplock_brks;
35125 - atomic_t num_opens;
35126 - atomic_t num_closes;
35127 - atomic_t num_deletes;
35128 - atomic_t num_mkdirs;
35129 - atomic_t num_posixopens;
35130 - atomic_t num_posixmkdirs;
35131 - atomic_t num_rmdirs;
35132 - atomic_t num_renames;
35133 - atomic_t num_t2renames;
35134 - atomic_t num_ffirst;
35135 - atomic_t num_fnext;
35136 - atomic_t num_fclose;
35137 - atomic_t num_hardlinks;
35138 - atomic_t num_symlinks;
35139 - atomic_t num_locks;
35140 - atomic_t num_acl_get;
35141 - atomic_t num_acl_set;
35142 + atomic_unchecked_t num_smbs_sent;
35143 + atomic_unchecked_t num_writes;
35144 + atomic_unchecked_t num_reads;
35145 + atomic_unchecked_t num_flushes;
35146 + atomic_unchecked_t num_oplock_brks;
35147 + atomic_unchecked_t num_opens;
35148 + atomic_unchecked_t num_closes;
35149 + atomic_unchecked_t num_deletes;
35150 + atomic_unchecked_t num_mkdirs;
35151 + atomic_unchecked_t num_posixopens;
35152 + atomic_unchecked_t num_posixmkdirs;
35153 + atomic_unchecked_t num_rmdirs;
35154 + atomic_unchecked_t num_renames;
35155 + atomic_unchecked_t num_t2renames;
35156 + atomic_unchecked_t num_ffirst;
35157 + atomic_unchecked_t num_fnext;
35158 + atomic_unchecked_t num_fclose;
35159 + atomic_unchecked_t num_hardlinks;
35160 + atomic_unchecked_t num_symlinks;
35161 + atomic_unchecked_t num_locks;
35162 + atomic_unchecked_t num_acl_get;
35163 + atomic_unchecked_t num_acl_set;
35164 #ifdef CONFIG_CIFS_STATS2
35165 unsigned long long time_writes;
35166 unsigned long long time_reads;
35167 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
35168 }
35169
35170 #ifdef CONFIG_CIFS_STATS
35171 -#define cifs_stats_inc atomic_inc
35172 +#define cifs_stats_inc atomic_inc_unchecked
35173
35174 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
35175 unsigned int bytes)
35176 diff -urNp linux-3.0.3/fs/cifs/link.c linux-3.0.3/fs/cifs/link.c
35177 --- linux-3.0.3/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
35178 +++ linux-3.0.3/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
35179 @@ -587,7 +587,7 @@ symlink_exit:
35180
35181 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35182 {
35183 - char *p = nd_get_link(nd);
35184 + const char *p = nd_get_link(nd);
35185 if (!IS_ERR(p))
35186 kfree(p);
35187 }
35188 diff -urNp linux-3.0.3/fs/coda/cache.c linux-3.0.3/fs/coda/cache.c
35189 --- linux-3.0.3/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
35190 +++ linux-3.0.3/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
35191 @@ -24,7 +24,7 @@
35192 #include "coda_linux.h"
35193 #include "coda_cache.h"
35194
35195 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35196 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35197
35198 /* replace or extend an acl cache hit */
35199 void coda_cache_enter(struct inode *inode, int mask)
35200 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35201 struct coda_inode_info *cii = ITOC(inode);
35202
35203 spin_lock(&cii->c_lock);
35204 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35205 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35206 if (cii->c_uid != current_fsuid()) {
35207 cii->c_uid = current_fsuid();
35208 cii->c_cached_perm = mask;
35209 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35210 {
35211 struct coda_inode_info *cii = ITOC(inode);
35212 spin_lock(&cii->c_lock);
35213 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35214 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35215 spin_unlock(&cii->c_lock);
35216 }
35217
35218 /* remove all acl caches */
35219 void coda_cache_clear_all(struct super_block *sb)
35220 {
35221 - atomic_inc(&permission_epoch);
35222 + atomic_inc_unchecked(&permission_epoch);
35223 }
35224
35225
35226 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35227 spin_lock(&cii->c_lock);
35228 hit = (mask & cii->c_cached_perm) == mask &&
35229 cii->c_uid == current_fsuid() &&
35230 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35231 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35232 spin_unlock(&cii->c_lock);
35233
35234 return hit;
35235 diff -urNp linux-3.0.3/fs/compat_binfmt_elf.c linux-3.0.3/fs/compat_binfmt_elf.c
35236 --- linux-3.0.3/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
35237 +++ linux-3.0.3/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
35238 @@ -30,11 +30,13 @@
35239 #undef elf_phdr
35240 #undef elf_shdr
35241 #undef elf_note
35242 +#undef elf_dyn
35243 #undef elf_addr_t
35244 #define elfhdr elf32_hdr
35245 #define elf_phdr elf32_phdr
35246 #define elf_shdr elf32_shdr
35247 #define elf_note elf32_note
35248 +#define elf_dyn Elf32_Dyn
35249 #define elf_addr_t Elf32_Addr
35250
35251 /*
35252 diff -urNp linux-3.0.3/fs/compat.c linux-3.0.3/fs/compat.c
35253 --- linux-3.0.3/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
35254 +++ linux-3.0.3/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
35255 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35256 goto out;
35257
35258 ret = -EINVAL;
35259 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35260 + if (nr_segs > UIO_MAXIOV)
35261 goto out;
35262 if (nr_segs > fast_segs) {
35263 ret = -ENOMEM;
35264 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35265
35266 struct compat_readdir_callback {
35267 struct compat_old_linux_dirent __user *dirent;
35268 + struct file * file;
35269 int result;
35270 };
35271
35272 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35273 buf->result = -EOVERFLOW;
35274 return -EOVERFLOW;
35275 }
35276 +
35277 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35278 + return 0;
35279 +
35280 buf->result++;
35281 dirent = buf->dirent;
35282 if (!access_ok(VERIFY_WRITE, dirent,
35283 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35284
35285 buf.result = 0;
35286 buf.dirent = dirent;
35287 + buf.file = file;
35288
35289 error = vfs_readdir(file, compat_fillonedir, &buf);
35290 if (buf.result)
35291 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35292 struct compat_getdents_callback {
35293 struct compat_linux_dirent __user *current_dir;
35294 struct compat_linux_dirent __user *previous;
35295 + struct file * file;
35296 int count;
35297 int error;
35298 };
35299 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35300 buf->error = -EOVERFLOW;
35301 return -EOVERFLOW;
35302 }
35303 +
35304 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35305 + return 0;
35306 +
35307 dirent = buf->previous;
35308 if (dirent) {
35309 if (__put_user(offset, &dirent->d_off))
35310 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35311 buf.previous = NULL;
35312 buf.count = count;
35313 buf.error = 0;
35314 + buf.file = file;
35315
35316 error = vfs_readdir(file, compat_filldir, &buf);
35317 if (error >= 0)
35318 @@ -1006,6 +1018,7 @@ out:
35319 struct compat_getdents_callback64 {
35320 struct linux_dirent64 __user *current_dir;
35321 struct linux_dirent64 __user *previous;
35322 + struct file * file;
35323 int count;
35324 int error;
35325 };
35326 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35327 buf->error = -EINVAL; /* only used if we fail.. */
35328 if (reclen > buf->count)
35329 return -EINVAL;
35330 +
35331 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35332 + return 0;
35333 +
35334 dirent = buf->previous;
35335
35336 if (dirent) {
35337 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35338 buf.previous = NULL;
35339 buf.count = count;
35340 buf.error = 0;
35341 + buf.file = file;
35342
35343 error = vfs_readdir(file, compat_filldir64, &buf);
35344 if (error >= 0)
35345 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
35346 struct fdtable *fdt;
35347 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35348
35349 + pax_track_stack();
35350 +
35351 if (n < 0)
35352 goto out_nofds;
35353
35354 diff -urNp linux-3.0.3/fs/compat_ioctl.c linux-3.0.3/fs/compat_ioctl.c
35355 --- linux-3.0.3/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35356 +++ linux-3.0.3/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
35357 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35358
35359 err = get_user(palp, &up->palette);
35360 err |= get_user(length, &up->length);
35361 + if (err)
35362 + return -EFAULT;
35363
35364 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35365 err = put_user(compat_ptr(palp), &up_native->palette);
35366 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35367 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35368 {
35369 unsigned int a, b;
35370 - a = *(unsigned int *)p;
35371 - b = *(unsigned int *)q;
35372 + a = *(const unsigned int *)p;
35373 + b = *(const unsigned int *)q;
35374 if (a > b)
35375 return 1;
35376 if (a < b)
35377 diff -urNp linux-3.0.3/fs/configfs/dir.c linux-3.0.3/fs/configfs/dir.c
35378 --- linux-3.0.3/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
35379 +++ linux-3.0.3/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
35380 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35381 }
35382 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35383 struct configfs_dirent *next;
35384 - const char * name;
35385 + const unsigned char * name;
35386 + char d_name[sizeof(next->s_dentry->d_iname)];
35387 int len;
35388 struct inode *inode = NULL;
35389
35390 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35391 continue;
35392
35393 name = configfs_get_name(next);
35394 - len = strlen(name);
35395 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35396 + len = next->s_dentry->d_name.len;
35397 + memcpy(d_name, name, len);
35398 + name = d_name;
35399 + } else
35400 + len = strlen(name);
35401
35402 /*
35403 * We'll have a dentry and an inode for
35404 diff -urNp linux-3.0.3/fs/dcache.c linux-3.0.3/fs/dcache.c
35405 --- linux-3.0.3/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
35406 +++ linux-3.0.3/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
35407 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
35408 mempages -= reserve;
35409
35410 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35411 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35412 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35413
35414 dcache_init();
35415 inode_init();
35416 diff -urNp linux-3.0.3/fs/ecryptfs/inode.c linux-3.0.3/fs/ecryptfs/inode.c
35417 --- linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:44:40.000000000 -0400
35418 +++ linux-3.0.3/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
35419 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
35420 old_fs = get_fs();
35421 set_fs(get_ds());
35422 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35423 - (char __user *)lower_buf,
35424 + (__force char __user *)lower_buf,
35425 lower_bufsiz);
35426 set_fs(old_fs);
35427 if (rc < 0)
35428 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
35429 }
35430 old_fs = get_fs();
35431 set_fs(get_ds());
35432 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35433 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35434 set_fs(old_fs);
35435 if (rc < 0) {
35436 kfree(buf);
35437 @@ -765,7 +765,7 @@ out:
35438 static void
35439 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35440 {
35441 - char *buf = nd_get_link(nd);
35442 + const char *buf = nd_get_link(nd);
35443 if (!IS_ERR(buf)) {
35444 /* Free the char* */
35445 kfree(buf);
35446 diff -urNp linux-3.0.3/fs/ecryptfs/miscdev.c linux-3.0.3/fs/ecryptfs/miscdev.c
35447 --- linux-3.0.3/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
35448 +++ linux-3.0.3/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
35449 @@ -328,7 +328,7 @@ check_list:
35450 goto out_unlock_msg_ctx;
35451 i = 5;
35452 if (msg_ctx->msg) {
35453 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35454 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35455 goto out_unlock_msg_ctx;
35456 i += packet_length_size;
35457 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35458 diff -urNp linux-3.0.3/fs/exec.c linux-3.0.3/fs/exec.c
35459 --- linux-3.0.3/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
35460 +++ linux-3.0.3/fs/exec.c 2011-08-23 21:48:14.000000000 -0400
35461 @@ -55,12 +55,24 @@
35462 #include <linux/pipe_fs_i.h>
35463 #include <linux/oom.h>
35464 #include <linux/compat.h>
35465 +#include <linux/random.h>
35466 +#include <linux/seq_file.h>
35467 +
35468 +#ifdef CONFIG_PAX_REFCOUNT
35469 +#include <linux/kallsyms.h>
35470 +#include <linux/kdebug.h>
35471 +#endif
35472
35473 #include <asm/uaccess.h>
35474 #include <asm/mmu_context.h>
35475 #include <asm/tlb.h>
35476 #include "internal.h"
35477
35478 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35479 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35480 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35481 +#endif
35482 +
35483 int core_uses_pid;
35484 char core_pattern[CORENAME_MAX_SIZE] = "core";
35485 unsigned int core_pipe_limit;
35486 @@ -70,7 +82,7 @@ struct core_name {
35487 char *corename;
35488 int used, size;
35489 };
35490 -static atomic_t call_count = ATOMIC_INIT(1);
35491 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35492
35493 /* The maximal length of core_pattern is also specified in sysctl.c */
35494
35495 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35496 char *tmp = getname(library);
35497 int error = PTR_ERR(tmp);
35498 static const struct open_flags uselib_flags = {
35499 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35500 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35501 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35502 .intent = LOOKUP_OPEN
35503 };
35504 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
35505 int write)
35506 {
35507 struct page *page;
35508 - int ret;
35509
35510 -#ifdef CONFIG_STACK_GROWSUP
35511 - if (write) {
35512 - ret = expand_downwards(bprm->vma, pos);
35513 - if (ret < 0)
35514 - return NULL;
35515 - }
35516 -#endif
35517 - ret = get_user_pages(current, bprm->mm, pos,
35518 - 1, write, 1, &page, NULL);
35519 - if (ret <= 0)
35520 + if (0 > expand_downwards(bprm->vma, pos))
35521 + return NULL;
35522 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35523 return NULL;
35524
35525 if (write) {
35526 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
35527 vma->vm_end = STACK_TOP_MAX;
35528 vma->vm_start = vma->vm_end - PAGE_SIZE;
35529 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35530 +
35531 +#ifdef CONFIG_PAX_SEGMEXEC
35532 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35533 +#endif
35534 +
35535 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35536 INIT_LIST_HEAD(&vma->anon_vma_chain);
35537
35538 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
35539 mm->stack_vm = mm->total_vm = 1;
35540 up_write(&mm->mmap_sem);
35541 bprm->p = vma->vm_end - sizeof(void *);
35542 +
35543 +#ifdef CONFIG_PAX_RANDUSTACK
35544 + if (randomize_va_space)
35545 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35546 +#endif
35547 +
35548 return 0;
35549 err:
35550 up_write(&mm->mmap_sem);
35551 @@ -403,19 +418,7 @@ err:
35552 return err;
35553 }
35554
35555 -struct user_arg_ptr {
35556 -#ifdef CONFIG_COMPAT
35557 - bool is_compat;
35558 -#endif
35559 - union {
35560 - const char __user *const __user *native;
35561 -#ifdef CONFIG_COMPAT
35562 - compat_uptr_t __user *compat;
35563 -#endif
35564 - } ptr;
35565 -};
35566 -
35567 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35568 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
35569 {
35570 const char __user *native;
35571
35572 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
35573 int r;
35574 mm_segment_t oldfs = get_fs();
35575 struct user_arg_ptr argv = {
35576 - .ptr.native = (const char __user *const __user *)__argv,
35577 + .ptr.native = (__force const char __user *const __user *)__argv,
35578 };
35579
35580 set_fs(KERNEL_DS);
35581 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
35582 unsigned long new_end = old_end - shift;
35583 struct mmu_gather tlb;
35584
35585 - BUG_ON(new_start > new_end);
35586 + if (new_start >= new_end || new_start < mmap_min_addr)
35587 + return -ENOMEM;
35588
35589 /*
35590 * ensure there are no vmas between where we want to go
35591 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
35592 if (vma != find_vma(mm, new_start))
35593 return -EFAULT;
35594
35595 +#ifdef CONFIG_PAX_SEGMEXEC
35596 + BUG_ON(pax_find_mirror_vma(vma));
35597 +#endif
35598 +
35599 /*
35600 * cover the whole range: [new_start, old_end)
35601 */
35602 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
35603 stack_top = arch_align_stack(stack_top);
35604 stack_top = PAGE_ALIGN(stack_top);
35605
35606 - if (unlikely(stack_top < mmap_min_addr) ||
35607 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35608 - return -ENOMEM;
35609 -
35610 stack_shift = vma->vm_end - stack_top;
35611
35612 bprm->p -= stack_shift;
35613 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
35614 bprm->exec -= stack_shift;
35615
35616 down_write(&mm->mmap_sem);
35617 +
35618 + /* Move stack pages down in memory. */
35619 + if (stack_shift) {
35620 + ret = shift_arg_pages(vma, stack_shift);
35621 + if (ret)
35622 + goto out_unlock;
35623 + }
35624 +
35625 vm_flags = VM_STACK_FLAGS;
35626
35627 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35628 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35629 + vm_flags &= ~VM_EXEC;
35630 +
35631 +#ifdef CONFIG_PAX_MPROTECT
35632 + if (mm->pax_flags & MF_PAX_MPROTECT)
35633 + vm_flags &= ~VM_MAYEXEC;
35634 +#endif
35635 +
35636 + }
35637 +#endif
35638 +
35639 /*
35640 * Adjust stack execute permissions; explicitly enable for
35641 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35642 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
35643 goto out_unlock;
35644 BUG_ON(prev != vma);
35645
35646 - /* Move stack pages down in memory. */
35647 - if (stack_shift) {
35648 - ret = shift_arg_pages(vma, stack_shift);
35649 - if (ret)
35650 - goto out_unlock;
35651 - }
35652 -
35653 /* mprotect_fixup is overkill to remove the temporary stack flags */
35654 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35655
35656 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
35657 struct file *file;
35658 int err;
35659 static const struct open_flags open_exec_flags = {
35660 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35661 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35662 .acc_mode = MAY_EXEC | MAY_OPEN,
35663 .intent = LOOKUP_OPEN
35664 };
35665 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
35666 old_fs = get_fs();
35667 set_fs(get_ds());
35668 /* The cast to a user pointer is valid due to the set_fs() */
35669 - result = vfs_read(file, (void __user *)addr, count, &pos);
35670 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35671 set_fs(old_fs);
35672 return result;
35673 }
35674 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
35675 }
35676 rcu_read_unlock();
35677
35678 - if (p->fs->users > n_fs) {
35679 + if (atomic_read(&p->fs->users) > n_fs) {
35680 bprm->unsafe |= LSM_UNSAFE_SHARE;
35681 } else {
35682 res = -EAGAIN;
35683 @@ -1428,6 +1445,11 @@ static int do_execve_common(const char *
35684 struct user_arg_ptr envp,
35685 struct pt_regs *regs)
35686 {
35687 +#ifdef CONFIG_GRKERNSEC
35688 + struct file *old_exec_file;
35689 + struct acl_subject_label *old_acl;
35690 + struct rlimit old_rlim[RLIM_NLIMITS];
35691 +#endif
35692 struct linux_binprm *bprm;
35693 struct file *file;
35694 struct files_struct *displaced;
35695 @@ -1464,6 +1486,23 @@ static int do_execve_common(const char *
35696 bprm->filename = filename;
35697 bprm->interp = filename;
35698
35699 + if (gr_process_user_ban()) {
35700 + retval = -EPERM;
35701 + goto out_file;
35702 + }
35703 +
35704 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35705 +
35706 + if (gr_handle_nproc()) {
35707 + retval = -EAGAIN;
35708 + goto out_file;
35709 + }
35710 +
35711 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35712 + retval = -EACCES;
35713 + goto out_file;
35714 + }
35715 +
35716 retval = bprm_mm_init(bprm);
35717 if (retval)
35718 goto out_file;
35719 @@ -1493,9 +1532,40 @@ static int do_execve_common(const char *
35720 if (retval < 0)
35721 goto out;
35722
35723 + if (!gr_tpe_allow(file)) {
35724 + retval = -EACCES;
35725 + goto out;
35726 + }
35727 +
35728 + if (gr_check_crash_exec(file)) {
35729 + retval = -EACCES;
35730 + goto out;
35731 + }
35732 +
35733 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35734 +
35735 + gr_handle_exec_args(bprm, argv);
35736 +
35737 +#ifdef CONFIG_GRKERNSEC
35738 + old_acl = current->acl;
35739 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35740 + old_exec_file = current->exec_file;
35741 + get_file(file);
35742 + current->exec_file = file;
35743 +#endif
35744 +
35745 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35746 + bprm->unsafe & LSM_UNSAFE_SHARE);
35747 + if (retval < 0)
35748 + goto out_fail;
35749 +
35750 retval = search_binary_handler(bprm,regs);
35751 if (retval < 0)
35752 - goto out;
35753 + goto out_fail;
35754 +#ifdef CONFIG_GRKERNSEC
35755 + if (old_exec_file)
35756 + fput(old_exec_file);
35757 +#endif
35758
35759 /* execve succeeded */
35760 current->fs->in_exec = 0;
35761 @@ -1506,6 +1576,14 @@ static int do_execve_common(const char *
35762 put_files_struct(displaced);
35763 return retval;
35764
35765 +out_fail:
35766 +#ifdef CONFIG_GRKERNSEC
35767 + current->acl = old_acl;
35768 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35769 + fput(current->exec_file);
35770 + current->exec_file = old_exec_file;
35771 +#endif
35772 +
35773 out:
35774 if (bprm->mm) {
35775 acct_arg_size(bprm, 0);
35776 @@ -1579,7 +1657,7 @@ static int expand_corename(struct core_n
35777 {
35778 char *old_corename = cn->corename;
35779
35780 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35781 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35782 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35783
35784 if (!cn->corename) {
35785 @@ -1667,7 +1745,7 @@ static int format_corename(struct core_n
35786 int pid_in_pattern = 0;
35787 int err = 0;
35788
35789 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35790 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35791 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35792 cn->used = 0;
35793
35794 @@ -1758,6 +1836,219 @@ out:
35795 return ispipe;
35796 }
35797
35798 +int pax_check_flags(unsigned long *flags)
35799 +{
35800 + int retval = 0;
35801 +
35802 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35803 + if (*flags & MF_PAX_SEGMEXEC)
35804 + {
35805 + *flags &= ~MF_PAX_SEGMEXEC;
35806 + retval = -EINVAL;
35807 + }
35808 +#endif
35809 +
35810 + if ((*flags & MF_PAX_PAGEEXEC)
35811 +
35812 +#ifdef CONFIG_PAX_PAGEEXEC
35813 + && (*flags & MF_PAX_SEGMEXEC)
35814 +#endif
35815 +
35816 + )
35817 + {
35818 + *flags &= ~MF_PAX_PAGEEXEC;
35819 + retval = -EINVAL;
35820 + }
35821 +
35822 + if ((*flags & MF_PAX_MPROTECT)
35823 +
35824 +#ifdef CONFIG_PAX_MPROTECT
35825 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35826 +#endif
35827 +
35828 + )
35829 + {
35830 + *flags &= ~MF_PAX_MPROTECT;
35831 + retval = -EINVAL;
35832 + }
35833 +
35834 + if ((*flags & MF_PAX_EMUTRAMP)
35835 +
35836 +#ifdef CONFIG_PAX_EMUTRAMP
35837 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35838 +#endif
35839 +
35840 + )
35841 + {
35842 + *flags &= ~MF_PAX_EMUTRAMP;
35843 + retval = -EINVAL;
35844 + }
35845 +
35846 + return retval;
35847 +}
35848 +
35849 +EXPORT_SYMBOL(pax_check_flags);
35850 +
35851 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35852 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
35853 +{
35854 + struct task_struct *tsk = current;
35855 + struct mm_struct *mm = current->mm;
35856 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
35857 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
35858 + char *path_exec = NULL;
35859 + char *path_fault = NULL;
35860 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
35861 +
35862 + if (buffer_exec && buffer_fault) {
35863 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
35864 +
35865 + down_read(&mm->mmap_sem);
35866 + vma = mm->mmap;
35867 + while (vma && (!vma_exec || !vma_fault)) {
35868 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
35869 + vma_exec = vma;
35870 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
35871 + vma_fault = vma;
35872 + vma = vma->vm_next;
35873 + }
35874 + if (vma_exec) {
35875 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
35876 + if (IS_ERR(path_exec))
35877 + path_exec = "<path too long>";
35878 + else {
35879 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
35880 + if (path_exec) {
35881 + *path_exec = 0;
35882 + path_exec = buffer_exec;
35883 + } else
35884 + path_exec = "<path too long>";
35885 + }
35886 + }
35887 + if (vma_fault) {
35888 + start = vma_fault->vm_start;
35889 + end = vma_fault->vm_end;
35890 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
35891 + if (vma_fault->vm_file) {
35892 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
35893 + if (IS_ERR(path_fault))
35894 + path_fault = "<path too long>";
35895 + else {
35896 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
35897 + if (path_fault) {
35898 + *path_fault = 0;
35899 + path_fault = buffer_fault;
35900 + } else
35901 + path_fault = "<path too long>";
35902 + }
35903 + } else
35904 + path_fault = "<anonymous mapping>";
35905 + }
35906 + up_read(&mm->mmap_sem);
35907 + }
35908 + if (tsk->signal->curr_ip)
35909 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
35910 + else
35911 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
35912 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
35913 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
35914 + task_uid(tsk), task_euid(tsk), pc, sp);
35915 + free_page((unsigned long)buffer_exec);
35916 + free_page((unsigned long)buffer_fault);
35917 + pax_report_insns(pc, sp);
35918 + do_coredump(SIGKILL, SIGKILL, regs);
35919 +}
35920 +#endif
35921 +
35922 +#ifdef CONFIG_PAX_REFCOUNT
35923 +void pax_report_refcount_overflow(struct pt_regs *regs)
35924 +{
35925 + if (current->signal->curr_ip)
35926 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
35927 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
35928 + else
35929 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
35930 + current->comm, task_pid_nr(current), current_uid(), current_euid());
35931 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
35932 + show_regs(regs);
35933 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
35934 +}
35935 +#endif
35936 +
35937 +#ifdef CONFIG_PAX_USERCOPY
35938 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
35939 +int object_is_on_stack(const void *obj, unsigned long len)
35940 +{
35941 + const void * const stack = task_stack_page(current);
35942 + const void * const stackend = stack + THREAD_SIZE;
35943 +
35944 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
35945 + const void *frame = NULL;
35946 + const void *oldframe;
35947 +#endif
35948 +
35949 + if (obj + len < obj)
35950 + return -1;
35951 +
35952 + if (obj + len <= stack || stackend <= obj)
35953 + return 0;
35954 +
35955 + if (obj < stack || stackend < obj + len)
35956 + return -1;
35957 +
35958 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
35959 + oldframe = __builtin_frame_address(1);
35960 + if (oldframe)
35961 + frame = __builtin_frame_address(2);
35962 + /*
35963 + low ----------------------------------------------> high
35964 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
35965 + ^----------------^
35966 + allow copies only within here
35967 + */
35968 + while (stack <= frame && frame < stackend) {
35969 + /* if obj + len extends past the last frame, this
35970 + check won't pass and the next frame will be 0,
35971 + causing us to bail out and correctly report
35972 + the copy as invalid
35973 + */
35974 + if (obj + len <= frame)
35975 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
35976 + oldframe = frame;
35977 + frame = *(const void * const *)frame;
35978 + }
35979 + return -1;
35980 +#else
35981 + return 1;
35982 +#endif
35983 +}
35984 +
35985 +
35986 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
35987 +{
35988 + if (current->signal->curr_ip)
35989 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
35990 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
35991 + else
35992 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
35993 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
35994 + dump_stack();
35995 + gr_handle_kernel_exploit();
35996 + do_group_exit(SIGKILL);
35997 +}
35998 +#endif
35999 +
36000 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36001 +void pax_track_stack(void)
36002 +{
36003 + unsigned long sp = (unsigned long)&sp;
36004 + if (sp < current_thread_info()->lowest_stack &&
36005 + sp > (unsigned long)task_stack_page(current))
36006 + current_thread_info()->lowest_stack = sp;
36007 +}
36008 +EXPORT_SYMBOL(pax_track_stack);
36009 +#endif
36010 +
36011 static int zap_process(struct task_struct *start, int exit_code)
36012 {
36013 struct task_struct *t;
36014 @@ -1969,17 +2260,17 @@ static void wait_for_dump_helpers(struct
36015 pipe = file->f_path.dentry->d_inode->i_pipe;
36016
36017 pipe_lock(pipe);
36018 - pipe->readers++;
36019 - pipe->writers--;
36020 + atomic_inc(&pipe->readers);
36021 + atomic_dec(&pipe->writers);
36022
36023 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36024 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36025 wake_up_interruptible_sync(&pipe->wait);
36026 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36027 pipe_wait(pipe);
36028 }
36029
36030 - pipe->readers--;
36031 - pipe->writers++;
36032 + atomic_dec(&pipe->readers);
36033 + atomic_inc(&pipe->writers);
36034 pipe_unlock(pipe);
36035
36036 }
36037 @@ -2040,7 +2331,7 @@ void do_coredump(long signr, int exit_co
36038 int retval = 0;
36039 int flag = 0;
36040 int ispipe;
36041 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36042 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36043 struct coredump_params cprm = {
36044 .signr = signr,
36045 .regs = regs,
36046 @@ -2055,6 +2346,9 @@ void do_coredump(long signr, int exit_co
36047
36048 audit_core_dumps(signr);
36049
36050 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36051 + gr_handle_brute_attach(current, cprm.mm_flags);
36052 +
36053 binfmt = mm->binfmt;
36054 if (!binfmt || !binfmt->core_dump)
36055 goto fail;
36056 @@ -2095,6 +2389,8 @@ void do_coredump(long signr, int exit_co
36057 goto fail_corename;
36058 }
36059
36060 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36061 +
36062 if (ispipe) {
36063 int dump_count;
36064 char **helper_argv;
36065 @@ -2122,7 +2418,7 @@ void do_coredump(long signr, int exit_co
36066 }
36067 cprm.limit = RLIM_INFINITY;
36068
36069 - dump_count = atomic_inc_return(&core_dump_count);
36070 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36071 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36072 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36073 task_tgid_vnr(current), current->comm);
36074 @@ -2192,7 +2488,7 @@ close_fail:
36075 filp_close(cprm.file, NULL);
36076 fail_dropcount:
36077 if (ispipe)
36078 - atomic_dec(&core_dump_count);
36079 + atomic_dec_unchecked(&core_dump_count);
36080 fail_unlock:
36081 kfree(cn.corename);
36082 fail_corename:
36083 diff -urNp linux-3.0.3/fs/ext2/balloc.c linux-3.0.3/fs/ext2/balloc.c
36084 --- linux-3.0.3/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
36085 +++ linux-3.0.3/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
36086 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36087
36088 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36089 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36090 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36091 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36092 sbi->s_resuid != current_fsuid() &&
36093 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36094 return 0;
36095 diff -urNp linux-3.0.3/fs/ext3/balloc.c linux-3.0.3/fs/ext3/balloc.c
36096 --- linux-3.0.3/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
36097 +++ linux-3.0.3/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
36098 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36099
36100 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36101 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36102 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36103 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36104 sbi->s_resuid != current_fsuid() &&
36105 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36106 return 0;
36107 diff -urNp linux-3.0.3/fs/ext4/balloc.c linux-3.0.3/fs/ext4/balloc.c
36108 --- linux-3.0.3/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
36109 +++ linux-3.0.3/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
36110 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
36111 /* Hm, nope. Are (enough) root reserved blocks available? */
36112 if (sbi->s_resuid == current_fsuid() ||
36113 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36114 - capable(CAP_SYS_RESOURCE) ||
36115 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
36116 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
36117 + capable_nolog(CAP_SYS_RESOURCE)) {
36118
36119 if (free_blocks >= (nblocks + dirty_blocks))
36120 return 1;
36121 diff -urNp linux-3.0.3/fs/ext4/ext4.h linux-3.0.3/fs/ext4/ext4.h
36122 --- linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:44:40.000000000 -0400
36123 +++ linux-3.0.3/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
36124 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
36125 unsigned long s_mb_last_start;
36126
36127 /* stats for buddy allocator */
36128 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36129 - atomic_t s_bal_success; /* we found long enough chunks */
36130 - atomic_t s_bal_allocated; /* in blocks */
36131 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36132 - atomic_t s_bal_goals; /* goal hits */
36133 - atomic_t s_bal_breaks; /* too long searches */
36134 - atomic_t s_bal_2orders; /* 2^order hits */
36135 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36136 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36137 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36138 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36139 + atomic_unchecked_t s_bal_goals; /* goal hits */
36140 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36141 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36142 spinlock_t s_bal_lock;
36143 unsigned long s_mb_buddies_generated;
36144 unsigned long long s_mb_generation_time;
36145 - atomic_t s_mb_lost_chunks;
36146 - atomic_t s_mb_preallocated;
36147 - atomic_t s_mb_discarded;
36148 + atomic_unchecked_t s_mb_lost_chunks;
36149 + atomic_unchecked_t s_mb_preallocated;
36150 + atomic_unchecked_t s_mb_discarded;
36151 atomic_t s_lock_busy;
36152
36153 /* locality groups */
36154 diff -urNp linux-3.0.3/fs/ext4/mballoc.c linux-3.0.3/fs/ext4/mballoc.c
36155 --- linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:44:40.000000000 -0400
36156 +++ linux-3.0.3/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
36157 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
36158 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36159
36160 if (EXT4_SB(sb)->s_mb_stats)
36161 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36162 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36163
36164 break;
36165 }
36166 @@ -2087,7 +2087,7 @@ repeat:
36167 ac->ac_status = AC_STATUS_CONTINUE;
36168 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36169 cr = 3;
36170 - atomic_inc(&sbi->s_mb_lost_chunks);
36171 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36172 goto repeat;
36173 }
36174 }
36175 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
36176 ext4_grpblk_t counters[16];
36177 } sg;
36178
36179 + pax_track_stack();
36180 +
36181 group--;
36182 if (group == 0)
36183 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36184 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
36185 if (sbi->s_mb_stats) {
36186 printk(KERN_INFO
36187 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36188 - atomic_read(&sbi->s_bal_allocated),
36189 - atomic_read(&sbi->s_bal_reqs),
36190 - atomic_read(&sbi->s_bal_success));
36191 + atomic_read_unchecked(&sbi->s_bal_allocated),
36192 + atomic_read_unchecked(&sbi->s_bal_reqs),
36193 + atomic_read_unchecked(&sbi->s_bal_success));
36194 printk(KERN_INFO
36195 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36196 "%u 2^N hits, %u breaks, %u lost\n",
36197 - atomic_read(&sbi->s_bal_ex_scanned),
36198 - atomic_read(&sbi->s_bal_goals),
36199 - atomic_read(&sbi->s_bal_2orders),
36200 - atomic_read(&sbi->s_bal_breaks),
36201 - atomic_read(&sbi->s_mb_lost_chunks));
36202 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36203 + atomic_read_unchecked(&sbi->s_bal_goals),
36204 + atomic_read_unchecked(&sbi->s_bal_2orders),
36205 + atomic_read_unchecked(&sbi->s_bal_breaks),
36206 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36207 printk(KERN_INFO
36208 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36209 sbi->s_mb_buddies_generated++,
36210 sbi->s_mb_generation_time);
36211 printk(KERN_INFO
36212 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36213 - atomic_read(&sbi->s_mb_preallocated),
36214 - atomic_read(&sbi->s_mb_discarded));
36215 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36216 + atomic_read_unchecked(&sbi->s_mb_discarded));
36217 }
36218
36219 free_percpu(sbi->s_locality_groups);
36220 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
36221 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36222
36223 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36224 - atomic_inc(&sbi->s_bal_reqs);
36225 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36226 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36227 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36228 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36229 - atomic_inc(&sbi->s_bal_success);
36230 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36231 + atomic_inc_unchecked(&sbi->s_bal_success);
36232 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36233 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36234 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36235 - atomic_inc(&sbi->s_bal_goals);
36236 + atomic_inc_unchecked(&sbi->s_bal_goals);
36237 if (ac->ac_found > sbi->s_mb_max_to_scan)
36238 - atomic_inc(&sbi->s_bal_breaks);
36239 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36240 }
36241
36242 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36243 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36244 trace_ext4_mb_new_inode_pa(ac, pa);
36245
36246 ext4_mb_use_inode_pa(ac, pa);
36247 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36248 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36249
36250 ei = EXT4_I(ac->ac_inode);
36251 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36252 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36253 trace_ext4_mb_new_group_pa(ac, pa);
36254
36255 ext4_mb_use_group_pa(ac, pa);
36256 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36257 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36258
36259 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36260 lg = ac->ac_lg;
36261 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36262 * from the bitmap and continue.
36263 */
36264 }
36265 - atomic_add(free, &sbi->s_mb_discarded);
36266 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36267
36268 return err;
36269 }
36270 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36271 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36272 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36273 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36274 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36275 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36276 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36277
36278 return 0;
36279 diff -urNp linux-3.0.3/fs/fcntl.c linux-3.0.3/fs/fcntl.c
36280 --- linux-3.0.3/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
36281 +++ linux-3.0.3/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
36282 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36283 if (err)
36284 return err;
36285
36286 + if (gr_handle_chroot_fowner(pid, type))
36287 + return -ENOENT;
36288 + if (gr_check_protected_task_fowner(pid, type))
36289 + return -EACCES;
36290 +
36291 f_modown(filp, pid, type, force);
36292 return 0;
36293 }
36294 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36295 switch (cmd) {
36296 case F_DUPFD:
36297 case F_DUPFD_CLOEXEC:
36298 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36299 if (arg >= rlimit(RLIMIT_NOFILE))
36300 break;
36301 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36302 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36303 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36304 * is defined as O_NONBLOCK on some platforms and not on others.
36305 */
36306 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36307 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36308 O_RDONLY | O_WRONLY | O_RDWR |
36309 O_CREAT | O_EXCL | O_NOCTTY |
36310 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36311 __O_SYNC | O_DSYNC | FASYNC |
36312 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36313 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36314 - __FMODE_EXEC | O_PATH
36315 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36316 ));
36317
36318 fasync_cache = kmem_cache_create("fasync_cache",
36319 diff -urNp linux-3.0.3/fs/fifo.c linux-3.0.3/fs/fifo.c
36320 --- linux-3.0.3/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
36321 +++ linux-3.0.3/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
36322 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36323 */
36324 filp->f_op = &read_pipefifo_fops;
36325 pipe->r_counter++;
36326 - if (pipe->readers++ == 0)
36327 + if (atomic_inc_return(&pipe->readers) == 1)
36328 wake_up_partner(inode);
36329
36330 - if (!pipe->writers) {
36331 + if (!atomic_read(&pipe->writers)) {
36332 if ((filp->f_flags & O_NONBLOCK)) {
36333 /* suppress POLLHUP until we have
36334 * seen a writer */
36335 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36336 * errno=ENXIO when there is no process reading the FIFO.
36337 */
36338 ret = -ENXIO;
36339 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36340 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36341 goto err;
36342
36343 filp->f_op = &write_pipefifo_fops;
36344 pipe->w_counter++;
36345 - if (!pipe->writers++)
36346 + if (atomic_inc_return(&pipe->writers) == 1)
36347 wake_up_partner(inode);
36348
36349 - if (!pipe->readers) {
36350 + if (!atomic_read(&pipe->readers)) {
36351 wait_for_partner(inode, &pipe->r_counter);
36352 if (signal_pending(current))
36353 goto err_wr;
36354 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36355 */
36356 filp->f_op = &rdwr_pipefifo_fops;
36357
36358 - pipe->readers++;
36359 - pipe->writers++;
36360 + atomic_inc(&pipe->readers);
36361 + atomic_inc(&pipe->writers);
36362 pipe->r_counter++;
36363 pipe->w_counter++;
36364 - if (pipe->readers == 1 || pipe->writers == 1)
36365 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36366 wake_up_partner(inode);
36367 break;
36368
36369 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36370 return 0;
36371
36372 err_rd:
36373 - if (!--pipe->readers)
36374 + if (atomic_dec_and_test(&pipe->readers))
36375 wake_up_interruptible(&pipe->wait);
36376 ret = -ERESTARTSYS;
36377 goto err;
36378
36379 err_wr:
36380 - if (!--pipe->writers)
36381 + if (atomic_dec_and_test(&pipe->writers))
36382 wake_up_interruptible(&pipe->wait);
36383 ret = -ERESTARTSYS;
36384 goto err;
36385
36386 err:
36387 - if (!pipe->readers && !pipe->writers)
36388 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36389 free_pipe_info(inode);
36390
36391 err_nocleanup:
36392 diff -urNp linux-3.0.3/fs/file.c linux-3.0.3/fs/file.c
36393 --- linux-3.0.3/fs/file.c 2011-07-21 22:17:23.000000000 -0400
36394 +++ linux-3.0.3/fs/file.c 2011-08-23 21:48:14.000000000 -0400
36395 @@ -15,6 +15,7 @@
36396 #include <linux/slab.h>
36397 #include <linux/vmalloc.h>
36398 #include <linux/file.h>
36399 +#include <linux/security.h>
36400 #include <linux/fdtable.h>
36401 #include <linux/bitops.h>
36402 #include <linux/interrupt.h>
36403 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36404 * N.B. For clone tasks sharing a files structure, this test
36405 * will limit the total number of files that can be opened.
36406 */
36407 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36408 if (nr >= rlimit(RLIMIT_NOFILE))
36409 return -EMFILE;
36410
36411 diff -urNp linux-3.0.3/fs/filesystems.c linux-3.0.3/fs/filesystems.c
36412 --- linux-3.0.3/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
36413 +++ linux-3.0.3/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
36414 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36415 int len = dot ? dot - name : strlen(name);
36416
36417 fs = __get_fs_type(name, len);
36418 +
36419 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36420 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36421 +#else
36422 if (!fs && (request_module("%.*s", len, name) == 0))
36423 +#endif
36424 fs = __get_fs_type(name, len);
36425
36426 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36427 diff -urNp linux-3.0.3/fs/fscache/cookie.c linux-3.0.3/fs/fscache/cookie.c
36428 --- linux-3.0.3/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
36429 +++ linux-3.0.3/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
36430 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36431 parent ? (char *) parent->def->name : "<no-parent>",
36432 def->name, netfs_data);
36433
36434 - fscache_stat(&fscache_n_acquires);
36435 + fscache_stat_unchecked(&fscache_n_acquires);
36436
36437 /* if there's no parent cookie, then we don't create one here either */
36438 if (!parent) {
36439 - fscache_stat(&fscache_n_acquires_null);
36440 + fscache_stat_unchecked(&fscache_n_acquires_null);
36441 _leave(" [no parent]");
36442 return NULL;
36443 }
36444 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36445 /* allocate and initialise a cookie */
36446 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36447 if (!cookie) {
36448 - fscache_stat(&fscache_n_acquires_oom);
36449 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36450 _leave(" [ENOMEM]");
36451 return NULL;
36452 }
36453 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36454
36455 switch (cookie->def->type) {
36456 case FSCACHE_COOKIE_TYPE_INDEX:
36457 - fscache_stat(&fscache_n_cookie_index);
36458 + fscache_stat_unchecked(&fscache_n_cookie_index);
36459 break;
36460 case FSCACHE_COOKIE_TYPE_DATAFILE:
36461 - fscache_stat(&fscache_n_cookie_data);
36462 + fscache_stat_unchecked(&fscache_n_cookie_data);
36463 break;
36464 default:
36465 - fscache_stat(&fscache_n_cookie_special);
36466 + fscache_stat_unchecked(&fscache_n_cookie_special);
36467 break;
36468 }
36469
36470 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36471 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36472 atomic_dec(&parent->n_children);
36473 __fscache_cookie_put(cookie);
36474 - fscache_stat(&fscache_n_acquires_nobufs);
36475 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36476 _leave(" = NULL");
36477 return NULL;
36478 }
36479 }
36480
36481 - fscache_stat(&fscache_n_acquires_ok);
36482 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36483 _leave(" = %p", cookie);
36484 return cookie;
36485 }
36486 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36487 cache = fscache_select_cache_for_object(cookie->parent);
36488 if (!cache) {
36489 up_read(&fscache_addremove_sem);
36490 - fscache_stat(&fscache_n_acquires_no_cache);
36491 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36492 _leave(" = -ENOMEDIUM [no cache]");
36493 return -ENOMEDIUM;
36494 }
36495 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36496 object = cache->ops->alloc_object(cache, cookie);
36497 fscache_stat_d(&fscache_n_cop_alloc_object);
36498 if (IS_ERR(object)) {
36499 - fscache_stat(&fscache_n_object_no_alloc);
36500 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36501 ret = PTR_ERR(object);
36502 goto error;
36503 }
36504
36505 - fscache_stat(&fscache_n_object_alloc);
36506 + fscache_stat_unchecked(&fscache_n_object_alloc);
36507
36508 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36509
36510 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36511 struct fscache_object *object;
36512 struct hlist_node *_p;
36513
36514 - fscache_stat(&fscache_n_updates);
36515 + fscache_stat_unchecked(&fscache_n_updates);
36516
36517 if (!cookie) {
36518 - fscache_stat(&fscache_n_updates_null);
36519 + fscache_stat_unchecked(&fscache_n_updates_null);
36520 _leave(" [no cookie]");
36521 return;
36522 }
36523 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36524 struct fscache_object *object;
36525 unsigned long event;
36526
36527 - fscache_stat(&fscache_n_relinquishes);
36528 + fscache_stat_unchecked(&fscache_n_relinquishes);
36529 if (retire)
36530 - fscache_stat(&fscache_n_relinquishes_retire);
36531 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36532
36533 if (!cookie) {
36534 - fscache_stat(&fscache_n_relinquishes_null);
36535 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36536 _leave(" [no cookie]");
36537 return;
36538 }
36539 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36540
36541 /* wait for the cookie to finish being instantiated (or to fail) */
36542 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36543 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36544 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36545 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36546 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36547 }
36548 diff -urNp linux-3.0.3/fs/fscache/internal.h linux-3.0.3/fs/fscache/internal.h
36549 --- linux-3.0.3/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
36550 +++ linux-3.0.3/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
36551 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36552 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36553 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36554
36555 -extern atomic_t fscache_n_op_pend;
36556 -extern atomic_t fscache_n_op_run;
36557 -extern atomic_t fscache_n_op_enqueue;
36558 -extern atomic_t fscache_n_op_deferred_release;
36559 -extern atomic_t fscache_n_op_release;
36560 -extern atomic_t fscache_n_op_gc;
36561 -extern atomic_t fscache_n_op_cancelled;
36562 -extern atomic_t fscache_n_op_rejected;
36563 -
36564 -extern atomic_t fscache_n_attr_changed;
36565 -extern atomic_t fscache_n_attr_changed_ok;
36566 -extern atomic_t fscache_n_attr_changed_nobufs;
36567 -extern atomic_t fscache_n_attr_changed_nomem;
36568 -extern atomic_t fscache_n_attr_changed_calls;
36569 -
36570 -extern atomic_t fscache_n_allocs;
36571 -extern atomic_t fscache_n_allocs_ok;
36572 -extern atomic_t fscache_n_allocs_wait;
36573 -extern atomic_t fscache_n_allocs_nobufs;
36574 -extern atomic_t fscache_n_allocs_intr;
36575 -extern atomic_t fscache_n_allocs_object_dead;
36576 -extern atomic_t fscache_n_alloc_ops;
36577 -extern atomic_t fscache_n_alloc_op_waits;
36578 -
36579 -extern atomic_t fscache_n_retrievals;
36580 -extern atomic_t fscache_n_retrievals_ok;
36581 -extern atomic_t fscache_n_retrievals_wait;
36582 -extern atomic_t fscache_n_retrievals_nodata;
36583 -extern atomic_t fscache_n_retrievals_nobufs;
36584 -extern atomic_t fscache_n_retrievals_intr;
36585 -extern atomic_t fscache_n_retrievals_nomem;
36586 -extern atomic_t fscache_n_retrievals_object_dead;
36587 -extern atomic_t fscache_n_retrieval_ops;
36588 -extern atomic_t fscache_n_retrieval_op_waits;
36589 -
36590 -extern atomic_t fscache_n_stores;
36591 -extern atomic_t fscache_n_stores_ok;
36592 -extern atomic_t fscache_n_stores_again;
36593 -extern atomic_t fscache_n_stores_nobufs;
36594 -extern atomic_t fscache_n_stores_oom;
36595 -extern atomic_t fscache_n_store_ops;
36596 -extern atomic_t fscache_n_store_calls;
36597 -extern atomic_t fscache_n_store_pages;
36598 -extern atomic_t fscache_n_store_radix_deletes;
36599 -extern atomic_t fscache_n_store_pages_over_limit;
36600 -
36601 -extern atomic_t fscache_n_store_vmscan_not_storing;
36602 -extern atomic_t fscache_n_store_vmscan_gone;
36603 -extern atomic_t fscache_n_store_vmscan_busy;
36604 -extern atomic_t fscache_n_store_vmscan_cancelled;
36605 -
36606 -extern atomic_t fscache_n_marks;
36607 -extern atomic_t fscache_n_uncaches;
36608 -
36609 -extern atomic_t fscache_n_acquires;
36610 -extern atomic_t fscache_n_acquires_null;
36611 -extern atomic_t fscache_n_acquires_no_cache;
36612 -extern atomic_t fscache_n_acquires_ok;
36613 -extern atomic_t fscache_n_acquires_nobufs;
36614 -extern atomic_t fscache_n_acquires_oom;
36615 -
36616 -extern atomic_t fscache_n_updates;
36617 -extern atomic_t fscache_n_updates_null;
36618 -extern atomic_t fscache_n_updates_run;
36619 -
36620 -extern atomic_t fscache_n_relinquishes;
36621 -extern atomic_t fscache_n_relinquishes_null;
36622 -extern atomic_t fscache_n_relinquishes_waitcrt;
36623 -extern atomic_t fscache_n_relinquishes_retire;
36624 -
36625 -extern atomic_t fscache_n_cookie_index;
36626 -extern atomic_t fscache_n_cookie_data;
36627 -extern atomic_t fscache_n_cookie_special;
36628 -
36629 -extern atomic_t fscache_n_object_alloc;
36630 -extern atomic_t fscache_n_object_no_alloc;
36631 -extern atomic_t fscache_n_object_lookups;
36632 -extern atomic_t fscache_n_object_lookups_negative;
36633 -extern atomic_t fscache_n_object_lookups_positive;
36634 -extern atomic_t fscache_n_object_lookups_timed_out;
36635 -extern atomic_t fscache_n_object_created;
36636 -extern atomic_t fscache_n_object_avail;
36637 -extern atomic_t fscache_n_object_dead;
36638 -
36639 -extern atomic_t fscache_n_checkaux_none;
36640 -extern atomic_t fscache_n_checkaux_okay;
36641 -extern atomic_t fscache_n_checkaux_update;
36642 -extern atomic_t fscache_n_checkaux_obsolete;
36643 +extern atomic_unchecked_t fscache_n_op_pend;
36644 +extern atomic_unchecked_t fscache_n_op_run;
36645 +extern atomic_unchecked_t fscache_n_op_enqueue;
36646 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36647 +extern atomic_unchecked_t fscache_n_op_release;
36648 +extern atomic_unchecked_t fscache_n_op_gc;
36649 +extern atomic_unchecked_t fscache_n_op_cancelled;
36650 +extern atomic_unchecked_t fscache_n_op_rejected;
36651 +
36652 +extern atomic_unchecked_t fscache_n_attr_changed;
36653 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36654 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36655 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36656 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36657 +
36658 +extern atomic_unchecked_t fscache_n_allocs;
36659 +extern atomic_unchecked_t fscache_n_allocs_ok;
36660 +extern atomic_unchecked_t fscache_n_allocs_wait;
36661 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36662 +extern atomic_unchecked_t fscache_n_allocs_intr;
36663 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36664 +extern atomic_unchecked_t fscache_n_alloc_ops;
36665 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36666 +
36667 +extern atomic_unchecked_t fscache_n_retrievals;
36668 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36669 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36670 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36671 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36672 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36673 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36674 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36675 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36676 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36677 +
36678 +extern atomic_unchecked_t fscache_n_stores;
36679 +extern atomic_unchecked_t fscache_n_stores_ok;
36680 +extern atomic_unchecked_t fscache_n_stores_again;
36681 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36682 +extern atomic_unchecked_t fscache_n_stores_oom;
36683 +extern atomic_unchecked_t fscache_n_store_ops;
36684 +extern atomic_unchecked_t fscache_n_store_calls;
36685 +extern atomic_unchecked_t fscache_n_store_pages;
36686 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36687 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36688 +
36689 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36690 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36691 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36692 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36693 +
36694 +extern atomic_unchecked_t fscache_n_marks;
36695 +extern atomic_unchecked_t fscache_n_uncaches;
36696 +
36697 +extern atomic_unchecked_t fscache_n_acquires;
36698 +extern atomic_unchecked_t fscache_n_acquires_null;
36699 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36700 +extern atomic_unchecked_t fscache_n_acquires_ok;
36701 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36702 +extern atomic_unchecked_t fscache_n_acquires_oom;
36703 +
36704 +extern atomic_unchecked_t fscache_n_updates;
36705 +extern atomic_unchecked_t fscache_n_updates_null;
36706 +extern atomic_unchecked_t fscache_n_updates_run;
36707 +
36708 +extern atomic_unchecked_t fscache_n_relinquishes;
36709 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36710 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36711 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36712 +
36713 +extern atomic_unchecked_t fscache_n_cookie_index;
36714 +extern atomic_unchecked_t fscache_n_cookie_data;
36715 +extern atomic_unchecked_t fscache_n_cookie_special;
36716 +
36717 +extern atomic_unchecked_t fscache_n_object_alloc;
36718 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36719 +extern atomic_unchecked_t fscache_n_object_lookups;
36720 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36721 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36722 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36723 +extern atomic_unchecked_t fscache_n_object_created;
36724 +extern atomic_unchecked_t fscache_n_object_avail;
36725 +extern atomic_unchecked_t fscache_n_object_dead;
36726 +
36727 +extern atomic_unchecked_t fscache_n_checkaux_none;
36728 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36729 +extern atomic_unchecked_t fscache_n_checkaux_update;
36730 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36731
36732 extern atomic_t fscache_n_cop_alloc_object;
36733 extern atomic_t fscache_n_cop_lookup_object;
36734 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36735 atomic_inc(stat);
36736 }
36737
36738 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36739 +{
36740 + atomic_inc_unchecked(stat);
36741 +}
36742 +
36743 static inline void fscache_stat_d(atomic_t *stat)
36744 {
36745 atomic_dec(stat);
36746 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36747
36748 #define __fscache_stat(stat) (NULL)
36749 #define fscache_stat(stat) do {} while (0)
36750 +#define fscache_stat_unchecked(stat) do {} while (0)
36751 #define fscache_stat_d(stat) do {} while (0)
36752 #endif
36753
36754 diff -urNp linux-3.0.3/fs/fscache/object.c linux-3.0.3/fs/fscache/object.c
36755 --- linux-3.0.3/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
36756 +++ linux-3.0.3/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
36757 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36758 /* update the object metadata on disk */
36759 case FSCACHE_OBJECT_UPDATING:
36760 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36761 - fscache_stat(&fscache_n_updates_run);
36762 + fscache_stat_unchecked(&fscache_n_updates_run);
36763 fscache_stat(&fscache_n_cop_update_object);
36764 object->cache->ops->update_object(object);
36765 fscache_stat_d(&fscache_n_cop_update_object);
36766 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36767 spin_lock(&object->lock);
36768 object->state = FSCACHE_OBJECT_DEAD;
36769 spin_unlock(&object->lock);
36770 - fscache_stat(&fscache_n_object_dead);
36771 + fscache_stat_unchecked(&fscache_n_object_dead);
36772 goto terminal_transit;
36773
36774 /* handle the parent cache of this object being withdrawn from
36775 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
36776 spin_lock(&object->lock);
36777 object->state = FSCACHE_OBJECT_DEAD;
36778 spin_unlock(&object->lock);
36779 - fscache_stat(&fscache_n_object_dead);
36780 + fscache_stat_unchecked(&fscache_n_object_dead);
36781 goto terminal_transit;
36782
36783 /* complain about the object being woken up once it is
36784 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36785 parent->cookie->def->name, cookie->def->name,
36786 object->cache->tag->name);
36787
36788 - fscache_stat(&fscache_n_object_lookups);
36789 + fscache_stat_unchecked(&fscache_n_object_lookups);
36790 fscache_stat(&fscache_n_cop_lookup_object);
36791 ret = object->cache->ops->lookup_object(object);
36792 fscache_stat_d(&fscache_n_cop_lookup_object);
36793 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36794 if (ret == -ETIMEDOUT) {
36795 /* probably stuck behind another object, so move this one to
36796 * the back of the queue */
36797 - fscache_stat(&fscache_n_object_lookups_timed_out);
36798 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36799 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36800 }
36801
36802 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36803
36804 spin_lock(&object->lock);
36805 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36806 - fscache_stat(&fscache_n_object_lookups_negative);
36807 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36808
36809 /* transit here to allow write requests to begin stacking up
36810 * and read requests to begin returning ENODATA */
36811 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36812 * result, in which case there may be data available */
36813 spin_lock(&object->lock);
36814 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36815 - fscache_stat(&fscache_n_object_lookups_positive);
36816 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36817
36818 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36819
36820 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36821 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36822 } else {
36823 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36824 - fscache_stat(&fscache_n_object_created);
36825 + fscache_stat_unchecked(&fscache_n_object_created);
36826
36827 object->state = FSCACHE_OBJECT_AVAILABLE;
36828 spin_unlock(&object->lock);
36829 @@ -602,7 +602,7 @@ static void fscache_object_available(str
36830 fscache_enqueue_dependents(object);
36831
36832 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36833 - fscache_stat(&fscache_n_object_avail);
36834 + fscache_stat_unchecked(&fscache_n_object_avail);
36835
36836 _leave("");
36837 }
36838 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36839 enum fscache_checkaux result;
36840
36841 if (!object->cookie->def->check_aux) {
36842 - fscache_stat(&fscache_n_checkaux_none);
36843 + fscache_stat_unchecked(&fscache_n_checkaux_none);
36844 return FSCACHE_CHECKAUX_OKAY;
36845 }
36846
36847 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
36848 switch (result) {
36849 /* entry okay as is */
36850 case FSCACHE_CHECKAUX_OKAY:
36851 - fscache_stat(&fscache_n_checkaux_okay);
36852 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
36853 break;
36854
36855 /* entry requires update */
36856 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
36857 - fscache_stat(&fscache_n_checkaux_update);
36858 + fscache_stat_unchecked(&fscache_n_checkaux_update);
36859 break;
36860
36861 /* entry requires deletion */
36862 case FSCACHE_CHECKAUX_OBSOLETE:
36863 - fscache_stat(&fscache_n_checkaux_obsolete);
36864 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
36865 break;
36866
36867 default:
36868 diff -urNp linux-3.0.3/fs/fscache/operation.c linux-3.0.3/fs/fscache/operation.c
36869 --- linux-3.0.3/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
36870 +++ linux-3.0.3/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
36871 @@ -17,7 +17,7 @@
36872 #include <linux/slab.h>
36873 #include "internal.h"
36874
36875 -atomic_t fscache_op_debug_id;
36876 +atomic_unchecked_t fscache_op_debug_id;
36877 EXPORT_SYMBOL(fscache_op_debug_id);
36878
36879 /**
36880 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
36881 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
36882 ASSERTCMP(atomic_read(&op->usage), >, 0);
36883
36884 - fscache_stat(&fscache_n_op_enqueue);
36885 + fscache_stat_unchecked(&fscache_n_op_enqueue);
36886 switch (op->flags & FSCACHE_OP_TYPE) {
36887 case FSCACHE_OP_ASYNC:
36888 _debug("queue async");
36889 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
36890 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
36891 if (op->processor)
36892 fscache_enqueue_operation(op);
36893 - fscache_stat(&fscache_n_op_run);
36894 + fscache_stat_unchecked(&fscache_n_op_run);
36895 }
36896
36897 /*
36898 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
36899 if (object->n_ops > 1) {
36900 atomic_inc(&op->usage);
36901 list_add_tail(&op->pend_link, &object->pending_ops);
36902 - fscache_stat(&fscache_n_op_pend);
36903 + fscache_stat_unchecked(&fscache_n_op_pend);
36904 } else if (!list_empty(&object->pending_ops)) {
36905 atomic_inc(&op->usage);
36906 list_add_tail(&op->pend_link, &object->pending_ops);
36907 - fscache_stat(&fscache_n_op_pend);
36908 + fscache_stat_unchecked(&fscache_n_op_pend);
36909 fscache_start_operations(object);
36910 } else {
36911 ASSERTCMP(object->n_in_progress, ==, 0);
36912 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
36913 object->n_exclusive++; /* reads and writes must wait */
36914 atomic_inc(&op->usage);
36915 list_add_tail(&op->pend_link, &object->pending_ops);
36916 - fscache_stat(&fscache_n_op_pend);
36917 + fscache_stat_unchecked(&fscache_n_op_pend);
36918 ret = 0;
36919 } else {
36920 /* not allowed to submit ops in any other state */
36921 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
36922 if (object->n_exclusive > 0) {
36923 atomic_inc(&op->usage);
36924 list_add_tail(&op->pend_link, &object->pending_ops);
36925 - fscache_stat(&fscache_n_op_pend);
36926 + fscache_stat_unchecked(&fscache_n_op_pend);
36927 } else if (!list_empty(&object->pending_ops)) {
36928 atomic_inc(&op->usage);
36929 list_add_tail(&op->pend_link, &object->pending_ops);
36930 - fscache_stat(&fscache_n_op_pend);
36931 + fscache_stat_unchecked(&fscache_n_op_pend);
36932 fscache_start_operations(object);
36933 } else {
36934 ASSERTCMP(object->n_exclusive, ==, 0);
36935 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
36936 object->n_ops++;
36937 atomic_inc(&op->usage);
36938 list_add_tail(&op->pend_link, &object->pending_ops);
36939 - fscache_stat(&fscache_n_op_pend);
36940 + fscache_stat_unchecked(&fscache_n_op_pend);
36941 ret = 0;
36942 } else if (object->state == FSCACHE_OBJECT_DYING ||
36943 object->state == FSCACHE_OBJECT_LC_DYING ||
36944 object->state == FSCACHE_OBJECT_WITHDRAWING) {
36945 - fscache_stat(&fscache_n_op_rejected);
36946 + fscache_stat_unchecked(&fscache_n_op_rejected);
36947 ret = -ENOBUFS;
36948 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
36949 fscache_report_unexpected_submission(object, op, ostate);
36950 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
36951
36952 ret = -EBUSY;
36953 if (!list_empty(&op->pend_link)) {
36954 - fscache_stat(&fscache_n_op_cancelled);
36955 + fscache_stat_unchecked(&fscache_n_op_cancelled);
36956 list_del_init(&op->pend_link);
36957 object->n_ops--;
36958 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
36959 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
36960 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
36961 BUG();
36962
36963 - fscache_stat(&fscache_n_op_release);
36964 + fscache_stat_unchecked(&fscache_n_op_release);
36965
36966 if (op->release) {
36967 op->release(op);
36968 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
36969 * lock, and defer it otherwise */
36970 if (!spin_trylock(&object->lock)) {
36971 _debug("defer put");
36972 - fscache_stat(&fscache_n_op_deferred_release);
36973 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
36974
36975 cache = object->cache;
36976 spin_lock(&cache->op_gc_list_lock);
36977 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
36978
36979 _debug("GC DEFERRED REL OBJ%x OP%x",
36980 object->debug_id, op->debug_id);
36981 - fscache_stat(&fscache_n_op_gc);
36982 + fscache_stat_unchecked(&fscache_n_op_gc);
36983
36984 ASSERTCMP(atomic_read(&op->usage), ==, 0);
36985
36986 diff -urNp linux-3.0.3/fs/fscache/page.c linux-3.0.3/fs/fscache/page.c
36987 --- linux-3.0.3/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
36988 +++ linux-3.0.3/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
36989 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
36990 val = radix_tree_lookup(&cookie->stores, page->index);
36991 if (!val) {
36992 rcu_read_unlock();
36993 - fscache_stat(&fscache_n_store_vmscan_not_storing);
36994 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
36995 __fscache_uncache_page(cookie, page);
36996 return true;
36997 }
36998 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
36999 spin_unlock(&cookie->stores_lock);
37000
37001 if (xpage) {
37002 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37003 - fscache_stat(&fscache_n_store_radix_deletes);
37004 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37005 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37006 ASSERTCMP(xpage, ==, page);
37007 } else {
37008 - fscache_stat(&fscache_n_store_vmscan_gone);
37009 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37010 }
37011
37012 wake_up_bit(&cookie->flags, 0);
37013 @@ -107,7 +107,7 @@ page_busy:
37014 /* we might want to wait here, but that could deadlock the allocator as
37015 * the work threads writing to the cache may all end up sleeping
37016 * on memory allocation */
37017 - fscache_stat(&fscache_n_store_vmscan_busy);
37018 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37019 return false;
37020 }
37021 EXPORT_SYMBOL(__fscache_maybe_release_page);
37022 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37023 FSCACHE_COOKIE_STORING_TAG);
37024 if (!radix_tree_tag_get(&cookie->stores, page->index,
37025 FSCACHE_COOKIE_PENDING_TAG)) {
37026 - fscache_stat(&fscache_n_store_radix_deletes);
37027 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37028 xpage = radix_tree_delete(&cookie->stores, page->index);
37029 }
37030 spin_unlock(&cookie->stores_lock);
37031 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37032
37033 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37034
37035 - fscache_stat(&fscache_n_attr_changed_calls);
37036 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37037
37038 if (fscache_object_is_active(object)) {
37039 fscache_stat(&fscache_n_cop_attr_changed);
37040 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
37041
37042 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37043
37044 - fscache_stat(&fscache_n_attr_changed);
37045 + fscache_stat_unchecked(&fscache_n_attr_changed);
37046
37047 op = kzalloc(sizeof(*op), GFP_KERNEL);
37048 if (!op) {
37049 - fscache_stat(&fscache_n_attr_changed_nomem);
37050 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37051 _leave(" = -ENOMEM");
37052 return -ENOMEM;
37053 }
37054 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
37055 if (fscache_submit_exclusive_op(object, op) < 0)
37056 goto nobufs;
37057 spin_unlock(&cookie->lock);
37058 - fscache_stat(&fscache_n_attr_changed_ok);
37059 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37060 fscache_put_operation(op);
37061 _leave(" = 0");
37062 return 0;
37063 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
37064 nobufs:
37065 spin_unlock(&cookie->lock);
37066 kfree(op);
37067 - fscache_stat(&fscache_n_attr_changed_nobufs);
37068 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37069 _leave(" = %d", -ENOBUFS);
37070 return -ENOBUFS;
37071 }
37072 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
37073 /* allocate a retrieval operation and attempt to submit it */
37074 op = kzalloc(sizeof(*op), GFP_NOIO);
37075 if (!op) {
37076 - fscache_stat(&fscache_n_retrievals_nomem);
37077 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37078 return NULL;
37079 }
37080
37081 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
37082 return 0;
37083 }
37084
37085 - fscache_stat(&fscache_n_retrievals_wait);
37086 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37087
37088 jif = jiffies;
37089 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37090 fscache_wait_bit_interruptible,
37091 TASK_INTERRUPTIBLE) != 0) {
37092 - fscache_stat(&fscache_n_retrievals_intr);
37093 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37094 _leave(" = -ERESTARTSYS");
37095 return -ERESTARTSYS;
37096 }
37097 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
37098 */
37099 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37100 struct fscache_retrieval *op,
37101 - atomic_t *stat_op_waits,
37102 - atomic_t *stat_object_dead)
37103 + atomic_unchecked_t *stat_op_waits,
37104 + atomic_unchecked_t *stat_object_dead)
37105 {
37106 int ret;
37107
37108 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
37109 goto check_if_dead;
37110
37111 _debug(">>> WT");
37112 - fscache_stat(stat_op_waits);
37113 + fscache_stat_unchecked(stat_op_waits);
37114 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37115 fscache_wait_bit_interruptible,
37116 TASK_INTERRUPTIBLE) < 0) {
37117 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
37118
37119 check_if_dead:
37120 if (unlikely(fscache_object_is_dead(object))) {
37121 - fscache_stat(stat_object_dead);
37122 + fscache_stat_unchecked(stat_object_dead);
37123 return -ENOBUFS;
37124 }
37125 return 0;
37126 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
37127
37128 _enter("%p,%p,,,", cookie, page);
37129
37130 - fscache_stat(&fscache_n_retrievals);
37131 + fscache_stat_unchecked(&fscache_n_retrievals);
37132
37133 if (hlist_empty(&cookie->backing_objects))
37134 goto nobufs;
37135 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
37136 goto nobufs_unlock;
37137 spin_unlock(&cookie->lock);
37138
37139 - fscache_stat(&fscache_n_retrieval_ops);
37140 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37141
37142 /* pin the netfs read context in case we need to do the actual netfs
37143 * read because we've encountered a cache read failure */
37144 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
37145
37146 error:
37147 if (ret == -ENOMEM)
37148 - fscache_stat(&fscache_n_retrievals_nomem);
37149 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37150 else if (ret == -ERESTARTSYS)
37151 - fscache_stat(&fscache_n_retrievals_intr);
37152 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37153 else if (ret == -ENODATA)
37154 - fscache_stat(&fscache_n_retrievals_nodata);
37155 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37156 else if (ret < 0)
37157 - fscache_stat(&fscache_n_retrievals_nobufs);
37158 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37159 else
37160 - fscache_stat(&fscache_n_retrievals_ok);
37161 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37162
37163 fscache_put_retrieval(op);
37164 _leave(" = %d", ret);
37165 @@ -429,7 +429,7 @@ nobufs_unlock:
37166 spin_unlock(&cookie->lock);
37167 kfree(op);
37168 nobufs:
37169 - fscache_stat(&fscache_n_retrievals_nobufs);
37170 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37171 _leave(" = -ENOBUFS");
37172 return -ENOBUFS;
37173 }
37174 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
37175
37176 _enter("%p,,%d,,,", cookie, *nr_pages);
37177
37178 - fscache_stat(&fscache_n_retrievals);
37179 + fscache_stat_unchecked(&fscache_n_retrievals);
37180
37181 if (hlist_empty(&cookie->backing_objects))
37182 goto nobufs;
37183 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
37184 goto nobufs_unlock;
37185 spin_unlock(&cookie->lock);
37186
37187 - fscache_stat(&fscache_n_retrieval_ops);
37188 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37189
37190 /* pin the netfs read context in case we need to do the actual netfs
37191 * read because we've encountered a cache read failure */
37192 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
37193
37194 error:
37195 if (ret == -ENOMEM)
37196 - fscache_stat(&fscache_n_retrievals_nomem);
37197 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37198 else if (ret == -ERESTARTSYS)
37199 - fscache_stat(&fscache_n_retrievals_intr);
37200 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37201 else if (ret == -ENODATA)
37202 - fscache_stat(&fscache_n_retrievals_nodata);
37203 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37204 else if (ret < 0)
37205 - fscache_stat(&fscache_n_retrievals_nobufs);
37206 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37207 else
37208 - fscache_stat(&fscache_n_retrievals_ok);
37209 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37210
37211 fscache_put_retrieval(op);
37212 _leave(" = %d", ret);
37213 @@ -545,7 +545,7 @@ nobufs_unlock:
37214 spin_unlock(&cookie->lock);
37215 kfree(op);
37216 nobufs:
37217 - fscache_stat(&fscache_n_retrievals_nobufs);
37218 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37219 _leave(" = -ENOBUFS");
37220 return -ENOBUFS;
37221 }
37222 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
37223
37224 _enter("%p,%p,,,", cookie, page);
37225
37226 - fscache_stat(&fscache_n_allocs);
37227 + fscache_stat_unchecked(&fscache_n_allocs);
37228
37229 if (hlist_empty(&cookie->backing_objects))
37230 goto nobufs;
37231 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
37232 goto nobufs_unlock;
37233 spin_unlock(&cookie->lock);
37234
37235 - fscache_stat(&fscache_n_alloc_ops);
37236 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37237
37238 ret = fscache_wait_for_retrieval_activation(
37239 object, op,
37240 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
37241
37242 error:
37243 if (ret == -ERESTARTSYS)
37244 - fscache_stat(&fscache_n_allocs_intr);
37245 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37246 else if (ret < 0)
37247 - fscache_stat(&fscache_n_allocs_nobufs);
37248 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37249 else
37250 - fscache_stat(&fscache_n_allocs_ok);
37251 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37252
37253 fscache_put_retrieval(op);
37254 _leave(" = %d", ret);
37255 @@ -625,7 +625,7 @@ nobufs_unlock:
37256 spin_unlock(&cookie->lock);
37257 kfree(op);
37258 nobufs:
37259 - fscache_stat(&fscache_n_allocs_nobufs);
37260 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37261 _leave(" = -ENOBUFS");
37262 return -ENOBUFS;
37263 }
37264 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
37265
37266 spin_lock(&cookie->stores_lock);
37267
37268 - fscache_stat(&fscache_n_store_calls);
37269 + fscache_stat_unchecked(&fscache_n_store_calls);
37270
37271 /* find a page to store */
37272 page = NULL;
37273 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
37274 page = results[0];
37275 _debug("gang %d [%lx]", n, page->index);
37276 if (page->index > op->store_limit) {
37277 - fscache_stat(&fscache_n_store_pages_over_limit);
37278 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37279 goto superseded;
37280 }
37281
37282 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
37283 spin_unlock(&cookie->stores_lock);
37284 spin_unlock(&object->lock);
37285
37286 - fscache_stat(&fscache_n_store_pages);
37287 + fscache_stat_unchecked(&fscache_n_store_pages);
37288 fscache_stat(&fscache_n_cop_write_page);
37289 ret = object->cache->ops->write_page(op, page);
37290 fscache_stat_d(&fscache_n_cop_write_page);
37291 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
37292 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37293 ASSERT(PageFsCache(page));
37294
37295 - fscache_stat(&fscache_n_stores);
37296 + fscache_stat_unchecked(&fscache_n_stores);
37297
37298 op = kzalloc(sizeof(*op), GFP_NOIO);
37299 if (!op)
37300 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
37301 spin_unlock(&cookie->stores_lock);
37302 spin_unlock(&object->lock);
37303
37304 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37305 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37306 op->store_limit = object->store_limit;
37307
37308 if (fscache_submit_op(object, &op->op) < 0)
37309 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
37310
37311 spin_unlock(&cookie->lock);
37312 radix_tree_preload_end();
37313 - fscache_stat(&fscache_n_store_ops);
37314 - fscache_stat(&fscache_n_stores_ok);
37315 + fscache_stat_unchecked(&fscache_n_store_ops);
37316 + fscache_stat_unchecked(&fscache_n_stores_ok);
37317
37318 /* the work queue now carries its own ref on the object */
37319 fscache_put_operation(&op->op);
37320 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
37321 return 0;
37322
37323 already_queued:
37324 - fscache_stat(&fscache_n_stores_again);
37325 + fscache_stat_unchecked(&fscache_n_stores_again);
37326 already_pending:
37327 spin_unlock(&cookie->stores_lock);
37328 spin_unlock(&object->lock);
37329 spin_unlock(&cookie->lock);
37330 radix_tree_preload_end();
37331 kfree(op);
37332 - fscache_stat(&fscache_n_stores_ok);
37333 + fscache_stat_unchecked(&fscache_n_stores_ok);
37334 _leave(" = 0");
37335 return 0;
37336
37337 @@ -851,14 +851,14 @@ nobufs:
37338 spin_unlock(&cookie->lock);
37339 radix_tree_preload_end();
37340 kfree(op);
37341 - fscache_stat(&fscache_n_stores_nobufs);
37342 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37343 _leave(" = -ENOBUFS");
37344 return -ENOBUFS;
37345
37346 nomem_free:
37347 kfree(op);
37348 nomem:
37349 - fscache_stat(&fscache_n_stores_oom);
37350 + fscache_stat_unchecked(&fscache_n_stores_oom);
37351 _leave(" = -ENOMEM");
37352 return -ENOMEM;
37353 }
37354 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
37355 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37356 ASSERTCMP(page, !=, NULL);
37357
37358 - fscache_stat(&fscache_n_uncaches);
37359 + fscache_stat_unchecked(&fscache_n_uncaches);
37360
37361 /* cache withdrawal may beat us to it */
37362 if (!PageFsCache(page))
37363 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
37364 unsigned long loop;
37365
37366 #ifdef CONFIG_FSCACHE_STATS
37367 - atomic_add(pagevec->nr, &fscache_n_marks);
37368 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37369 #endif
37370
37371 for (loop = 0; loop < pagevec->nr; loop++) {
37372 diff -urNp linux-3.0.3/fs/fscache/stats.c linux-3.0.3/fs/fscache/stats.c
37373 --- linux-3.0.3/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
37374 +++ linux-3.0.3/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
37375 @@ -18,95 +18,95 @@
37376 /*
37377 * operation counters
37378 */
37379 -atomic_t fscache_n_op_pend;
37380 -atomic_t fscache_n_op_run;
37381 -atomic_t fscache_n_op_enqueue;
37382 -atomic_t fscache_n_op_requeue;
37383 -atomic_t fscache_n_op_deferred_release;
37384 -atomic_t fscache_n_op_release;
37385 -atomic_t fscache_n_op_gc;
37386 -atomic_t fscache_n_op_cancelled;
37387 -atomic_t fscache_n_op_rejected;
37388 -
37389 -atomic_t fscache_n_attr_changed;
37390 -atomic_t fscache_n_attr_changed_ok;
37391 -atomic_t fscache_n_attr_changed_nobufs;
37392 -atomic_t fscache_n_attr_changed_nomem;
37393 -atomic_t fscache_n_attr_changed_calls;
37394 -
37395 -atomic_t fscache_n_allocs;
37396 -atomic_t fscache_n_allocs_ok;
37397 -atomic_t fscache_n_allocs_wait;
37398 -atomic_t fscache_n_allocs_nobufs;
37399 -atomic_t fscache_n_allocs_intr;
37400 -atomic_t fscache_n_allocs_object_dead;
37401 -atomic_t fscache_n_alloc_ops;
37402 -atomic_t fscache_n_alloc_op_waits;
37403 -
37404 -atomic_t fscache_n_retrievals;
37405 -atomic_t fscache_n_retrievals_ok;
37406 -atomic_t fscache_n_retrievals_wait;
37407 -atomic_t fscache_n_retrievals_nodata;
37408 -atomic_t fscache_n_retrievals_nobufs;
37409 -atomic_t fscache_n_retrievals_intr;
37410 -atomic_t fscache_n_retrievals_nomem;
37411 -atomic_t fscache_n_retrievals_object_dead;
37412 -atomic_t fscache_n_retrieval_ops;
37413 -atomic_t fscache_n_retrieval_op_waits;
37414 -
37415 -atomic_t fscache_n_stores;
37416 -atomic_t fscache_n_stores_ok;
37417 -atomic_t fscache_n_stores_again;
37418 -atomic_t fscache_n_stores_nobufs;
37419 -atomic_t fscache_n_stores_oom;
37420 -atomic_t fscache_n_store_ops;
37421 -atomic_t fscache_n_store_calls;
37422 -atomic_t fscache_n_store_pages;
37423 -atomic_t fscache_n_store_radix_deletes;
37424 -atomic_t fscache_n_store_pages_over_limit;
37425 -
37426 -atomic_t fscache_n_store_vmscan_not_storing;
37427 -atomic_t fscache_n_store_vmscan_gone;
37428 -atomic_t fscache_n_store_vmscan_busy;
37429 -atomic_t fscache_n_store_vmscan_cancelled;
37430 -
37431 -atomic_t fscache_n_marks;
37432 -atomic_t fscache_n_uncaches;
37433 -
37434 -atomic_t fscache_n_acquires;
37435 -atomic_t fscache_n_acquires_null;
37436 -atomic_t fscache_n_acquires_no_cache;
37437 -atomic_t fscache_n_acquires_ok;
37438 -atomic_t fscache_n_acquires_nobufs;
37439 -atomic_t fscache_n_acquires_oom;
37440 -
37441 -atomic_t fscache_n_updates;
37442 -atomic_t fscache_n_updates_null;
37443 -atomic_t fscache_n_updates_run;
37444 -
37445 -atomic_t fscache_n_relinquishes;
37446 -atomic_t fscache_n_relinquishes_null;
37447 -atomic_t fscache_n_relinquishes_waitcrt;
37448 -atomic_t fscache_n_relinquishes_retire;
37449 -
37450 -atomic_t fscache_n_cookie_index;
37451 -atomic_t fscache_n_cookie_data;
37452 -atomic_t fscache_n_cookie_special;
37453 -
37454 -atomic_t fscache_n_object_alloc;
37455 -atomic_t fscache_n_object_no_alloc;
37456 -atomic_t fscache_n_object_lookups;
37457 -atomic_t fscache_n_object_lookups_negative;
37458 -atomic_t fscache_n_object_lookups_positive;
37459 -atomic_t fscache_n_object_lookups_timed_out;
37460 -atomic_t fscache_n_object_created;
37461 -atomic_t fscache_n_object_avail;
37462 -atomic_t fscache_n_object_dead;
37463 -
37464 -atomic_t fscache_n_checkaux_none;
37465 -atomic_t fscache_n_checkaux_okay;
37466 -atomic_t fscache_n_checkaux_update;
37467 -atomic_t fscache_n_checkaux_obsolete;
37468 +atomic_unchecked_t fscache_n_op_pend;
37469 +atomic_unchecked_t fscache_n_op_run;
37470 +atomic_unchecked_t fscache_n_op_enqueue;
37471 +atomic_unchecked_t fscache_n_op_requeue;
37472 +atomic_unchecked_t fscache_n_op_deferred_release;
37473 +atomic_unchecked_t fscache_n_op_release;
37474 +atomic_unchecked_t fscache_n_op_gc;
37475 +atomic_unchecked_t fscache_n_op_cancelled;
37476 +atomic_unchecked_t fscache_n_op_rejected;
37477 +
37478 +atomic_unchecked_t fscache_n_attr_changed;
37479 +atomic_unchecked_t fscache_n_attr_changed_ok;
37480 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37481 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37482 +atomic_unchecked_t fscache_n_attr_changed_calls;
37483 +
37484 +atomic_unchecked_t fscache_n_allocs;
37485 +atomic_unchecked_t fscache_n_allocs_ok;
37486 +atomic_unchecked_t fscache_n_allocs_wait;
37487 +atomic_unchecked_t fscache_n_allocs_nobufs;
37488 +atomic_unchecked_t fscache_n_allocs_intr;
37489 +atomic_unchecked_t fscache_n_allocs_object_dead;
37490 +atomic_unchecked_t fscache_n_alloc_ops;
37491 +atomic_unchecked_t fscache_n_alloc_op_waits;
37492 +
37493 +atomic_unchecked_t fscache_n_retrievals;
37494 +atomic_unchecked_t fscache_n_retrievals_ok;
37495 +atomic_unchecked_t fscache_n_retrievals_wait;
37496 +atomic_unchecked_t fscache_n_retrievals_nodata;
37497 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37498 +atomic_unchecked_t fscache_n_retrievals_intr;
37499 +atomic_unchecked_t fscache_n_retrievals_nomem;
37500 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37501 +atomic_unchecked_t fscache_n_retrieval_ops;
37502 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37503 +
37504 +atomic_unchecked_t fscache_n_stores;
37505 +atomic_unchecked_t fscache_n_stores_ok;
37506 +atomic_unchecked_t fscache_n_stores_again;
37507 +atomic_unchecked_t fscache_n_stores_nobufs;
37508 +atomic_unchecked_t fscache_n_stores_oom;
37509 +atomic_unchecked_t fscache_n_store_ops;
37510 +atomic_unchecked_t fscache_n_store_calls;
37511 +atomic_unchecked_t fscache_n_store_pages;
37512 +atomic_unchecked_t fscache_n_store_radix_deletes;
37513 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37514 +
37515 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37516 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37517 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37518 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37519 +
37520 +atomic_unchecked_t fscache_n_marks;
37521 +atomic_unchecked_t fscache_n_uncaches;
37522 +
37523 +atomic_unchecked_t fscache_n_acquires;
37524 +atomic_unchecked_t fscache_n_acquires_null;
37525 +atomic_unchecked_t fscache_n_acquires_no_cache;
37526 +atomic_unchecked_t fscache_n_acquires_ok;
37527 +atomic_unchecked_t fscache_n_acquires_nobufs;
37528 +atomic_unchecked_t fscache_n_acquires_oom;
37529 +
37530 +atomic_unchecked_t fscache_n_updates;
37531 +atomic_unchecked_t fscache_n_updates_null;
37532 +atomic_unchecked_t fscache_n_updates_run;
37533 +
37534 +atomic_unchecked_t fscache_n_relinquishes;
37535 +atomic_unchecked_t fscache_n_relinquishes_null;
37536 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37537 +atomic_unchecked_t fscache_n_relinquishes_retire;
37538 +
37539 +atomic_unchecked_t fscache_n_cookie_index;
37540 +atomic_unchecked_t fscache_n_cookie_data;
37541 +atomic_unchecked_t fscache_n_cookie_special;
37542 +
37543 +atomic_unchecked_t fscache_n_object_alloc;
37544 +atomic_unchecked_t fscache_n_object_no_alloc;
37545 +atomic_unchecked_t fscache_n_object_lookups;
37546 +atomic_unchecked_t fscache_n_object_lookups_negative;
37547 +atomic_unchecked_t fscache_n_object_lookups_positive;
37548 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37549 +atomic_unchecked_t fscache_n_object_created;
37550 +atomic_unchecked_t fscache_n_object_avail;
37551 +atomic_unchecked_t fscache_n_object_dead;
37552 +
37553 +atomic_unchecked_t fscache_n_checkaux_none;
37554 +atomic_unchecked_t fscache_n_checkaux_okay;
37555 +atomic_unchecked_t fscache_n_checkaux_update;
37556 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37557
37558 atomic_t fscache_n_cop_alloc_object;
37559 atomic_t fscache_n_cop_lookup_object;
37560 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37561 seq_puts(m, "FS-Cache statistics\n");
37562
37563 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37564 - atomic_read(&fscache_n_cookie_index),
37565 - atomic_read(&fscache_n_cookie_data),
37566 - atomic_read(&fscache_n_cookie_special));
37567 + atomic_read_unchecked(&fscache_n_cookie_index),
37568 + atomic_read_unchecked(&fscache_n_cookie_data),
37569 + atomic_read_unchecked(&fscache_n_cookie_special));
37570
37571 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37572 - atomic_read(&fscache_n_object_alloc),
37573 - atomic_read(&fscache_n_object_no_alloc),
37574 - atomic_read(&fscache_n_object_avail),
37575 - atomic_read(&fscache_n_object_dead));
37576 + atomic_read_unchecked(&fscache_n_object_alloc),
37577 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37578 + atomic_read_unchecked(&fscache_n_object_avail),
37579 + atomic_read_unchecked(&fscache_n_object_dead));
37580 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37581 - atomic_read(&fscache_n_checkaux_none),
37582 - atomic_read(&fscache_n_checkaux_okay),
37583 - atomic_read(&fscache_n_checkaux_update),
37584 - atomic_read(&fscache_n_checkaux_obsolete));
37585 + atomic_read_unchecked(&fscache_n_checkaux_none),
37586 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37587 + atomic_read_unchecked(&fscache_n_checkaux_update),
37588 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37589
37590 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37591 - atomic_read(&fscache_n_marks),
37592 - atomic_read(&fscache_n_uncaches));
37593 + atomic_read_unchecked(&fscache_n_marks),
37594 + atomic_read_unchecked(&fscache_n_uncaches));
37595
37596 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37597 " oom=%u\n",
37598 - atomic_read(&fscache_n_acquires),
37599 - atomic_read(&fscache_n_acquires_null),
37600 - atomic_read(&fscache_n_acquires_no_cache),
37601 - atomic_read(&fscache_n_acquires_ok),
37602 - atomic_read(&fscache_n_acquires_nobufs),
37603 - atomic_read(&fscache_n_acquires_oom));
37604 + atomic_read_unchecked(&fscache_n_acquires),
37605 + atomic_read_unchecked(&fscache_n_acquires_null),
37606 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37607 + atomic_read_unchecked(&fscache_n_acquires_ok),
37608 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37609 + atomic_read_unchecked(&fscache_n_acquires_oom));
37610
37611 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37612 - atomic_read(&fscache_n_object_lookups),
37613 - atomic_read(&fscache_n_object_lookups_negative),
37614 - atomic_read(&fscache_n_object_lookups_positive),
37615 - atomic_read(&fscache_n_object_created),
37616 - atomic_read(&fscache_n_object_lookups_timed_out));
37617 + atomic_read_unchecked(&fscache_n_object_lookups),
37618 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37619 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37620 + atomic_read_unchecked(&fscache_n_object_created),
37621 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37622
37623 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37624 - atomic_read(&fscache_n_updates),
37625 - atomic_read(&fscache_n_updates_null),
37626 - atomic_read(&fscache_n_updates_run));
37627 + atomic_read_unchecked(&fscache_n_updates),
37628 + atomic_read_unchecked(&fscache_n_updates_null),
37629 + atomic_read_unchecked(&fscache_n_updates_run));
37630
37631 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37632 - atomic_read(&fscache_n_relinquishes),
37633 - atomic_read(&fscache_n_relinquishes_null),
37634 - atomic_read(&fscache_n_relinquishes_waitcrt),
37635 - atomic_read(&fscache_n_relinquishes_retire));
37636 + atomic_read_unchecked(&fscache_n_relinquishes),
37637 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37638 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37639 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37640
37641 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37642 - atomic_read(&fscache_n_attr_changed),
37643 - atomic_read(&fscache_n_attr_changed_ok),
37644 - atomic_read(&fscache_n_attr_changed_nobufs),
37645 - atomic_read(&fscache_n_attr_changed_nomem),
37646 - atomic_read(&fscache_n_attr_changed_calls));
37647 + atomic_read_unchecked(&fscache_n_attr_changed),
37648 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37649 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37650 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37651 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37652
37653 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37654 - atomic_read(&fscache_n_allocs),
37655 - atomic_read(&fscache_n_allocs_ok),
37656 - atomic_read(&fscache_n_allocs_wait),
37657 - atomic_read(&fscache_n_allocs_nobufs),
37658 - atomic_read(&fscache_n_allocs_intr));
37659 + atomic_read_unchecked(&fscache_n_allocs),
37660 + atomic_read_unchecked(&fscache_n_allocs_ok),
37661 + atomic_read_unchecked(&fscache_n_allocs_wait),
37662 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37663 + atomic_read_unchecked(&fscache_n_allocs_intr));
37664 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37665 - atomic_read(&fscache_n_alloc_ops),
37666 - atomic_read(&fscache_n_alloc_op_waits),
37667 - atomic_read(&fscache_n_allocs_object_dead));
37668 + atomic_read_unchecked(&fscache_n_alloc_ops),
37669 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37670 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37671
37672 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37673 " int=%u oom=%u\n",
37674 - atomic_read(&fscache_n_retrievals),
37675 - atomic_read(&fscache_n_retrievals_ok),
37676 - atomic_read(&fscache_n_retrievals_wait),
37677 - atomic_read(&fscache_n_retrievals_nodata),
37678 - atomic_read(&fscache_n_retrievals_nobufs),
37679 - atomic_read(&fscache_n_retrievals_intr),
37680 - atomic_read(&fscache_n_retrievals_nomem));
37681 + atomic_read_unchecked(&fscache_n_retrievals),
37682 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37683 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37684 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37685 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37686 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37687 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37688 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37689 - atomic_read(&fscache_n_retrieval_ops),
37690 - atomic_read(&fscache_n_retrieval_op_waits),
37691 - atomic_read(&fscache_n_retrievals_object_dead));
37692 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37693 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37694 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37695
37696 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37697 - atomic_read(&fscache_n_stores),
37698 - atomic_read(&fscache_n_stores_ok),
37699 - atomic_read(&fscache_n_stores_again),
37700 - atomic_read(&fscache_n_stores_nobufs),
37701 - atomic_read(&fscache_n_stores_oom));
37702 + atomic_read_unchecked(&fscache_n_stores),
37703 + atomic_read_unchecked(&fscache_n_stores_ok),
37704 + atomic_read_unchecked(&fscache_n_stores_again),
37705 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37706 + atomic_read_unchecked(&fscache_n_stores_oom));
37707 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37708 - atomic_read(&fscache_n_store_ops),
37709 - atomic_read(&fscache_n_store_calls),
37710 - atomic_read(&fscache_n_store_pages),
37711 - atomic_read(&fscache_n_store_radix_deletes),
37712 - atomic_read(&fscache_n_store_pages_over_limit));
37713 + atomic_read_unchecked(&fscache_n_store_ops),
37714 + atomic_read_unchecked(&fscache_n_store_calls),
37715 + atomic_read_unchecked(&fscache_n_store_pages),
37716 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37717 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37718
37719 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37720 - atomic_read(&fscache_n_store_vmscan_not_storing),
37721 - atomic_read(&fscache_n_store_vmscan_gone),
37722 - atomic_read(&fscache_n_store_vmscan_busy),
37723 - atomic_read(&fscache_n_store_vmscan_cancelled));
37724 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37725 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37726 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37727 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37728
37729 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37730 - atomic_read(&fscache_n_op_pend),
37731 - atomic_read(&fscache_n_op_run),
37732 - atomic_read(&fscache_n_op_enqueue),
37733 - atomic_read(&fscache_n_op_cancelled),
37734 - atomic_read(&fscache_n_op_rejected));
37735 + atomic_read_unchecked(&fscache_n_op_pend),
37736 + atomic_read_unchecked(&fscache_n_op_run),
37737 + atomic_read_unchecked(&fscache_n_op_enqueue),
37738 + atomic_read_unchecked(&fscache_n_op_cancelled),
37739 + atomic_read_unchecked(&fscache_n_op_rejected));
37740 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37741 - atomic_read(&fscache_n_op_deferred_release),
37742 - atomic_read(&fscache_n_op_release),
37743 - atomic_read(&fscache_n_op_gc));
37744 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37745 + atomic_read_unchecked(&fscache_n_op_release),
37746 + atomic_read_unchecked(&fscache_n_op_gc));
37747
37748 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37749 atomic_read(&fscache_n_cop_alloc_object),
37750 diff -urNp linux-3.0.3/fs/fs_struct.c linux-3.0.3/fs/fs_struct.c
37751 --- linux-3.0.3/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
37752 +++ linux-3.0.3/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
37753 @@ -4,6 +4,7 @@
37754 #include <linux/path.h>
37755 #include <linux/slab.h>
37756 #include <linux/fs_struct.h>
37757 +#include <linux/grsecurity.h>
37758 #include "internal.h"
37759
37760 static inline void path_get_longterm(struct path *path)
37761 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37762 old_root = fs->root;
37763 fs->root = *path;
37764 path_get_longterm(path);
37765 + gr_set_chroot_entries(current, path);
37766 write_seqcount_end(&fs->seq);
37767 spin_unlock(&fs->lock);
37768 if (old_root.dentry)
37769 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37770 && fs->root.mnt == old_root->mnt) {
37771 path_get_longterm(new_root);
37772 fs->root = *new_root;
37773 + gr_set_chroot_entries(p, new_root);
37774 count++;
37775 }
37776 if (fs->pwd.dentry == old_root->dentry
37777 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37778 spin_lock(&fs->lock);
37779 write_seqcount_begin(&fs->seq);
37780 tsk->fs = NULL;
37781 - kill = !--fs->users;
37782 + gr_clear_chroot_entries(tsk);
37783 + kill = !atomic_dec_return(&fs->users);
37784 write_seqcount_end(&fs->seq);
37785 spin_unlock(&fs->lock);
37786 task_unlock(tsk);
37787 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37788 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37789 /* We don't need to lock fs - think why ;-) */
37790 if (fs) {
37791 - fs->users = 1;
37792 + atomic_set(&fs->users, 1);
37793 fs->in_exec = 0;
37794 spin_lock_init(&fs->lock);
37795 seqcount_init(&fs->seq);
37796 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37797 spin_lock(&old->lock);
37798 fs->root = old->root;
37799 path_get_longterm(&fs->root);
37800 + /* instead of calling gr_set_chroot_entries here,
37801 + we call it from every caller of this function
37802 + */
37803 fs->pwd = old->pwd;
37804 path_get_longterm(&fs->pwd);
37805 spin_unlock(&old->lock);
37806 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37807
37808 task_lock(current);
37809 spin_lock(&fs->lock);
37810 - kill = !--fs->users;
37811 + kill = !atomic_dec_return(&fs->users);
37812 current->fs = new_fs;
37813 + gr_set_chroot_entries(current, &new_fs->root);
37814 spin_unlock(&fs->lock);
37815 task_unlock(current);
37816
37817 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37818
37819 /* to be mentioned only in INIT_TASK */
37820 struct fs_struct init_fs = {
37821 - .users = 1,
37822 + .users = ATOMIC_INIT(1),
37823 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37824 .seq = SEQCNT_ZERO,
37825 .umask = 0022,
37826 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37827 task_lock(current);
37828
37829 spin_lock(&init_fs.lock);
37830 - init_fs.users++;
37831 + atomic_inc(&init_fs.users);
37832 spin_unlock(&init_fs.lock);
37833
37834 spin_lock(&fs->lock);
37835 current->fs = &init_fs;
37836 - kill = !--fs->users;
37837 + gr_set_chroot_entries(current, &current->fs->root);
37838 + kill = !atomic_dec_return(&fs->users);
37839 spin_unlock(&fs->lock);
37840
37841 task_unlock(current);
37842 diff -urNp linux-3.0.3/fs/fuse/cuse.c linux-3.0.3/fs/fuse/cuse.c
37843 --- linux-3.0.3/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
37844 +++ linux-3.0.3/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
37845 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
37846 INIT_LIST_HEAD(&cuse_conntbl[i]);
37847
37848 /* inherit and extend fuse_dev_operations */
37849 - cuse_channel_fops = fuse_dev_operations;
37850 - cuse_channel_fops.owner = THIS_MODULE;
37851 - cuse_channel_fops.open = cuse_channel_open;
37852 - cuse_channel_fops.release = cuse_channel_release;
37853 + pax_open_kernel();
37854 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
37855 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
37856 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
37857 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
37858 + pax_close_kernel();
37859
37860 cuse_class = class_create(THIS_MODULE, "cuse");
37861 if (IS_ERR(cuse_class))
37862 diff -urNp linux-3.0.3/fs/fuse/dev.c linux-3.0.3/fs/fuse/dev.c
37863 --- linux-3.0.3/fs/fuse/dev.c 2011-07-21 22:17:23.000000000 -0400
37864 +++ linux-3.0.3/fs/fuse/dev.c 2011-08-23 21:47:56.000000000 -0400
37865 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
37866 ret = 0;
37867 pipe_lock(pipe);
37868
37869 - if (!pipe->readers) {
37870 + if (!atomic_read(&pipe->readers)) {
37871 send_sig(SIGPIPE, current, 0);
37872 if (!ret)
37873 ret = -EPIPE;
37874 diff -urNp linux-3.0.3/fs/fuse/dir.c linux-3.0.3/fs/fuse/dir.c
37875 --- linux-3.0.3/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
37876 +++ linux-3.0.3/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
37877 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
37878 return link;
37879 }
37880
37881 -static void free_link(char *link)
37882 +static void free_link(const char *link)
37883 {
37884 if (!IS_ERR(link))
37885 free_page((unsigned long) link);
37886 diff -urNp linux-3.0.3/fs/gfs2/inode.c linux-3.0.3/fs/gfs2/inode.c
37887 --- linux-3.0.3/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
37888 +++ linux-3.0.3/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
37889 @@ -1525,7 +1525,7 @@ out:
37890
37891 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37892 {
37893 - char *s = nd_get_link(nd);
37894 + const char *s = nd_get_link(nd);
37895 if (!IS_ERR(s))
37896 kfree(s);
37897 }
37898 diff -urNp linux-3.0.3/fs/hfsplus/catalog.c linux-3.0.3/fs/hfsplus/catalog.c
37899 --- linux-3.0.3/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
37900 +++ linux-3.0.3/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
37901 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
37902 int err;
37903 u16 type;
37904
37905 + pax_track_stack();
37906 +
37907 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
37908 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
37909 if (err)
37910 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
37911 int entry_size;
37912 int err;
37913
37914 + pax_track_stack();
37915 +
37916 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
37917 str->name, cnid, inode->i_nlink);
37918 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
37919 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
37920 int entry_size, type;
37921 int err = 0;
37922
37923 + pax_track_stack();
37924 +
37925 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
37926 cnid, src_dir->i_ino, src_name->name,
37927 dst_dir->i_ino, dst_name->name);
37928 diff -urNp linux-3.0.3/fs/hfsplus/dir.c linux-3.0.3/fs/hfsplus/dir.c
37929 --- linux-3.0.3/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
37930 +++ linux-3.0.3/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
37931 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
37932 struct hfsplus_readdir_data *rd;
37933 u16 type;
37934
37935 + pax_track_stack();
37936 +
37937 if (filp->f_pos >= inode->i_size)
37938 return 0;
37939
37940 diff -urNp linux-3.0.3/fs/hfsplus/inode.c linux-3.0.3/fs/hfsplus/inode.c
37941 --- linux-3.0.3/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
37942 +++ linux-3.0.3/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
37943 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
37944 int res = 0;
37945 u16 type;
37946
37947 + pax_track_stack();
37948 +
37949 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
37950
37951 HFSPLUS_I(inode)->linkid = 0;
37952 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
37953 struct hfs_find_data fd;
37954 hfsplus_cat_entry entry;
37955
37956 + pax_track_stack();
37957 +
37958 if (HFSPLUS_IS_RSRC(inode))
37959 main_inode = HFSPLUS_I(inode)->rsrc_inode;
37960
37961 diff -urNp linux-3.0.3/fs/hfsplus/ioctl.c linux-3.0.3/fs/hfsplus/ioctl.c
37962 --- linux-3.0.3/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
37963 +++ linux-3.0.3/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
37964 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
37965 struct hfsplus_cat_file *file;
37966 int res;
37967
37968 + pax_track_stack();
37969 +
37970 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
37971 return -EOPNOTSUPP;
37972
37973 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
37974 struct hfsplus_cat_file *file;
37975 ssize_t res = 0;
37976
37977 + pax_track_stack();
37978 +
37979 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
37980 return -EOPNOTSUPP;
37981
37982 diff -urNp linux-3.0.3/fs/hfsplus/super.c linux-3.0.3/fs/hfsplus/super.c
37983 --- linux-3.0.3/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
37984 +++ linux-3.0.3/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
37985 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
37986 struct nls_table *nls = NULL;
37987 int err;
37988
37989 + pax_track_stack();
37990 +
37991 err = -EINVAL;
37992 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
37993 if (!sbi)
37994 diff -urNp linux-3.0.3/fs/hugetlbfs/inode.c linux-3.0.3/fs/hugetlbfs/inode.c
37995 --- linux-3.0.3/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
37996 +++ linux-3.0.3/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
37997 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
37998 .kill_sb = kill_litter_super,
37999 };
38000
38001 -static struct vfsmount *hugetlbfs_vfsmount;
38002 +struct vfsmount *hugetlbfs_vfsmount;
38003
38004 static int can_do_hugetlb_shm(void)
38005 {
38006 diff -urNp linux-3.0.3/fs/inode.c linux-3.0.3/fs/inode.c
38007 --- linux-3.0.3/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
38008 +++ linux-3.0.3/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
38009 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
38010
38011 #ifdef CONFIG_SMP
38012 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38013 - static atomic_t shared_last_ino;
38014 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38015 + static atomic_unchecked_t shared_last_ino;
38016 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38017
38018 res = next - LAST_INO_BATCH;
38019 }
38020 diff -urNp linux-3.0.3/fs/jbd/checkpoint.c linux-3.0.3/fs/jbd/checkpoint.c
38021 --- linux-3.0.3/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
38022 +++ linux-3.0.3/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
38023 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38024 tid_t this_tid;
38025 int result;
38026
38027 + pax_track_stack();
38028 +
38029 jbd_debug(1, "Start checkpoint\n");
38030
38031 /*
38032 diff -urNp linux-3.0.3/fs/jffs2/compr_rtime.c linux-3.0.3/fs/jffs2/compr_rtime.c
38033 --- linux-3.0.3/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
38034 +++ linux-3.0.3/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
38035 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38036 int outpos = 0;
38037 int pos=0;
38038
38039 + pax_track_stack();
38040 +
38041 memset(positions,0,sizeof(positions));
38042
38043 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38044 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38045 int outpos = 0;
38046 int pos=0;
38047
38048 + pax_track_stack();
38049 +
38050 memset(positions,0,sizeof(positions));
38051
38052 while (outpos<destlen) {
38053 diff -urNp linux-3.0.3/fs/jffs2/compr_rubin.c linux-3.0.3/fs/jffs2/compr_rubin.c
38054 --- linux-3.0.3/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
38055 +++ linux-3.0.3/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
38056 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38057 int ret;
38058 uint32_t mysrclen, mydstlen;
38059
38060 + pax_track_stack();
38061 +
38062 mysrclen = *sourcelen;
38063 mydstlen = *dstlen - 8;
38064
38065 diff -urNp linux-3.0.3/fs/jffs2/erase.c linux-3.0.3/fs/jffs2/erase.c
38066 --- linux-3.0.3/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
38067 +++ linux-3.0.3/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
38068 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38069 struct jffs2_unknown_node marker = {
38070 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38071 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38072 - .totlen = cpu_to_je32(c->cleanmarker_size)
38073 + .totlen = cpu_to_je32(c->cleanmarker_size),
38074 + .hdr_crc = cpu_to_je32(0)
38075 };
38076
38077 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38078 diff -urNp linux-3.0.3/fs/jffs2/wbuf.c linux-3.0.3/fs/jffs2/wbuf.c
38079 --- linux-3.0.3/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
38080 +++ linux-3.0.3/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
38081 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38082 {
38083 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38084 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38085 - .totlen = constant_cpu_to_je32(8)
38086 + .totlen = constant_cpu_to_je32(8),
38087 + .hdr_crc = constant_cpu_to_je32(0)
38088 };
38089
38090 /*
38091 diff -urNp linux-3.0.3/fs/jffs2/xattr.c linux-3.0.3/fs/jffs2/xattr.c
38092 --- linux-3.0.3/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
38093 +++ linux-3.0.3/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
38094 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38095
38096 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38097
38098 + pax_track_stack();
38099 +
38100 /* Phase.1 : Merge same xref */
38101 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38102 xref_tmphash[i] = NULL;
38103 diff -urNp linux-3.0.3/fs/jfs/super.c linux-3.0.3/fs/jfs/super.c
38104 --- linux-3.0.3/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
38105 +++ linux-3.0.3/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
38106 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38107
38108 jfs_inode_cachep =
38109 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38110 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38111 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38112 init_once);
38113 if (jfs_inode_cachep == NULL)
38114 return -ENOMEM;
38115 diff -urNp linux-3.0.3/fs/Kconfig.binfmt linux-3.0.3/fs/Kconfig.binfmt
38116 --- linux-3.0.3/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
38117 +++ linux-3.0.3/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
38118 @@ -86,7 +86,7 @@ config HAVE_AOUT
38119
38120 config BINFMT_AOUT
38121 tristate "Kernel support for a.out and ECOFF binaries"
38122 - depends on HAVE_AOUT
38123 + depends on HAVE_AOUT && BROKEN
38124 ---help---
38125 A.out (Assembler.OUTput) is a set of formats for libraries and
38126 executables used in the earliest versions of UNIX. Linux used
38127 diff -urNp linux-3.0.3/fs/libfs.c linux-3.0.3/fs/libfs.c
38128 --- linux-3.0.3/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
38129 +++ linux-3.0.3/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
38130 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38131
38132 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38133 struct dentry *next;
38134 + char d_name[sizeof(next->d_iname)];
38135 + const unsigned char *name;
38136 +
38137 next = list_entry(p, struct dentry, d_u.d_child);
38138 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38139 if (!simple_positive(next)) {
38140 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38141
38142 spin_unlock(&next->d_lock);
38143 spin_unlock(&dentry->d_lock);
38144 - if (filldir(dirent, next->d_name.name,
38145 + name = next->d_name.name;
38146 + if (name == next->d_iname) {
38147 + memcpy(d_name, name, next->d_name.len);
38148 + name = d_name;
38149 + }
38150 + if (filldir(dirent, name,
38151 next->d_name.len, filp->f_pos,
38152 next->d_inode->i_ino,
38153 dt_type(next->d_inode)) < 0)
38154 diff -urNp linux-3.0.3/fs/lockd/clntproc.c linux-3.0.3/fs/lockd/clntproc.c
38155 --- linux-3.0.3/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
38156 +++ linux-3.0.3/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
38157 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38158 /*
38159 * Cookie counter for NLM requests
38160 */
38161 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38162 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38163
38164 void nlmclnt_next_cookie(struct nlm_cookie *c)
38165 {
38166 - u32 cookie = atomic_inc_return(&nlm_cookie);
38167 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38168
38169 memcpy(c->data, &cookie, 4);
38170 c->len=4;
38171 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38172 struct nlm_rqst reqst, *req;
38173 int status;
38174
38175 + pax_track_stack();
38176 +
38177 req = &reqst;
38178 memset(req, 0, sizeof(*req));
38179 locks_init_lock(&req->a_args.lock.fl);
38180 diff -urNp linux-3.0.3/fs/locks.c linux-3.0.3/fs/locks.c
38181 --- linux-3.0.3/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
38182 +++ linux-3.0.3/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
38183 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38184 return;
38185
38186 if (filp->f_op && filp->f_op->flock) {
38187 - struct file_lock fl = {
38188 + struct file_lock flock = {
38189 .fl_pid = current->tgid,
38190 .fl_file = filp,
38191 .fl_flags = FL_FLOCK,
38192 .fl_type = F_UNLCK,
38193 .fl_end = OFFSET_MAX,
38194 };
38195 - filp->f_op->flock(filp, F_SETLKW, &fl);
38196 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38197 - fl.fl_ops->fl_release_private(&fl);
38198 + filp->f_op->flock(filp, F_SETLKW, &flock);
38199 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38200 + flock.fl_ops->fl_release_private(&flock);
38201 }
38202
38203 lock_flocks();
38204 diff -urNp linux-3.0.3/fs/logfs/super.c linux-3.0.3/fs/logfs/super.c
38205 --- linux-3.0.3/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
38206 +++ linux-3.0.3/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
38207 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38208 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38209 int err, valid0, valid1;
38210
38211 + pax_track_stack();
38212 +
38213 /* read first superblock */
38214 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38215 if (err)
38216 diff -urNp linux-3.0.3/fs/namei.c linux-3.0.3/fs/namei.c
38217 --- linux-3.0.3/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
38218 +++ linux-3.0.3/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
38219 @@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
38220 return ret;
38221
38222 /*
38223 - * Read/write DACs are always overridable.
38224 - * Executable DACs are overridable for all directories and
38225 - * for non-directories that have least one exec bit set.
38226 + * Searching includes executable on directories, else just read.
38227 */
38228 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38229 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38230 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38231 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38232 +#ifdef CONFIG_GRKERNSEC
38233 + if (flags & IPERM_FLAG_RCU)
38234 + return -ECHILD;
38235 +#endif
38236 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38237 return 0;
38238 + }
38239
38240 /*
38241 - * Searching includes executable on directories, else just read.
38242 + * Read/write DACs are always overridable.
38243 + * Executable DACs are overridable for all directories and
38244 + * for non-directories that have least one exec bit set.
38245 */
38246 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38247 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38248 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38249 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38250 +#ifdef CONFIG_GRKERNSEC
38251 + if (flags & IPERM_FLAG_RCU)
38252 + return -ECHILD;
38253 +#endif
38254 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38255 return 0;
38256 + }
38257
38258 return -EACCES;
38259 }
38260 @@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
38261 br_read_unlock(vfsmount_lock);
38262 }
38263
38264 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38265 + return -ENOENT;
38266 +
38267 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38268 return 0;
38269
38270 @@ -593,9 +606,16 @@ static inline int exec_permission(struct
38271 if (ret == -ECHILD)
38272 return ret;
38273
38274 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38275 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38276 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38277 goto ok;
38278 + else {
38279 +#ifdef CONFIG_GRKERNSEC
38280 + if (flags & IPERM_FLAG_RCU)
38281 + return -ECHILD;
38282 +#endif
38283 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38284 + goto ok;
38285 + }
38286
38287 return ret;
38288 ok:
38289 @@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
38290 return error;
38291 }
38292
38293 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38294 + dentry->d_inode, dentry, nd->path.mnt)) {
38295 + error = -EACCES;
38296 + *p = ERR_PTR(error); /* no ->put_link(), please */
38297 + path_put(&nd->path);
38298 + return error;
38299 + }
38300 +
38301 nd->last_type = LAST_BIND;
38302 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38303 error = PTR_ERR(*p);
38304 if (!IS_ERR(*p)) {
38305 - char *s = nd_get_link(nd);
38306 + const char *s = nd_get_link(nd);
38307 error = 0;
38308 if (s)
38309 error = __vfs_follow_link(nd, s);
38310 @@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
38311 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38312
38313 if (likely(!retval)) {
38314 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38315 + return -ENOENT;
38316 +
38317 if (unlikely(!audit_dummy_context())) {
38318 if (nd->path.dentry && nd->inode)
38319 audit_inode(name, nd->path.dentry);
38320 @@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
38321 return error;
38322 }
38323
38324 +/*
38325 + * Note that while the flag value (low two bits) for sys_open means:
38326 + * 00 - read-only
38327 + * 01 - write-only
38328 + * 10 - read-write
38329 + * 11 - special
38330 + * it is changed into
38331 + * 00 - no permissions needed
38332 + * 01 - read-permission
38333 + * 10 - write-permission
38334 + * 11 - read-write
38335 + * for the internal routines (ie open_namei()/follow_link() etc)
38336 + * This is more logical, and also allows the 00 "no perm needed"
38337 + * to be used for symlinks (where the permissions are checked
38338 + * later).
38339 + *
38340 +*/
38341 +static inline int open_to_namei_flags(int flag)
38342 +{
38343 + if ((flag+1) & O_ACCMODE)
38344 + flag++;
38345 + return flag;
38346 +}
38347 +
38348 static int may_open(struct path *path, int acc_mode, int flag)
38349 {
38350 struct dentry *dentry = path->dentry;
38351 @@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
38352 /*
38353 * Ensure there are no outstanding leases on the file.
38354 */
38355 - return break_lease(inode, flag);
38356 + error = break_lease(inode, flag);
38357 +
38358 + if (error)
38359 + return error;
38360 +
38361 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38362 + error = -EPERM;
38363 + goto exit;
38364 + }
38365 +
38366 + if (gr_handle_rawio(inode)) {
38367 + error = -EPERM;
38368 + goto exit;
38369 + }
38370 +
38371 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38372 + error = -EACCES;
38373 + goto exit;
38374 + }
38375 +exit:
38376 + return error;
38377 }
38378
38379 static int handle_truncate(struct file *filp)
38380 @@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
38381 }
38382
38383 /*
38384 - * Note that while the flag value (low two bits) for sys_open means:
38385 - * 00 - read-only
38386 - * 01 - write-only
38387 - * 10 - read-write
38388 - * 11 - special
38389 - * it is changed into
38390 - * 00 - no permissions needed
38391 - * 01 - read-permission
38392 - * 10 - write-permission
38393 - * 11 - read-write
38394 - * for the internal routines (ie open_namei()/follow_link() etc)
38395 - * This is more logical, and also allows the 00 "no perm needed"
38396 - * to be used for symlinks (where the permissions are checked
38397 - * later).
38398 - *
38399 -*/
38400 -static inline int open_to_namei_flags(int flag)
38401 -{
38402 - if ((flag+1) & O_ACCMODE)
38403 - flag++;
38404 - return flag;
38405 -}
38406 -
38407 -/*
38408 * Handle the last step of open()
38409 */
38410 static struct file *do_last(struct nameidata *nd, struct path *path,
38411 @@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
38412 struct dentry *dir = nd->path.dentry;
38413 struct dentry *dentry;
38414 int open_flag = op->open_flag;
38415 + int flag = open_to_namei_flags(open_flag);
38416 int will_truncate = open_flag & O_TRUNC;
38417 int want_write = 0;
38418 int acc_mode = op->acc_mode;
38419 @@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
38420 /* Negative dentry, just create the file */
38421 if (!dentry->d_inode) {
38422 int mode = op->mode;
38423 +
38424 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38425 + error = -EACCES;
38426 + goto exit_mutex_unlock;
38427 + }
38428 +
38429 if (!IS_POSIXACL(dir->d_inode))
38430 mode &= ~current_umask();
38431 /*
38432 @@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
38433 error = vfs_create(dir->d_inode, dentry, mode, nd);
38434 if (error)
38435 goto exit_mutex_unlock;
38436 + else
38437 + gr_handle_create(path->dentry, path->mnt);
38438 mutex_unlock(&dir->d_inode->i_mutex);
38439 dput(nd->path.dentry);
38440 nd->path.dentry = dentry;
38441 @@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
38442 /*
38443 * It already exists.
38444 */
38445 +
38446 + /* only check if O_CREAT is specified, all other checks need to go
38447 + into may_open */
38448 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38449 + error = -EACCES;
38450 + goto exit_mutex_unlock;
38451 + }
38452 +
38453 mutex_unlock(&dir->d_inode->i_mutex);
38454 audit_inode(pathname, path->dentry);
38455
38456 @@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38457 error = may_mknod(mode);
38458 if (error)
38459 goto out_dput;
38460 +
38461 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38462 + error = -EPERM;
38463 + goto out_dput;
38464 + }
38465 +
38466 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38467 + error = -EACCES;
38468 + goto out_dput;
38469 + }
38470 +
38471 error = mnt_want_write(nd.path.mnt);
38472 if (error)
38473 goto out_dput;
38474 @@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38475 }
38476 out_drop_write:
38477 mnt_drop_write(nd.path.mnt);
38478 +
38479 + if (!error)
38480 + gr_handle_create(dentry, nd.path.mnt);
38481 out_dput:
38482 dput(dentry);
38483 out_unlock:
38484 @@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38485 if (IS_ERR(dentry))
38486 goto out_unlock;
38487
38488 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38489 + error = -EACCES;
38490 + goto out_dput;
38491 + }
38492 +
38493 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38494 mode &= ~current_umask();
38495 error = mnt_want_write(nd.path.mnt);
38496 @@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38497 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38498 out_drop_write:
38499 mnt_drop_write(nd.path.mnt);
38500 +
38501 + if (!error)
38502 + gr_handle_create(dentry, nd.path.mnt);
38503 +
38504 out_dput:
38505 dput(dentry);
38506 out_unlock:
38507 @@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
38508 char * name;
38509 struct dentry *dentry;
38510 struct nameidata nd;
38511 + ino_t saved_ino = 0;
38512 + dev_t saved_dev = 0;
38513
38514 error = user_path_parent(dfd, pathname, &nd, &name);
38515 if (error)
38516 @@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
38517 error = -ENOENT;
38518 goto exit3;
38519 }
38520 +
38521 + if (dentry->d_inode->i_nlink <= 1) {
38522 + saved_ino = dentry->d_inode->i_ino;
38523 + saved_dev = gr_get_dev_from_dentry(dentry);
38524 + }
38525 +
38526 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38527 + error = -EACCES;
38528 + goto exit3;
38529 + }
38530 +
38531 error = mnt_want_write(nd.path.mnt);
38532 if (error)
38533 goto exit3;
38534 @@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
38535 if (error)
38536 goto exit4;
38537 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38538 + if (!error && (saved_dev || saved_ino))
38539 + gr_handle_delete(saved_ino, saved_dev);
38540 exit4:
38541 mnt_drop_write(nd.path.mnt);
38542 exit3:
38543 @@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
38544 struct dentry *dentry;
38545 struct nameidata nd;
38546 struct inode *inode = NULL;
38547 + ino_t saved_ino = 0;
38548 + dev_t saved_dev = 0;
38549
38550 error = user_path_parent(dfd, pathname, &nd, &name);
38551 if (error)
38552 @@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
38553 if (!inode)
38554 goto slashes;
38555 ihold(inode);
38556 +
38557 + if (inode->i_nlink <= 1) {
38558 + saved_ino = inode->i_ino;
38559 + saved_dev = gr_get_dev_from_dentry(dentry);
38560 + }
38561 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38562 + error = -EACCES;
38563 + goto exit2;
38564 + }
38565 +
38566 error = mnt_want_write(nd.path.mnt);
38567 if (error)
38568 goto exit2;
38569 @@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
38570 if (error)
38571 goto exit3;
38572 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38573 + if (!error && (saved_ino || saved_dev))
38574 + gr_handle_delete(saved_ino, saved_dev);
38575 exit3:
38576 mnt_drop_write(nd.path.mnt);
38577 exit2:
38578 @@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38579 if (IS_ERR(dentry))
38580 goto out_unlock;
38581
38582 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38583 + error = -EACCES;
38584 + goto out_dput;
38585 + }
38586 +
38587 error = mnt_want_write(nd.path.mnt);
38588 if (error)
38589 goto out_dput;
38590 @@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38591 if (error)
38592 goto out_drop_write;
38593 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38594 + if (!error)
38595 + gr_handle_create(dentry, nd.path.mnt);
38596 out_drop_write:
38597 mnt_drop_write(nd.path.mnt);
38598 out_dput:
38599 @@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38600 error = PTR_ERR(new_dentry);
38601 if (IS_ERR(new_dentry))
38602 goto out_unlock;
38603 +
38604 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38605 + old_path.dentry->d_inode,
38606 + old_path.dentry->d_inode->i_mode, to)) {
38607 + error = -EACCES;
38608 + goto out_dput;
38609 + }
38610 +
38611 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38612 + old_path.dentry, old_path.mnt, to)) {
38613 + error = -EACCES;
38614 + goto out_dput;
38615 + }
38616 +
38617 error = mnt_want_write(nd.path.mnt);
38618 if (error)
38619 goto out_dput;
38620 @@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38621 if (error)
38622 goto out_drop_write;
38623 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38624 + if (!error)
38625 + gr_handle_create(new_dentry, nd.path.mnt);
38626 out_drop_write:
38627 mnt_drop_write(nd.path.mnt);
38628 out_dput:
38629 @@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38630 char *to;
38631 int error;
38632
38633 + pax_track_stack();
38634 +
38635 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38636 if (error)
38637 goto exit;
38638 @@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38639 if (new_dentry == trap)
38640 goto exit5;
38641
38642 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38643 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38644 + to);
38645 + if (error)
38646 + goto exit5;
38647 +
38648 error = mnt_want_write(oldnd.path.mnt);
38649 if (error)
38650 goto exit5;
38651 @@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38652 goto exit6;
38653 error = vfs_rename(old_dir->d_inode, old_dentry,
38654 new_dir->d_inode, new_dentry);
38655 + if (!error)
38656 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38657 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38658 exit6:
38659 mnt_drop_write(oldnd.path.mnt);
38660 exit5:
38661 @@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
38662
38663 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38664 {
38665 + char tmpbuf[64];
38666 + const char *newlink;
38667 int len;
38668
38669 len = PTR_ERR(link);
38670 @@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
38671 len = strlen(link);
38672 if (len > (unsigned) buflen)
38673 len = buflen;
38674 - if (copy_to_user(buffer, link, len))
38675 +
38676 + if (len < sizeof(tmpbuf)) {
38677 + memcpy(tmpbuf, link, len);
38678 + newlink = tmpbuf;
38679 + } else
38680 + newlink = link;
38681 +
38682 + if (copy_to_user(buffer, newlink, len))
38683 len = -EFAULT;
38684 out:
38685 return len;
38686 diff -urNp linux-3.0.3/fs/namespace.c linux-3.0.3/fs/namespace.c
38687 --- linux-3.0.3/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
38688 +++ linux-3.0.3/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
38689 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38690 if (!(sb->s_flags & MS_RDONLY))
38691 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38692 up_write(&sb->s_umount);
38693 +
38694 + gr_log_remount(mnt->mnt_devname, retval);
38695 +
38696 return retval;
38697 }
38698
38699 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38700 br_write_unlock(vfsmount_lock);
38701 up_write(&namespace_sem);
38702 release_mounts(&umount_list);
38703 +
38704 + gr_log_unmount(mnt->mnt_devname, retval);
38705 +
38706 return retval;
38707 }
38708
38709 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38710 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38711 MS_STRICTATIME);
38712
38713 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38714 + retval = -EPERM;
38715 + goto dput_out;
38716 + }
38717 +
38718 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38719 + retval = -EPERM;
38720 + goto dput_out;
38721 + }
38722 +
38723 if (flags & MS_REMOUNT)
38724 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38725 data_page);
38726 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38727 dev_name, data_page);
38728 dput_out:
38729 path_put(&path);
38730 +
38731 + gr_log_mount(dev_name, dir_name, retval);
38732 +
38733 return retval;
38734 }
38735
38736 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38737 if (error)
38738 goto out2;
38739
38740 + if (gr_handle_chroot_pivot()) {
38741 + error = -EPERM;
38742 + goto out2;
38743 + }
38744 +
38745 get_fs_root(current->fs, &root);
38746 error = lock_mount(&old);
38747 if (error)
38748 diff -urNp linux-3.0.3/fs/ncpfs/dir.c linux-3.0.3/fs/ncpfs/dir.c
38749 --- linux-3.0.3/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38750 +++ linux-3.0.3/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
38751 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38752 int res, val = 0, len;
38753 __u8 __name[NCP_MAXPATHLEN + 1];
38754
38755 + pax_track_stack();
38756 +
38757 if (dentry == dentry->d_sb->s_root)
38758 return 1;
38759
38760 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38761 int error, res, len;
38762 __u8 __name[NCP_MAXPATHLEN + 1];
38763
38764 + pax_track_stack();
38765 +
38766 error = -EIO;
38767 if (!ncp_conn_valid(server))
38768 goto finished;
38769 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38770 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38771 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38772
38773 + pax_track_stack();
38774 +
38775 ncp_age_dentry(server, dentry);
38776 len = sizeof(__name);
38777 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38778 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38779 int error, len;
38780 __u8 __name[NCP_MAXPATHLEN + 1];
38781
38782 + pax_track_stack();
38783 +
38784 DPRINTK("ncp_mkdir: making %s/%s\n",
38785 dentry->d_parent->d_name.name, dentry->d_name.name);
38786
38787 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
38788 int old_len, new_len;
38789 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38790
38791 + pax_track_stack();
38792 +
38793 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38794 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38795 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38796 diff -urNp linux-3.0.3/fs/ncpfs/inode.c linux-3.0.3/fs/ncpfs/inode.c
38797 --- linux-3.0.3/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38798 +++ linux-3.0.3/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38799 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38800 #endif
38801 struct ncp_entry_info finfo;
38802
38803 + pax_track_stack();
38804 +
38805 memset(&data, 0, sizeof(data));
38806 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38807 if (!server)
38808 diff -urNp linux-3.0.3/fs/nfs/inode.c linux-3.0.3/fs/nfs/inode.c
38809 --- linux-3.0.3/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38810 +++ linux-3.0.3/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38811 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38812 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38813 nfsi->attrtimeo_timestamp = jiffies;
38814
38815 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38816 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38817 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38818 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38819 else
38820 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38821 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38822 }
38823
38824 -static atomic_long_t nfs_attr_generation_counter;
38825 +static atomic_long_unchecked_t nfs_attr_generation_counter;
38826
38827 static unsigned long nfs_read_attr_generation_counter(void)
38828 {
38829 - return atomic_long_read(&nfs_attr_generation_counter);
38830 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
38831 }
38832
38833 unsigned long nfs_inc_attr_generation_counter(void)
38834 {
38835 - return atomic_long_inc_return(&nfs_attr_generation_counter);
38836 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
38837 }
38838
38839 void nfs_fattr_init(struct nfs_fattr *fattr)
38840 diff -urNp linux-3.0.3/fs/nfsd/nfs4state.c linux-3.0.3/fs/nfsd/nfs4state.c
38841 --- linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:44:40.000000000 -0400
38842 +++ linux-3.0.3/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
38843 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
38844 unsigned int strhashval;
38845 int err;
38846
38847 + pax_track_stack();
38848 +
38849 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
38850 (long long) lock->lk_offset,
38851 (long long) lock->lk_length);
38852 diff -urNp linux-3.0.3/fs/nfsd/nfs4xdr.c linux-3.0.3/fs/nfsd/nfs4xdr.c
38853 --- linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
38854 +++ linux-3.0.3/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
38855 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
38856 .dentry = dentry,
38857 };
38858
38859 + pax_track_stack();
38860 +
38861 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
38862 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
38863 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
38864 diff -urNp linux-3.0.3/fs/nfsd/vfs.c linux-3.0.3/fs/nfsd/vfs.c
38865 --- linux-3.0.3/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
38866 +++ linux-3.0.3/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
38867 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
38868 } else {
38869 oldfs = get_fs();
38870 set_fs(KERNEL_DS);
38871 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
38872 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
38873 set_fs(oldfs);
38874 }
38875
38876 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
38877
38878 /* Write the data. */
38879 oldfs = get_fs(); set_fs(KERNEL_DS);
38880 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
38881 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
38882 set_fs(oldfs);
38883 if (host_err < 0)
38884 goto out_nfserr;
38885 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
38886 */
38887
38888 oldfs = get_fs(); set_fs(KERNEL_DS);
38889 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
38890 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
38891 set_fs(oldfs);
38892
38893 if (host_err < 0)
38894 diff -urNp linux-3.0.3/fs/notify/fanotify/fanotify_user.c linux-3.0.3/fs/notify/fanotify/fanotify_user.c
38895 --- linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
38896 +++ linux-3.0.3/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
38897 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
38898 goto out_close_fd;
38899
38900 ret = -EFAULT;
38901 - if (copy_to_user(buf, &fanotify_event_metadata,
38902 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
38903 + copy_to_user(buf, &fanotify_event_metadata,
38904 fanotify_event_metadata.event_len))
38905 goto out_kill_access_response;
38906
38907 diff -urNp linux-3.0.3/fs/notify/notification.c linux-3.0.3/fs/notify/notification.c
38908 --- linux-3.0.3/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
38909 +++ linux-3.0.3/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
38910 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
38911 * get set to 0 so it will never get 'freed'
38912 */
38913 static struct fsnotify_event *q_overflow_event;
38914 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
38915 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
38916
38917 /**
38918 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
38919 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
38920 */
38921 u32 fsnotify_get_cookie(void)
38922 {
38923 - return atomic_inc_return(&fsnotify_sync_cookie);
38924 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
38925 }
38926 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
38927
38928 diff -urNp linux-3.0.3/fs/ntfs/dir.c linux-3.0.3/fs/ntfs/dir.c
38929 --- linux-3.0.3/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38930 +++ linux-3.0.3/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
38931 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
38932 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
38933 ~(s64)(ndir->itype.index.block_size - 1)));
38934 /* Bounds checks. */
38935 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
38936 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
38937 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
38938 "inode 0x%lx or driver bug.", vdir->i_ino);
38939 goto err_out;
38940 diff -urNp linux-3.0.3/fs/ntfs/file.c linux-3.0.3/fs/ntfs/file.c
38941 --- linux-3.0.3/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
38942 +++ linux-3.0.3/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
38943 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
38944 #endif /* NTFS_RW */
38945 };
38946
38947 -const struct file_operations ntfs_empty_file_ops = {};
38948 +const struct file_operations ntfs_empty_file_ops __read_only;
38949
38950 -const struct inode_operations ntfs_empty_inode_ops = {};
38951 +const struct inode_operations ntfs_empty_inode_ops __read_only;
38952 diff -urNp linux-3.0.3/fs/ocfs2/localalloc.c linux-3.0.3/fs/ocfs2/localalloc.c
38953 --- linux-3.0.3/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
38954 +++ linux-3.0.3/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
38955 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
38956 goto bail;
38957 }
38958
38959 - atomic_inc(&osb->alloc_stats.moves);
38960 + atomic_inc_unchecked(&osb->alloc_stats.moves);
38961
38962 bail:
38963 if (handle)
38964 diff -urNp linux-3.0.3/fs/ocfs2/namei.c linux-3.0.3/fs/ocfs2/namei.c
38965 --- linux-3.0.3/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
38966 +++ linux-3.0.3/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
38967 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
38968 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
38969 struct ocfs2_dir_lookup_result target_insert = { NULL, };
38970
38971 + pax_track_stack();
38972 +
38973 /* At some point it might be nice to break this function up a
38974 * bit. */
38975
38976 diff -urNp linux-3.0.3/fs/ocfs2/ocfs2.h linux-3.0.3/fs/ocfs2/ocfs2.h
38977 --- linux-3.0.3/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
38978 +++ linux-3.0.3/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
38979 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
38980
38981 struct ocfs2_alloc_stats
38982 {
38983 - atomic_t moves;
38984 - atomic_t local_data;
38985 - atomic_t bitmap_data;
38986 - atomic_t bg_allocs;
38987 - atomic_t bg_extends;
38988 + atomic_unchecked_t moves;
38989 + atomic_unchecked_t local_data;
38990 + atomic_unchecked_t bitmap_data;
38991 + atomic_unchecked_t bg_allocs;
38992 + atomic_unchecked_t bg_extends;
38993 };
38994
38995 enum ocfs2_local_alloc_state
38996 diff -urNp linux-3.0.3/fs/ocfs2/suballoc.c linux-3.0.3/fs/ocfs2/suballoc.c
38997 --- linux-3.0.3/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
38998 +++ linux-3.0.3/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
38999 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39000 mlog_errno(status);
39001 goto bail;
39002 }
39003 - atomic_inc(&osb->alloc_stats.bg_extends);
39004 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39005
39006 /* You should never ask for this much metadata */
39007 BUG_ON(bits_wanted >
39008 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39009 mlog_errno(status);
39010 goto bail;
39011 }
39012 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39013 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39014
39015 *suballoc_loc = res.sr_bg_blkno;
39016 *suballoc_bit_start = res.sr_bit_offset;
39017 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39018 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39019 res->sr_bits);
39020
39021 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39022 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39023
39024 BUG_ON(res->sr_bits != 1);
39025
39026 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39027 mlog_errno(status);
39028 goto bail;
39029 }
39030 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39031 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39032
39033 BUG_ON(res.sr_bits != 1);
39034
39035 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39036 cluster_start,
39037 num_clusters);
39038 if (!status)
39039 - atomic_inc(&osb->alloc_stats.local_data);
39040 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39041 } else {
39042 if (min_clusters > (osb->bitmap_cpg - 1)) {
39043 /* The only paths asking for contiguousness
39044 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39045 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39046 res.sr_bg_blkno,
39047 res.sr_bit_offset);
39048 - atomic_inc(&osb->alloc_stats.bitmap_data);
39049 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39050 *num_clusters = res.sr_bits;
39051 }
39052 }
39053 diff -urNp linux-3.0.3/fs/ocfs2/super.c linux-3.0.3/fs/ocfs2/super.c
39054 --- linux-3.0.3/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
39055 +++ linux-3.0.3/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
39056 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39057 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39058 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39059 "Stats",
39060 - atomic_read(&osb->alloc_stats.bitmap_data),
39061 - atomic_read(&osb->alloc_stats.local_data),
39062 - atomic_read(&osb->alloc_stats.bg_allocs),
39063 - atomic_read(&osb->alloc_stats.moves),
39064 - atomic_read(&osb->alloc_stats.bg_extends));
39065 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39066 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39067 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39068 + atomic_read_unchecked(&osb->alloc_stats.moves),
39069 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39070
39071 out += snprintf(buf + out, len - out,
39072 "%10s => State: %u Descriptor: %llu Size: %u bits "
39073 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
39074 spin_lock_init(&osb->osb_xattr_lock);
39075 ocfs2_init_steal_slots(osb);
39076
39077 - atomic_set(&osb->alloc_stats.moves, 0);
39078 - atomic_set(&osb->alloc_stats.local_data, 0);
39079 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39080 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39081 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39082 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39083 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39084 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39085 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39086 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39087
39088 /* Copy the blockcheck stats from the superblock probe */
39089 osb->osb_ecc_stats = *stats;
39090 diff -urNp linux-3.0.3/fs/ocfs2/symlink.c linux-3.0.3/fs/ocfs2/symlink.c
39091 --- linux-3.0.3/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
39092 +++ linux-3.0.3/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
39093 @@ -142,7 +142,7 @@ bail:
39094
39095 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39096 {
39097 - char *link = nd_get_link(nd);
39098 + const char *link = nd_get_link(nd);
39099 if (!IS_ERR(link))
39100 kfree(link);
39101 }
39102 diff -urNp linux-3.0.3/fs/open.c linux-3.0.3/fs/open.c
39103 --- linux-3.0.3/fs/open.c 2011-07-21 22:17:23.000000000 -0400
39104 +++ linux-3.0.3/fs/open.c 2011-08-23 21:48:14.000000000 -0400
39105 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39106 error = locks_verify_truncate(inode, NULL, length);
39107 if (!error)
39108 error = security_path_truncate(&path);
39109 +
39110 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39111 + error = -EACCES;
39112 +
39113 if (!error)
39114 error = do_truncate(path.dentry, length, 0, NULL);
39115
39116 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39117 if (__mnt_is_readonly(path.mnt))
39118 res = -EROFS;
39119
39120 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39121 + res = -EACCES;
39122 +
39123 out_path_release:
39124 path_put(&path);
39125 out:
39126 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39127 if (error)
39128 goto dput_and_out;
39129
39130 + gr_log_chdir(path.dentry, path.mnt);
39131 +
39132 set_fs_pwd(current->fs, &path);
39133
39134 dput_and_out:
39135 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39136 goto out_putf;
39137
39138 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39139 +
39140 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39141 + error = -EPERM;
39142 +
39143 + if (!error)
39144 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39145 +
39146 if (!error)
39147 set_fs_pwd(current->fs, &file->f_path);
39148 out_putf:
39149 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39150 if (error)
39151 goto dput_and_out;
39152
39153 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39154 + goto dput_and_out;
39155 +
39156 + if (gr_handle_chroot_caps(&path)) {
39157 + error = -ENOMEM;
39158 + goto dput_and_out;
39159 + }
39160 +
39161 set_fs_root(current->fs, &path);
39162 +
39163 + gr_handle_chroot_chdir(&path);
39164 +
39165 error = 0;
39166 dput_and_out:
39167 path_put(&path);
39168 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39169 err = mnt_want_write_file(file);
39170 if (err)
39171 goto out_putf;
39172 +
39173 mutex_lock(&inode->i_mutex);
39174 +
39175 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39176 + err = -EACCES;
39177 + goto out_unlock;
39178 + }
39179 +
39180 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39181 if (err)
39182 goto out_unlock;
39183 if (mode == (mode_t) -1)
39184 mode = inode->i_mode;
39185 +
39186 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39187 + err = -EACCES;
39188 + goto out_unlock;
39189 + }
39190 +
39191 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39192 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39193 err = notify_change(dentry, &newattrs);
39194 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39195 error = mnt_want_write(path.mnt);
39196 if (error)
39197 goto dput_and_out;
39198 +
39199 mutex_lock(&inode->i_mutex);
39200 +
39201 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39202 + error = -EACCES;
39203 + goto out_unlock;
39204 + }
39205 +
39206 error = security_path_chmod(path.dentry, path.mnt, mode);
39207 if (error)
39208 goto out_unlock;
39209 if (mode == (mode_t) -1)
39210 mode = inode->i_mode;
39211 +
39212 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39213 + error = -EACCES;
39214 + goto out_unlock;
39215 + }
39216 +
39217 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39218 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39219 error = notify_change(path.dentry, &newattrs);
39220 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39221 int error;
39222 struct iattr newattrs;
39223
39224 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39225 + return -EACCES;
39226 +
39227 newattrs.ia_valid = ATTR_CTIME;
39228 if (user != (uid_t) -1) {
39229 newattrs.ia_valid |= ATTR_UID;
39230 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39231 if (!IS_ERR(tmp)) {
39232 fd = get_unused_fd_flags(flags);
39233 if (fd >= 0) {
39234 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39235 + struct file *f;
39236 + /* don't allow to be set by userland */
39237 + flags &= ~FMODE_GREXEC;
39238 + f = do_filp_open(dfd, tmp, &op, lookup);
39239 if (IS_ERR(f)) {
39240 put_unused_fd(fd);
39241 fd = PTR_ERR(f);
39242 diff -urNp linux-3.0.3/fs/partitions/ldm.c linux-3.0.3/fs/partitions/ldm.c
39243 --- linux-3.0.3/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
39244 +++ linux-3.0.3/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
39245 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39246 ldm_error ("A VBLK claims to have %d parts.", num);
39247 return false;
39248 }
39249 +
39250 if (rec >= num) {
39251 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39252 return false;
39253 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39254 goto found;
39255 }
39256
39257 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39258 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39259 if (!f) {
39260 ldm_crit ("Out of memory.");
39261 return false;
39262 diff -urNp linux-3.0.3/fs/pipe.c linux-3.0.3/fs/pipe.c
39263 --- linux-3.0.3/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
39264 +++ linux-3.0.3/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
39265 @@ -420,9 +420,9 @@ redo:
39266 }
39267 if (bufs) /* More to do? */
39268 continue;
39269 - if (!pipe->writers)
39270 + if (!atomic_read(&pipe->writers))
39271 break;
39272 - if (!pipe->waiting_writers) {
39273 + if (!atomic_read(&pipe->waiting_writers)) {
39274 /* syscall merging: Usually we must not sleep
39275 * if O_NONBLOCK is set, or if we got some data.
39276 * But if a writer sleeps in kernel space, then
39277 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39278 mutex_lock(&inode->i_mutex);
39279 pipe = inode->i_pipe;
39280
39281 - if (!pipe->readers) {
39282 + if (!atomic_read(&pipe->readers)) {
39283 send_sig(SIGPIPE, current, 0);
39284 ret = -EPIPE;
39285 goto out;
39286 @@ -530,7 +530,7 @@ redo1:
39287 for (;;) {
39288 int bufs;
39289
39290 - if (!pipe->readers) {
39291 + if (!atomic_read(&pipe->readers)) {
39292 send_sig(SIGPIPE, current, 0);
39293 if (!ret)
39294 ret = -EPIPE;
39295 @@ -616,9 +616,9 @@ redo2:
39296 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39297 do_wakeup = 0;
39298 }
39299 - pipe->waiting_writers++;
39300 + atomic_inc(&pipe->waiting_writers);
39301 pipe_wait(pipe);
39302 - pipe->waiting_writers--;
39303 + atomic_dec(&pipe->waiting_writers);
39304 }
39305 out:
39306 mutex_unlock(&inode->i_mutex);
39307 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39308 mask = 0;
39309 if (filp->f_mode & FMODE_READ) {
39310 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39311 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39312 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39313 mask |= POLLHUP;
39314 }
39315
39316 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39317 * Most Unices do not set POLLERR for FIFOs but on Linux they
39318 * behave exactly like pipes for poll().
39319 */
39320 - if (!pipe->readers)
39321 + if (!atomic_read(&pipe->readers))
39322 mask |= POLLERR;
39323 }
39324
39325 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39326
39327 mutex_lock(&inode->i_mutex);
39328 pipe = inode->i_pipe;
39329 - pipe->readers -= decr;
39330 - pipe->writers -= decw;
39331 + atomic_sub(decr, &pipe->readers);
39332 + atomic_sub(decw, &pipe->writers);
39333
39334 - if (!pipe->readers && !pipe->writers) {
39335 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39336 free_pipe_info(inode);
39337 } else {
39338 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39339 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39340
39341 if (inode->i_pipe) {
39342 ret = 0;
39343 - inode->i_pipe->readers++;
39344 + atomic_inc(&inode->i_pipe->readers);
39345 }
39346
39347 mutex_unlock(&inode->i_mutex);
39348 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39349
39350 if (inode->i_pipe) {
39351 ret = 0;
39352 - inode->i_pipe->writers++;
39353 + atomic_inc(&inode->i_pipe->writers);
39354 }
39355
39356 mutex_unlock(&inode->i_mutex);
39357 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39358 if (inode->i_pipe) {
39359 ret = 0;
39360 if (filp->f_mode & FMODE_READ)
39361 - inode->i_pipe->readers++;
39362 + atomic_inc(&inode->i_pipe->readers);
39363 if (filp->f_mode & FMODE_WRITE)
39364 - inode->i_pipe->writers++;
39365 + atomic_inc(&inode->i_pipe->writers);
39366 }
39367
39368 mutex_unlock(&inode->i_mutex);
39369 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39370 inode->i_pipe = NULL;
39371 }
39372
39373 -static struct vfsmount *pipe_mnt __read_mostly;
39374 +struct vfsmount *pipe_mnt __read_mostly;
39375
39376 /*
39377 * pipefs_dname() is called from d_path().
39378 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39379 goto fail_iput;
39380 inode->i_pipe = pipe;
39381
39382 - pipe->readers = pipe->writers = 1;
39383 + atomic_set(&pipe->readers, 1);
39384 + atomic_set(&pipe->writers, 1);
39385 inode->i_fop = &rdwr_pipefifo_fops;
39386
39387 /*
39388 diff -urNp linux-3.0.3/fs/proc/array.c linux-3.0.3/fs/proc/array.c
39389 --- linux-3.0.3/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
39390 +++ linux-3.0.3/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
39391 @@ -60,6 +60,7 @@
39392 #include <linux/tty.h>
39393 #include <linux/string.h>
39394 #include <linux/mman.h>
39395 +#include <linux/grsecurity.h>
39396 #include <linux/proc_fs.h>
39397 #include <linux/ioport.h>
39398 #include <linux/uaccess.h>
39399 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39400 seq_putc(m, '\n');
39401 }
39402
39403 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39404 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39405 +{
39406 + if (p->mm)
39407 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39408 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39409 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39410 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39411 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39412 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39413 + else
39414 + seq_printf(m, "PaX:\t-----\n");
39415 +}
39416 +#endif
39417 +
39418 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39419 struct pid *pid, struct task_struct *task)
39420 {
39421 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39422 task_cpus_allowed(m, task);
39423 cpuset_task_status_allowed(m, task);
39424 task_context_switch_counts(m, task);
39425 +
39426 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39427 + task_pax(m, task);
39428 +#endif
39429 +
39430 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39431 + task_grsec_rbac(m, task);
39432 +#endif
39433 +
39434 return 0;
39435 }
39436
39437 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39438 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39439 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39440 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39441 +#endif
39442 +
39443 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39444 struct pid *pid, struct task_struct *task, int whole)
39445 {
39446 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39447 cputime_t cutime, cstime, utime, stime;
39448 cputime_t cgtime, gtime;
39449 unsigned long rsslim = 0;
39450 - char tcomm[sizeof(task->comm)];
39451 + char tcomm[sizeof(task->comm)] = { 0 };
39452 unsigned long flags;
39453
39454 + pax_track_stack();
39455 +
39456 state = *get_task_state(task);
39457 vsize = eip = esp = 0;
39458 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39459 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39460 gtime = task->gtime;
39461 }
39462
39463 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39464 + if (PAX_RAND_FLAGS(mm)) {
39465 + eip = 0;
39466 + esp = 0;
39467 + wchan = 0;
39468 + }
39469 +#endif
39470 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39471 + wchan = 0;
39472 + eip =0;
39473 + esp =0;
39474 +#endif
39475 +
39476 /* scale priority and nice values from timeslices to -20..20 */
39477 /* to make it look like a "normal" Unix priority/nice value */
39478 priority = task_prio(task);
39479 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39480 vsize,
39481 mm ? get_mm_rss(mm) : 0,
39482 rsslim,
39483 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39484 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39485 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39486 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39487 +#else
39488 mm ? (permitted ? mm->start_code : 1) : 0,
39489 mm ? (permitted ? mm->end_code : 1) : 0,
39490 (permitted && mm) ? mm->start_stack : 0,
39491 +#endif
39492 esp,
39493 eip,
39494 /* The signal information here is obsolete.
39495 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39496
39497 return 0;
39498 }
39499 +
39500 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39501 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39502 +{
39503 + u32 curr_ip = 0;
39504 + unsigned long flags;
39505 +
39506 + if (lock_task_sighand(task, &flags)) {
39507 + curr_ip = task->signal->curr_ip;
39508 + unlock_task_sighand(task, &flags);
39509 + }
39510 +
39511 + return sprintf(buffer, "%pI4\n", &curr_ip);
39512 +}
39513 +#endif
39514 diff -urNp linux-3.0.3/fs/proc/base.c linux-3.0.3/fs/proc/base.c
39515 --- linux-3.0.3/fs/proc/base.c 2011-08-23 21:44:40.000000000 -0400
39516 +++ linux-3.0.3/fs/proc/base.c 2011-08-23 21:48:14.000000000 -0400
39517 @@ -107,6 +107,22 @@ struct pid_entry {
39518 union proc_op op;
39519 };
39520
39521 +struct getdents_callback {
39522 + struct linux_dirent __user * current_dir;
39523 + struct linux_dirent __user * previous;
39524 + struct file * file;
39525 + int count;
39526 + int error;
39527 +};
39528 +
39529 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39530 + loff_t offset, u64 ino, unsigned int d_type)
39531 +{
39532 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39533 + buf->error = -EINVAL;
39534 + return 0;
39535 +}
39536 +
39537 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39538 .name = (NAME), \
39539 .len = sizeof(NAME) - 1, \
39540 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
39541 if (task == current)
39542 return mm;
39543
39544 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39545 + return ERR_PTR(-EPERM);
39546 +
39547 /*
39548 * If current is actively ptrace'ing, and would also be
39549 * permitted to freshly attach with ptrace now, permit it.
39550 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
39551 if (!mm->arg_end)
39552 goto out_mm; /* Shh! No looking before we're done */
39553
39554 + if (gr_acl_handle_procpidmem(task))
39555 + goto out_mm;
39556 +
39557 len = mm->arg_end - mm->arg_start;
39558
39559 if (len > PAGE_SIZE)
39560 @@ -309,12 +331,28 @@ out:
39561 return res;
39562 }
39563
39564 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39565 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39566 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39567 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39568 +#endif
39569 +
39570 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39571 {
39572 struct mm_struct *mm = mm_for_maps(task);
39573 int res = PTR_ERR(mm);
39574 if (mm && !IS_ERR(mm)) {
39575 unsigned int nwords = 0;
39576 +
39577 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39578 + /* allow if we're currently ptracing this task */
39579 + if (PAX_RAND_FLAGS(mm) &&
39580 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39581 + mmput(mm);
39582 + return res;
39583 + }
39584 +#endif
39585 +
39586 do {
39587 nwords += 2;
39588 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39589 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
39590 }
39591
39592
39593 -#ifdef CONFIG_KALLSYMS
39594 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39595 /*
39596 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39597 * Returns the resolved symbol. If that fails, simply return the address.
39598 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
39599 mutex_unlock(&task->signal->cred_guard_mutex);
39600 }
39601
39602 -#ifdef CONFIG_STACKTRACE
39603 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39604
39605 #define MAX_STACK_TRACE_DEPTH 64
39606
39607 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
39608 return count;
39609 }
39610
39611 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39612 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39613 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39614 {
39615 long nr;
39616 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
39617 /************************************************************************/
39618
39619 /* permission checks */
39620 -static int proc_fd_access_allowed(struct inode *inode)
39621 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39622 {
39623 struct task_struct *task;
39624 int allowed = 0;
39625 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
39626 */
39627 task = get_proc_task(inode);
39628 if (task) {
39629 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39630 + if (log)
39631 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39632 + else
39633 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39634 put_task_struct(task);
39635 }
39636 return allowed;
39637 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
39638 if (!task)
39639 goto out_no_task;
39640
39641 + if (gr_acl_handle_procpidmem(task))
39642 + goto out;
39643 +
39644 ret = -ENOMEM;
39645 page = (char *)__get_free_page(GFP_TEMPORARY);
39646 if (!page)
39647 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
39648 path_put(&nd->path);
39649
39650 /* Are we allowed to snoop on the tasks file descriptors? */
39651 - if (!proc_fd_access_allowed(inode))
39652 + if (!proc_fd_access_allowed(inode,0))
39653 goto out;
39654
39655 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39656 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
39657 struct path path;
39658
39659 /* Are we allowed to snoop on the tasks file descriptors? */
39660 - if (!proc_fd_access_allowed(inode))
39661 - goto out;
39662 + /* logging this is needed for learning on chromium to work properly,
39663 + but we don't want to flood the logs from 'ps' which does a readlink
39664 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39665 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39666 + */
39667 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39668 + if (!proc_fd_access_allowed(inode,0))
39669 + goto out;
39670 + } else {
39671 + if (!proc_fd_access_allowed(inode,1))
39672 + goto out;
39673 + }
39674
39675 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39676 if (error)
39677 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
39678 rcu_read_lock();
39679 cred = __task_cred(task);
39680 inode->i_uid = cred->euid;
39681 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39682 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39683 +#else
39684 inode->i_gid = cred->egid;
39685 +#endif
39686 rcu_read_unlock();
39687 }
39688 security_task_to_inode(task, inode);
39689 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
39690 struct inode *inode = dentry->d_inode;
39691 struct task_struct *task;
39692 const struct cred *cred;
39693 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39694 + const struct cred *tmpcred = current_cred();
39695 +#endif
39696
39697 generic_fillattr(inode, stat);
39698
39699 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
39700 stat->uid = 0;
39701 stat->gid = 0;
39702 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39703 +
39704 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39705 + rcu_read_unlock();
39706 + return -ENOENT;
39707 + }
39708 +
39709 if (task) {
39710 + cred = __task_cred(task);
39711 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39712 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39713 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39714 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39715 +#endif
39716 + ) {
39717 +#endif
39718 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39719 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39720 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39721 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39722 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39723 +#endif
39724 task_dumpable(task)) {
39725 - cred = __task_cred(task);
39726 stat->uid = cred->euid;
39727 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39728 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39729 +#else
39730 stat->gid = cred->egid;
39731 +#endif
39732 }
39733 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39734 + } else {
39735 + rcu_read_unlock();
39736 + return -ENOENT;
39737 + }
39738 +#endif
39739 }
39740 rcu_read_unlock();
39741 return 0;
39742 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
39743
39744 if (task) {
39745 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39746 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39747 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39748 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39749 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39750 +#endif
39751 task_dumpable(task)) {
39752 rcu_read_lock();
39753 cred = __task_cred(task);
39754 inode->i_uid = cred->euid;
39755 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39756 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39757 +#else
39758 inode->i_gid = cred->egid;
39759 +#endif
39760 rcu_read_unlock();
39761 } else {
39762 inode->i_uid = 0;
39763 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
39764 int fd = proc_fd(inode);
39765
39766 if (task) {
39767 - files = get_files_struct(task);
39768 + if (!gr_acl_handle_procpidmem(task))
39769 + files = get_files_struct(task);
39770 put_task_struct(task);
39771 }
39772 if (files) {
39773 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
39774 */
39775 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39776 {
39777 + struct task_struct *task;
39778 int rv = generic_permission(inode, mask, flags, NULL);
39779 - if (rv == 0)
39780 - return 0;
39781 +
39782 if (task_pid(current) == proc_pid(inode))
39783 rv = 0;
39784 +
39785 + task = get_proc_task(inode);
39786 + if (task == NULL)
39787 + return rv;
39788 +
39789 + if (gr_acl_handle_procpidmem(task))
39790 + rv = -EACCES;
39791 +
39792 + put_task_struct(task);
39793 +
39794 return rv;
39795 }
39796
39797 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
39798 if (!task)
39799 goto out_no_task;
39800
39801 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39802 + goto out;
39803 +
39804 /*
39805 * Yes, it does not scale. And it should not. Don't add
39806 * new entries into /proc/<tgid>/ without very good reasons.
39807 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
39808 if (!task)
39809 goto out_no_task;
39810
39811 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39812 + goto out;
39813 +
39814 ret = 0;
39815 i = filp->f_pos;
39816 switch (i) {
39817 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
39818 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39819 void *cookie)
39820 {
39821 - char *s = nd_get_link(nd);
39822 + const char *s = nd_get_link(nd);
39823 if (!IS_ERR(s))
39824 __putname(s);
39825 }
39826 @@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
39827 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39828 #endif
39829 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39830 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39831 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39832 INF("syscall", S_IRUGO, proc_pid_syscall),
39833 #endif
39834 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39835 @@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
39836 #ifdef CONFIG_SECURITY
39837 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39838 #endif
39839 -#ifdef CONFIG_KALLSYMS
39840 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39841 INF("wchan", S_IRUGO, proc_pid_wchan),
39842 #endif
39843 -#ifdef CONFIG_STACKTRACE
39844 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39845 ONE("stack", S_IRUGO, proc_pid_stack),
39846 #endif
39847 #ifdef CONFIG_SCHEDSTATS
39848 @@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
39849 #ifdef CONFIG_HARDWALL
39850 INF("hardwall", S_IRUGO, proc_pid_hardwall),
39851 #endif
39852 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39853 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
39854 +#endif
39855 };
39856
39857 static int proc_tgid_base_readdir(struct file * filp,
39858 @@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
39859 if (!inode)
39860 goto out;
39861
39862 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39863 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
39864 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39865 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39866 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
39867 +#else
39868 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
39869 +#endif
39870 inode->i_op = &proc_tgid_base_inode_operations;
39871 inode->i_fop = &proc_tgid_base_operations;
39872 inode->i_flags|=S_IMMUTABLE;
39873 @@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
39874 if (!task)
39875 goto out;
39876
39877 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39878 + goto out_put_task;
39879 +
39880 result = proc_pid_instantiate(dir, dentry, task, NULL);
39881 +out_put_task:
39882 put_task_struct(task);
39883 out:
39884 return result;
39885 @@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
39886 {
39887 unsigned int nr;
39888 struct task_struct *reaper;
39889 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39890 + const struct cred *tmpcred = current_cred();
39891 + const struct cred *itercred;
39892 +#endif
39893 + filldir_t __filldir = filldir;
39894 struct tgid_iter iter;
39895 struct pid_namespace *ns;
39896
39897 @@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
39898 for (iter = next_tgid(ns, iter);
39899 iter.task;
39900 iter.tgid += 1, iter = next_tgid(ns, iter)) {
39901 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39902 + rcu_read_lock();
39903 + itercred = __task_cred(iter.task);
39904 +#endif
39905 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
39906 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39907 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
39908 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39909 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39910 +#endif
39911 + )
39912 +#endif
39913 + )
39914 + __filldir = &gr_fake_filldir;
39915 + else
39916 + __filldir = filldir;
39917 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39918 + rcu_read_unlock();
39919 +#endif
39920 filp->f_pos = iter.tgid + TGID_OFFSET;
39921 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
39922 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
39923 put_task_struct(iter.task);
39924 goto out;
39925 }
39926 @@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
39927 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
39928 #endif
39929 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39930 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39931 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39932 INF("syscall", S_IRUGO, proc_pid_syscall),
39933 #endif
39934 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39935 @@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
39936 #ifdef CONFIG_SECURITY
39937 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39938 #endif
39939 -#ifdef CONFIG_KALLSYMS
39940 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39941 INF("wchan", S_IRUGO, proc_pid_wchan),
39942 #endif
39943 -#ifdef CONFIG_STACKTRACE
39944 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39945 ONE("stack", S_IRUGO, proc_pid_stack),
39946 #endif
39947 #ifdef CONFIG_SCHEDSTATS
39948 diff -urNp linux-3.0.3/fs/proc/cmdline.c linux-3.0.3/fs/proc/cmdline.c
39949 --- linux-3.0.3/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
39950 +++ linux-3.0.3/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
39951 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
39952
39953 static int __init proc_cmdline_init(void)
39954 {
39955 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
39956 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
39957 +#else
39958 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
39959 +#endif
39960 return 0;
39961 }
39962 module_init(proc_cmdline_init);
39963 diff -urNp linux-3.0.3/fs/proc/devices.c linux-3.0.3/fs/proc/devices.c
39964 --- linux-3.0.3/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
39965 +++ linux-3.0.3/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
39966 @@ -64,7 +64,11 @@ static const struct file_operations proc
39967
39968 static int __init proc_devices_init(void)
39969 {
39970 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
39971 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
39972 +#else
39973 proc_create("devices", 0, NULL, &proc_devinfo_operations);
39974 +#endif
39975 return 0;
39976 }
39977 module_init(proc_devices_init);
39978 diff -urNp linux-3.0.3/fs/proc/inode.c linux-3.0.3/fs/proc/inode.c
39979 --- linux-3.0.3/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
39980 +++ linux-3.0.3/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
39981 @@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
39982 if (de->mode) {
39983 inode->i_mode = de->mode;
39984 inode->i_uid = de->uid;
39985 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39986 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39987 +#else
39988 inode->i_gid = de->gid;
39989 +#endif
39990 }
39991 if (de->size)
39992 inode->i_size = de->size;
39993 diff -urNp linux-3.0.3/fs/proc/internal.h linux-3.0.3/fs/proc/internal.h
39994 --- linux-3.0.3/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
39995 +++ linux-3.0.3/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
39996 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
39997 struct pid *pid, struct task_struct *task);
39998 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
39999 struct pid *pid, struct task_struct *task);
40000 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40001 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40002 +#endif
40003 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40004
40005 extern const struct file_operations proc_maps_operations;
40006 diff -urNp linux-3.0.3/fs/proc/Kconfig linux-3.0.3/fs/proc/Kconfig
40007 --- linux-3.0.3/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
40008 +++ linux-3.0.3/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
40009 @@ -30,12 +30,12 @@ config PROC_FS
40010
40011 config PROC_KCORE
40012 bool "/proc/kcore support" if !ARM
40013 - depends on PROC_FS && MMU
40014 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40015
40016 config PROC_VMCORE
40017 bool "/proc/vmcore support"
40018 - depends on PROC_FS && CRASH_DUMP
40019 - default y
40020 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40021 + default n
40022 help
40023 Exports the dump image of crashed kernel in ELF format.
40024
40025 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40026 limited in memory.
40027
40028 config PROC_PAGE_MONITOR
40029 - default y
40030 - depends on PROC_FS && MMU
40031 + default n
40032 + depends on PROC_FS && MMU && !GRKERNSEC
40033 bool "Enable /proc page monitoring" if EXPERT
40034 help
40035 Various /proc files exist to monitor process memory utilization:
40036 diff -urNp linux-3.0.3/fs/proc/kcore.c linux-3.0.3/fs/proc/kcore.c
40037 --- linux-3.0.3/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
40038 +++ linux-3.0.3/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
40039 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40040 off_t offset = 0;
40041 struct kcore_list *m;
40042
40043 + pax_track_stack();
40044 +
40045 /* setup ELF header */
40046 elf = (struct elfhdr *) bufp;
40047 bufp += sizeof(struct elfhdr);
40048 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40049 * the addresses in the elf_phdr on our list.
40050 */
40051 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40052 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40053 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40054 + if (tsz > buflen)
40055 tsz = buflen;
40056 -
40057 +
40058 while (buflen) {
40059 struct kcore_list *m;
40060
40061 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40062 kfree(elf_buf);
40063 } else {
40064 if (kern_addr_valid(start)) {
40065 - unsigned long n;
40066 + char *elf_buf;
40067 + mm_segment_t oldfs;
40068
40069 - n = copy_to_user(buffer, (char *)start, tsz);
40070 - /*
40071 - * We cannot distingush between fault on source
40072 - * and fault on destination. When this happens
40073 - * we clear too and hope it will trigger the
40074 - * EFAULT again.
40075 - */
40076 - if (n) {
40077 - if (clear_user(buffer + tsz - n,
40078 - n))
40079 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40080 + if (!elf_buf)
40081 + return -ENOMEM;
40082 + oldfs = get_fs();
40083 + set_fs(KERNEL_DS);
40084 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40085 + set_fs(oldfs);
40086 + if (copy_to_user(buffer, elf_buf, tsz)) {
40087 + kfree(elf_buf);
40088 return -EFAULT;
40089 + }
40090 }
40091 + set_fs(oldfs);
40092 + kfree(elf_buf);
40093 } else {
40094 if (clear_user(buffer, tsz))
40095 return -EFAULT;
40096 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40097
40098 static int open_kcore(struct inode *inode, struct file *filp)
40099 {
40100 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40101 + return -EPERM;
40102 +#endif
40103 if (!capable(CAP_SYS_RAWIO))
40104 return -EPERM;
40105 if (kcore_need_update)
40106 diff -urNp linux-3.0.3/fs/proc/meminfo.c linux-3.0.3/fs/proc/meminfo.c
40107 --- linux-3.0.3/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
40108 +++ linux-3.0.3/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
40109 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40110 unsigned long pages[NR_LRU_LISTS];
40111 int lru;
40112
40113 + pax_track_stack();
40114 +
40115 /*
40116 * display in kilobytes.
40117 */
40118 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40119 vmi.used >> 10,
40120 vmi.largest_chunk >> 10
40121 #ifdef CONFIG_MEMORY_FAILURE
40122 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40123 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40124 #endif
40125 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40126 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40127 diff -urNp linux-3.0.3/fs/proc/nommu.c linux-3.0.3/fs/proc/nommu.c
40128 --- linux-3.0.3/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
40129 +++ linux-3.0.3/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
40130 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40131 if (len < 1)
40132 len = 1;
40133 seq_printf(m, "%*c", len, ' ');
40134 - seq_path(m, &file->f_path, "");
40135 + seq_path(m, &file->f_path, "\n\\");
40136 }
40137
40138 seq_putc(m, '\n');
40139 diff -urNp linux-3.0.3/fs/proc/proc_net.c linux-3.0.3/fs/proc/proc_net.c
40140 --- linux-3.0.3/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
40141 +++ linux-3.0.3/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
40142 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40143 struct task_struct *task;
40144 struct nsproxy *ns;
40145 struct net *net = NULL;
40146 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40147 + const struct cred *cred = current_cred();
40148 +#endif
40149 +
40150 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40151 + if (cred->fsuid)
40152 + return net;
40153 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40154 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40155 + return net;
40156 +#endif
40157
40158 rcu_read_lock();
40159 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40160 diff -urNp linux-3.0.3/fs/proc/proc_sysctl.c linux-3.0.3/fs/proc/proc_sysctl.c
40161 --- linux-3.0.3/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
40162 +++ linux-3.0.3/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
40163 @@ -8,6 +8,8 @@
40164 #include <linux/namei.h>
40165 #include "internal.h"
40166
40167 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40168 +
40169 static const struct dentry_operations proc_sys_dentry_operations;
40170 static const struct file_operations proc_sys_file_operations;
40171 static const struct inode_operations proc_sys_inode_operations;
40172 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40173 if (!p)
40174 goto out;
40175
40176 + if (gr_handle_sysctl(p, MAY_EXEC))
40177 + goto out;
40178 +
40179 err = ERR_PTR(-ENOMEM);
40180 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40181 if (h)
40182 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40183 if (*pos < file->f_pos)
40184 continue;
40185
40186 + if (gr_handle_sysctl(table, 0))
40187 + continue;
40188 +
40189 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40190 if (res)
40191 return res;
40192 @@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
40193 if (IS_ERR(head))
40194 return PTR_ERR(head);
40195
40196 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40197 + return -ENOENT;
40198 +
40199 generic_fillattr(inode, stat);
40200 if (table)
40201 stat->mode = (stat->mode & S_IFMT) | table->mode;
40202 diff -urNp linux-3.0.3/fs/proc/root.c linux-3.0.3/fs/proc/root.c
40203 --- linux-3.0.3/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
40204 +++ linux-3.0.3/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
40205 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
40206 #ifdef CONFIG_PROC_DEVICETREE
40207 proc_device_tree_init();
40208 #endif
40209 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40210 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40211 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40212 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40213 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40214 +#endif
40215 +#else
40216 proc_mkdir("bus", NULL);
40217 +#endif
40218 proc_sys_init();
40219 }
40220
40221 diff -urNp linux-3.0.3/fs/proc/task_mmu.c linux-3.0.3/fs/proc/task_mmu.c
40222 --- linux-3.0.3/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
40223 +++ linux-3.0.3/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
40224 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40225 "VmExe:\t%8lu kB\n"
40226 "VmLib:\t%8lu kB\n"
40227 "VmPTE:\t%8lu kB\n"
40228 - "VmSwap:\t%8lu kB\n",
40229 - hiwater_vm << (PAGE_SHIFT-10),
40230 + "VmSwap:\t%8lu kB\n"
40231 +
40232 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40233 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40234 +#endif
40235 +
40236 + ,hiwater_vm << (PAGE_SHIFT-10),
40237 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40238 mm->locked_vm << (PAGE_SHIFT-10),
40239 hiwater_rss << (PAGE_SHIFT-10),
40240 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40241 data << (PAGE_SHIFT-10),
40242 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40243 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40244 - swap << (PAGE_SHIFT-10));
40245 + swap << (PAGE_SHIFT-10)
40246 +
40247 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40248 + , mm->context.user_cs_base, mm->context.user_cs_limit
40249 +#endif
40250 +
40251 + );
40252 }
40253
40254 unsigned long task_vsize(struct mm_struct *mm)
40255 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40256 return ret;
40257 }
40258
40259 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40260 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40261 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40262 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40263 +#endif
40264 +
40265 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40266 {
40267 struct mm_struct *mm = vma->vm_mm;
40268 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40269 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40270 }
40271
40272 - /* We don't show the stack guard page in /proc/maps */
40273 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40274 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40275 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40276 +#else
40277 start = vma->vm_start;
40278 - if (stack_guard_page_start(vma, start))
40279 - start += PAGE_SIZE;
40280 end = vma->vm_end;
40281 - if (stack_guard_page_end(vma, end))
40282 - end -= PAGE_SIZE;
40283 +#endif
40284
40285 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40286 start,
40287 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40288 flags & VM_WRITE ? 'w' : '-',
40289 flags & VM_EXEC ? 'x' : '-',
40290 flags & VM_MAYSHARE ? 's' : 'p',
40291 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40292 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40293 +#else
40294 pgoff,
40295 +#endif
40296 MAJOR(dev), MINOR(dev), ino, &len);
40297
40298 /*
40299 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40300 */
40301 if (file) {
40302 pad_len_spaces(m, len);
40303 - seq_path(m, &file->f_path, "\n");
40304 + seq_path(m, &file->f_path, "\n\\");
40305 } else {
40306 const char *name = arch_vma_name(vma);
40307 if (!name) {
40308 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40309 if (vma->vm_start <= mm->brk &&
40310 vma->vm_end >= mm->start_brk) {
40311 name = "[heap]";
40312 - } else if (vma->vm_start <= mm->start_stack &&
40313 - vma->vm_end >= mm->start_stack) {
40314 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40315 + (vma->vm_start <= mm->start_stack &&
40316 + vma->vm_end >= mm->start_stack)) {
40317 name = "[stack]";
40318 }
40319 } else {
40320 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40321 };
40322
40323 memset(&mss, 0, sizeof mss);
40324 - mss.vma = vma;
40325 - /* mmap_sem is held in m_start */
40326 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40327 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40328 -
40329 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40330 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40331 +#endif
40332 + mss.vma = vma;
40333 + /* mmap_sem is held in m_start */
40334 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40335 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40336 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40337 + }
40338 +#endif
40339 show_map_vma(m, vma);
40340
40341 seq_printf(m,
40342 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40343 "KernelPageSize: %8lu kB\n"
40344 "MMUPageSize: %8lu kB\n"
40345 "Locked: %8lu kB\n",
40346 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40347 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40348 +#else
40349 (vma->vm_end - vma->vm_start) >> 10,
40350 +#endif
40351 mss.resident >> 10,
40352 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40353 mss.shared_clean >> 10,
40354 @@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
40355
40356 if (file) {
40357 seq_printf(m, " file=");
40358 - seq_path(m, &file->f_path, "\n\t= ");
40359 + seq_path(m, &file->f_path, "\n\t\\= ");
40360 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
40361 seq_printf(m, " heap");
40362 } else if (vma->vm_start <= mm->start_stack &&
40363 diff -urNp linux-3.0.3/fs/proc/task_nommu.c linux-3.0.3/fs/proc/task_nommu.c
40364 --- linux-3.0.3/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
40365 +++ linux-3.0.3/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
40366 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40367 else
40368 bytes += kobjsize(mm);
40369
40370 - if (current->fs && current->fs->users > 1)
40371 + if (current->fs && atomic_read(&current->fs->users) > 1)
40372 sbytes += kobjsize(current->fs);
40373 else
40374 bytes += kobjsize(current->fs);
40375 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40376
40377 if (file) {
40378 pad_len_spaces(m, len);
40379 - seq_path(m, &file->f_path, "");
40380 + seq_path(m, &file->f_path, "\n\\");
40381 } else if (mm) {
40382 if (vma->vm_start <= mm->start_stack &&
40383 vma->vm_end >= mm->start_stack) {
40384 diff -urNp linux-3.0.3/fs/quota/netlink.c linux-3.0.3/fs/quota/netlink.c
40385 --- linux-3.0.3/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
40386 +++ linux-3.0.3/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
40387 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40388 void quota_send_warning(short type, unsigned int id, dev_t dev,
40389 const char warntype)
40390 {
40391 - static atomic_t seq;
40392 + static atomic_unchecked_t seq;
40393 struct sk_buff *skb;
40394 void *msg_head;
40395 int ret;
40396 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40397 "VFS: Not enough memory to send quota warning.\n");
40398 return;
40399 }
40400 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40401 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40402 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40403 if (!msg_head) {
40404 printk(KERN_ERR
40405 diff -urNp linux-3.0.3/fs/readdir.c linux-3.0.3/fs/readdir.c
40406 --- linux-3.0.3/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
40407 +++ linux-3.0.3/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
40408 @@ -17,6 +17,7 @@
40409 #include <linux/security.h>
40410 #include <linux/syscalls.h>
40411 #include <linux/unistd.h>
40412 +#include <linux/namei.h>
40413
40414 #include <asm/uaccess.h>
40415
40416 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40417
40418 struct readdir_callback {
40419 struct old_linux_dirent __user * dirent;
40420 + struct file * file;
40421 int result;
40422 };
40423
40424 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40425 buf->result = -EOVERFLOW;
40426 return -EOVERFLOW;
40427 }
40428 +
40429 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40430 + return 0;
40431 +
40432 buf->result++;
40433 dirent = buf->dirent;
40434 if (!access_ok(VERIFY_WRITE, dirent,
40435 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40436
40437 buf.result = 0;
40438 buf.dirent = dirent;
40439 + buf.file = file;
40440
40441 error = vfs_readdir(file, fillonedir, &buf);
40442 if (buf.result)
40443 @@ -142,6 +149,7 @@ struct linux_dirent {
40444 struct getdents_callback {
40445 struct linux_dirent __user * current_dir;
40446 struct linux_dirent __user * previous;
40447 + struct file * file;
40448 int count;
40449 int error;
40450 };
40451 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40452 buf->error = -EOVERFLOW;
40453 return -EOVERFLOW;
40454 }
40455 +
40456 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40457 + return 0;
40458 +
40459 dirent = buf->previous;
40460 if (dirent) {
40461 if (__put_user(offset, &dirent->d_off))
40462 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40463 buf.previous = NULL;
40464 buf.count = count;
40465 buf.error = 0;
40466 + buf.file = file;
40467
40468 error = vfs_readdir(file, filldir, &buf);
40469 if (error >= 0)
40470 @@ -229,6 +242,7 @@ out:
40471 struct getdents_callback64 {
40472 struct linux_dirent64 __user * current_dir;
40473 struct linux_dirent64 __user * previous;
40474 + struct file *file;
40475 int count;
40476 int error;
40477 };
40478 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40479 buf->error = -EINVAL; /* only used if we fail.. */
40480 if (reclen > buf->count)
40481 return -EINVAL;
40482 +
40483 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40484 + return 0;
40485 +
40486 dirent = buf->previous;
40487 if (dirent) {
40488 if (__put_user(offset, &dirent->d_off))
40489 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40490
40491 buf.current_dir = dirent;
40492 buf.previous = NULL;
40493 + buf.file = file;
40494 buf.count = count;
40495 buf.error = 0;
40496
40497 diff -urNp linux-3.0.3/fs/reiserfs/dir.c linux-3.0.3/fs/reiserfs/dir.c
40498 --- linux-3.0.3/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40499 +++ linux-3.0.3/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
40500 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40501 struct reiserfs_dir_entry de;
40502 int ret = 0;
40503
40504 + pax_track_stack();
40505 +
40506 reiserfs_write_lock(inode->i_sb);
40507
40508 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40509 diff -urNp linux-3.0.3/fs/reiserfs/do_balan.c linux-3.0.3/fs/reiserfs/do_balan.c
40510 --- linux-3.0.3/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
40511 +++ linux-3.0.3/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
40512 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40513 return;
40514 }
40515
40516 - atomic_inc(&(fs_generation(tb->tb_sb)));
40517 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40518 do_balance_starts(tb);
40519
40520 /* balance leaf returns 0 except if combining L R and S into
40521 diff -urNp linux-3.0.3/fs/reiserfs/journal.c linux-3.0.3/fs/reiserfs/journal.c
40522 --- linux-3.0.3/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
40523 +++ linux-3.0.3/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
40524 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40525 struct buffer_head *bh;
40526 int i, j;
40527
40528 + pax_track_stack();
40529 +
40530 bh = __getblk(dev, block, bufsize);
40531 if (buffer_uptodate(bh))
40532 return (bh);
40533 diff -urNp linux-3.0.3/fs/reiserfs/namei.c linux-3.0.3/fs/reiserfs/namei.c
40534 --- linux-3.0.3/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
40535 +++ linux-3.0.3/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
40536 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40537 unsigned long savelink = 1;
40538 struct timespec ctime;
40539
40540 + pax_track_stack();
40541 +
40542 /* three balancings: (1) old name removal, (2) new name insertion
40543 and (3) maybe "save" link insertion
40544 stat data updates: (1) old directory,
40545 diff -urNp linux-3.0.3/fs/reiserfs/procfs.c linux-3.0.3/fs/reiserfs/procfs.c
40546 --- linux-3.0.3/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
40547 +++ linux-3.0.3/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
40548 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40549 "SMALL_TAILS " : "NO_TAILS ",
40550 replay_only(sb) ? "REPLAY_ONLY " : "",
40551 convert_reiserfs(sb) ? "CONV " : "",
40552 - atomic_read(&r->s_generation_counter),
40553 + atomic_read_unchecked(&r->s_generation_counter),
40554 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40555 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40556 SF(s_good_search_by_key_reada), SF(s_bmaps),
40557 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40558 struct journal_params *jp = &rs->s_v1.s_journal;
40559 char b[BDEVNAME_SIZE];
40560
40561 + pax_track_stack();
40562 +
40563 seq_printf(m, /* on-disk fields */
40564 "jp_journal_1st_block: \t%i\n"
40565 "jp_journal_dev: \t%s[%x]\n"
40566 diff -urNp linux-3.0.3/fs/reiserfs/stree.c linux-3.0.3/fs/reiserfs/stree.c
40567 --- linux-3.0.3/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
40568 +++ linux-3.0.3/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
40569 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40570 int iter = 0;
40571 #endif
40572
40573 + pax_track_stack();
40574 +
40575 BUG_ON(!th->t_trans_id);
40576
40577 init_tb_struct(th, &s_del_balance, sb, path,
40578 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40579 int retval;
40580 int quota_cut_bytes = 0;
40581
40582 + pax_track_stack();
40583 +
40584 BUG_ON(!th->t_trans_id);
40585
40586 le_key2cpu_key(&cpu_key, key);
40587 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40588 int quota_cut_bytes;
40589 loff_t tail_pos = 0;
40590
40591 + pax_track_stack();
40592 +
40593 BUG_ON(!th->t_trans_id);
40594
40595 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40596 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40597 int retval;
40598 int fs_gen;
40599
40600 + pax_track_stack();
40601 +
40602 BUG_ON(!th->t_trans_id);
40603
40604 fs_gen = get_generation(inode->i_sb);
40605 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40606 int fs_gen = 0;
40607 int quota_bytes = 0;
40608
40609 + pax_track_stack();
40610 +
40611 BUG_ON(!th->t_trans_id);
40612
40613 if (inode) { /* Do we count quotas for item? */
40614 diff -urNp linux-3.0.3/fs/reiserfs/super.c linux-3.0.3/fs/reiserfs/super.c
40615 --- linux-3.0.3/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
40616 +++ linux-3.0.3/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
40617 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40618 {.option_name = NULL}
40619 };
40620
40621 + pax_track_stack();
40622 +
40623 *blocks = 0;
40624 if (!options || !*options)
40625 /* use default configuration: create tails, journaling on, no
40626 diff -urNp linux-3.0.3/fs/select.c linux-3.0.3/fs/select.c
40627 --- linux-3.0.3/fs/select.c 2011-07-21 22:17:23.000000000 -0400
40628 +++ linux-3.0.3/fs/select.c 2011-08-23 21:48:14.000000000 -0400
40629 @@ -20,6 +20,7 @@
40630 #include <linux/module.h>
40631 #include <linux/slab.h>
40632 #include <linux/poll.h>
40633 +#include <linux/security.h>
40634 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40635 #include <linux/file.h>
40636 #include <linux/fdtable.h>
40637 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40638 int retval, i, timed_out = 0;
40639 unsigned long slack = 0;
40640
40641 + pax_track_stack();
40642 +
40643 rcu_read_lock();
40644 retval = max_select_fd(n, fds);
40645 rcu_read_unlock();
40646 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40647 /* Allocate small arguments on the stack to save memory and be faster */
40648 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40649
40650 + pax_track_stack();
40651 +
40652 ret = -EINVAL;
40653 if (n < 0)
40654 goto out_nofds;
40655 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40656 struct poll_list *walk = head;
40657 unsigned long todo = nfds;
40658
40659 + pax_track_stack();
40660 +
40661 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40662 if (nfds > rlimit(RLIMIT_NOFILE))
40663 return -EINVAL;
40664
40665 diff -urNp linux-3.0.3/fs/seq_file.c linux-3.0.3/fs/seq_file.c
40666 --- linux-3.0.3/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
40667 +++ linux-3.0.3/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
40668 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40669 return 0;
40670 }
40671 if (!m->buf) {
40672 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40673 + m->size = PAGE_SIZE;
40674 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40675 if (!m->buf)
40676 return -ENOMEM;
40677 }
40678 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40679 Eoverflow:
40680 m->op->stop(m, p);
40681 kfree(m->buf);
40682 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40683 + m->size <<= 1;
40684 + m->buf = kmalloc(m->size, GFP_KERNEL);
40685 return !m->buf ? -ENOMEM : -EAGAIN;
40686 }
40687
40688 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40689 m->version = file->f_version;
40690 /* grab buffer if we didn't have one */
40691 if (!m->buf) {
40692 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40693 + m->size = PAGE_SIZE;
40694 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40695 if (!m->buf)
40696 goto Enomem;
40697 }
40698 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40699 goto Fill;
40700 m->op->stop(m, p);
40701 kfree(m->buf);
40702 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40703 + m->size <<= 1;
40704 + m->buf = kmalloc(m->size, GFP_KERNEL);
40705 if (!m->buf)
40706 goto Enomem;
40707 m->count = 0;
40708 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40709 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40710 void *data)
40711 {
40712 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40713 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40714 int res = -ENOMEM;
40715
40716 if (op) {
40717 diff -urNp linux-3.0.3/fs/splice.c linux-3.0.3/fs/splice.c
40718 --- linux-3.0.3/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
40719 +++ linux-3.0.3/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
40720 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40721 pipe_lock(pipe);
40722
40723 for (;;) {
40724 - if (!pipe->readers) {
40725 + if (!atomic_read(&pipe->readers)) {
40726 send_sig(SIGPIPE, current, 0);
40727 if (!ret)
40728 ret = -EPIPE;
40729 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40730 do_wakeup = 0;
40731 }
40732
40733 - pipe->waiting_writers++;
40734 + atomic_inc(&pipe->waiting_writers);
40735 pipe_wait(pipe);
40736 - pipe->waiting_writers--;
40737 + atomic_dec(&pipe->waiting_writers);
40738 }
40739
40740 pipe_unlock(pipe);
40741 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
40742 .spd_release = spd_release_page,
40743 };
40744
40745 + pax_track_stack();
40746 +
40747 if (splice_grow_spd(pipe, &spd))
40748 return -ENOMEM;
40749
40750 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
40751 old_fs = get_fs();
40752 set_fs(get_ds());
40753 /* The cast to a user pointer is valid due to the set_fs() */
40754 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40755 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40756 set_fs(old_fs);
40757
40758 return res;
40759 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
40760 old_fs = get_fs();
40761 set_fs(get_ds());
40762 /* The cast to a user pointer is valid due to the set_fs() */
40763 - res = vfs_write(file, (const char __user *)buf, count, &pos);
40764 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40765 set_fs(old_fs);
40766
40767 return res;
40768 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
40769 .spd_release = spd_release_page,
40770 };
40771
40772 + pax_track_stack();
40773 +
40774 if (splice_grow_spd(pipe, &spd))
40775 return -ENOMEM;
40776
40777 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
40778 goto err;
40779
40780 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40781 - vec[i].iov_base = (void __user *) page_address(page);
40782 + vec[i].iov_base = (__force void __user *) page_address(page);
40783 vec[i].iov_len = this_len;
40784 spd.pages[i] = page;
40785 spd.nr_pages++;
40786 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40787 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40788 {
40789 while (!pipe->nrbufs) {
40790 - if (!pipe->writers)
40791 + if (!atomic_read(&pipe->writers))
40792 return 0;
40793
40794 - if (!pipe->waiting_writers && sd->num_spliced)
40795 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40796 return 0;
40797
40798 if (sd->flags & SPLICE_F_NONBLOCK)
40799 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
40800 * out of the pipe right after the splice_to_pipe(). So set
40801 * PIPE_READERS appropriately.
40802 */
40803 - pipe->readers = 1;
40804 + atomic_set(&pipe->readers, 1);
40805
40806 current->splice_pipe = pipe;
40807 }
40808 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
40809 };
40810 long ret;
40811
40812 + pax_track_stack();
40813 +
40814 pipe = get_pipe_info(file);
40815 if (!pipe)
40816 return -EBADF;
40817 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
40818 ret = -ERESTARTSYS;
40819 break;
40820 }
40821 - if (!pipe->writers)
40822 + if (!atomic_read(&pipe->writers))
40823 break;
40824 - if (!pipe->waiting_writers) {
40825 + if (!atomic_read(&pipe->waiting_writers)) {
40826 if (flags & SPLICE_F_NONBLOCK) {
40827 ret = -EAGAIN;
40828 break;
40829 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
40830 pipe_lock(pipe);
40831
40832 while (pipe->nrbufs >= pipe->buffers) {
40833 - if (!pipe->readers) {
40834 + if (!atomic_read(&pipe->readers)) {
40835 send_sig(SIGPIPE, current, 0);
40836 ret = -EPIPE;
40837 break;
40838 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
40839 ret = -ERESTARTSYS;
40840 break;
40841 }
40842 - pipe->waiting_writers++;
40843 + atomic_inc(&pipe->waiting_writers);
40844 pipe_wait(pipe);
40845 - pipe->waiting_writers--;
40846 + atomic_dec(&pipe->waiting_writers);
40847 }
40848
40849 pipe_unlock(pipe);
40850 @@ -1819,14 +1825,14 @@ retry:
40851 pipe_double_lock(ipipe, opipe);
40852
40853 do {
40854 - if (!opipe->readers) {
40855 + if (!atomic_read(&opipe->readers)) {
40856 send_sig(SIGPIPE, current, 0);
40857 if (!ret)
40858 ret = -EPIPE;
40859 break;
40860 }
40861
40862 - if (!ipipe->nrbufs && !ipipe->writers)
40863 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
40864 break;
40865
40866 /*
40867 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
40868 pipe_double_lock(ipipe, opipe);
40869
40870 do {
40871 - if (!opipe->readers) {
40872 + if (!atomic_read(&opipe->readers)) {
40873 send_sig(SIGPIPE, current, 0);
40874 if (!ret)
40875 ret = -EPIPE;
40876 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
40877 * return EAGAIN if we have the potential of some data in the
40878 * future, otherwise just return 0
40879 */
40880 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
40881 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
40882 ret = -EAGAIN;
40883
40884 pipe_unlock(ipipe);
40885 diff -urNp linux-3.0.3/fs/sysfs/file.c linux-3.0.3/fs/sysfs/file.c
40886 --- linux-3.0.3/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
40887 +++ linux-3.0.3/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
40888 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
40889
40890 struct sysfs_open_dirent {
40891 atomic_t refcnt;
40892 - atomic_t event;
40893 + atomic_unchecked_t event;
40894 wait_queue_head_t poll;
40895 struct list_head buffers; /* goes through sysfs_buffer.list */
40896 };
40897 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
40898 if (!sysfs_get_active(attr_sd))
40899 return -ENODEV;
40900
40901 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
40902 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
40903 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
40904
40905 sysfs_put_active(attr_sd);
40906 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
40907 return -ENOMEM;
40908
40909 atomic_set(&new_od->refcnt, 0);
40910 - atomic_set(&new_od->event, 1);
40911 + atomic_set_unchecked(&new_od->event, 1);
40912 init_waitqueue_head(&new_od->poll);
40913 INIT_LIST_HEAD(&new_od->buffers);
40914 goto retry;
40915 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
40916
40917 sysfs_put_active(attr_sd);
40918
40919 - if (buffer->event != atomic_read(&od->event))
40920 + if (buffer->event != atomic_read_unchecked(&od->event))
40921 goto trigger;
40922
40923 return DEFAULT_POLLMASK;
40924 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
40925
40926 od = sd->s_attr.open;
40927 if (od) {
40928 - atomic_inc(&od->event);
40929 + atomic_inc_unchecked(&od->event);
40930 wake_up_interruptible(&od->poll);
40931 }
40932
40933 diff -urNp linux-3.0.3/fs/sysfs/mount.c linux-3.0.3/fs/sysfs/mount.c
40934 --- linux-3.0.3/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
40935 +++ linux-3.0.3/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
40936 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
40937 .s_name = "",
40938 .s_count = ATOMIC_INIT(1),
40939 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
40940 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
40941 + .s_mode = S_IFDIR | S_IRWXU,
40942 +#else
40943 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
40944 +#endif
40945 .s_ino = 1,
40946 };
40947
40948 diff -urNp linux-3.0.3/fs/sysfs/symlink.c linux-3.0.3/fs/sysfs/symlink.c
40949 --- linux-3.0.3/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
40950 +++ linux-3.0.3/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
40951 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
40952
40953 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
40954 {
40955 - char *page = nd_get_link(nd);
40956 + const char *page = nd_get_link(nd);
40957 if (!IS_ERR(page))
40958 free_page((unsigned long)page);
40959 }
40960 diff -urNp linux-3.0.3/fs/udf/inode.c linux-3.0.3/fs/udf/inode.c
40961 --- linux-3.0.3/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
40962 +++ linux-3.0.3/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
40963 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
40964 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
40965 int lastblock = 0;
40966
40967 + pax_track_stack();
40968 +
40969 prev_epos.offset = udf_file_entry_alloc_offset(inode);
40970 prev_epos.block = iinfo->i_location;
40971 prev_epos.bh = NULL;
40972 diff -urNp linux-3.0.3/fs/udf/misc.c linux-3.0.3/fs/udf/misc.c
40973 --- linux-3.0.3/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
40974 +++ linux-3.0.3/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
40975 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
40976
40977 u8 udf_tag_checksum(const struct tag *t)
40978 {
40979 - u8 *data = (u8 *)t;
40980 + const u8 *data = (const u8 *)t;
40981 u8 checksum = 0;
40982 int i;
40983 for (i = 0; i < sizeof(struct tag); ++i)
40984 diff -urNp linux-3.0.3/fs/utimes.c linux-3.0.3/fs/utimes.c
40985 --- linux-3.0.3/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
40986 +++ linux-3.0.3/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
40987 @@ -1,6 +1,7 @@
40988 #include <linux/compiler.h>
40989 #include <linux/file.h>
40990 #include <linux/fs.h>
40991 +#include <linux/security.h>
40992 #include <linux/linkage.h>
40993 #include <linux/mount.h>
40994 #include <linux/namei.h>
40995 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
40996 goto mnt_drop_write_and_out;
40997 }
40998 }
40999 +
41000 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41001 + error = -EACCES;
41002 + goto mnt_drop_write_and_out;
41003 + }
41004 +
41005 mutex_lock(&inode->i_mutex);
41006 error = notify_change(path->dentry, &newattrs);
41007 mutex_unlock(&inode->i_mutex);
41008 diff -urNp linux-3.0.3/fs/xattr_acl.c linux-3.0.3/fs/xattr_acl.c
41009 --- linux-3.0.3/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
41010 +++ linux-3.0.3/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
41011 @@ -17,8 +17,8 @@
41012 struct posix_acl *
41013 posix_acl_from_xattr(const void *value, size_t size)
41014 {
41015 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41016 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41017 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41018 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41019 int count;
41020 struct posix_acl *acl;
41021 struct posix_acl_entry *acl_e;
41022 diff -urNp linux-3.0.3/fs/xattr.c linux-3.0.3/fs/xattr.c
41023 --- linux-3.0.3/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
41024 +++ linux-3.0.3/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
41025 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41026 * Extended attribute SET operations
41027 */
41028 static long
41029 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41030 +setxattr(struct path *path, const char __user *name, const void __user *value,
41031 size_t size, int flags)
41032 {
41033 int error;
41034 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
41035 return PTR_ERR(kvalue);
41036 }
41037
41038 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41039 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41040 + error = -EACCES;
41041 + goto out;
41042 + }
41043 +
41044 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41045 +out:
41046 kfree(kvalue);
41047 return error;
41048 }
41049 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41050 return error;
41051 error = mnt_want_write(path.mnt);
41052 if (!error) {
41053 - error = setxattr(path.dentry, name, value, size, flags);
41054 + error = setxattr(&path, name, value, size, flags);
41055 mnt_drop_write(path.mnt);
41056 }
41057 path_put(&path);
41058 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41059 return error;
41060 error = mnt_want_write(path.mnt);
41061 if (!error) {
41062 - error = setxattr(path.dentry, name, value, size, flags);
41063 + error = setxattr(&path, name, value, size, flags);
41064 mnt_drop_write(path.mnt);
41065 }
41066 path_put(&path);
41067 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41068 const void __user *,value, size_t, size, int, flags)
41069 {
41070 struct file *f;
41071 - struct dentry *dentry;
41072 int error = -EBADF;
41073
41074 f = fget(fd);
41075 if (!f)
41076 return error;
41077 - dentry = f->f_path.dentry;
41078 - audit_inode(NULL, dentry);
41079 + audit_inode(NULL, f->f_path.dentry);
41080 error = mnt_want_write_file(f);
41081 if (!error) {
41082 - error = setxattr(dentry, name, value, size, flags);
41083 + error = setxattr(&f->f_path, name, value, size, flags);
41084 mnt_drop_write(f->f_path.mnt);
41085 }
41086 fput(f);
41087 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c
41088 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
41089 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
41090 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41091 xfs_fsop_geom_t fsgeo;
41092 int error;
41093
41094 + memset(&fsgeo, 0, sizeof(fsgeo));
41095 error = xfs_fs_geometry(mp, &fsgeo, 3);
41096 if (error)
41097 return -error;
41098 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c
41099 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
41100 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
41101 @@ -128,7 +128,7 @@ xfs_find_handle(
41102 }
41103
41104 error = -EFAULT;
41105 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41106 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41107 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41108 goto out_put;
41109
41110 diff -urNp linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c
41111 --- linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
41112 +++ linux-3.0.3/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
41113 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41114 struct nameidata *nd,
41115 void *p)
41116 {
41117 - char *s = nd_get_link(nd);
41118 + const char *s = nd_get_link(nd);
41119
41120 if (!IS_ERR(s))
41121 kfree(s);
41122 diff -urNp linux-3.0.3/fs/xfs/xfs_bmap.c linux-3.0.3/fs/xfs/xfs_bmap.c
41123 --- linux-3.0.3/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
41124 +++ linux-3.0.3/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
41125 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
41126 int nmap,
41127 int ret_nmap);
41128 #else
41129 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41130 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41131 #endif /* DEBUG */
41132
41133 STATIC int
41134 diff -urNp linux-3.0.3/fs/xfs/xfs_dir2_sf.c linux-3.0.3/fs/xfs/xfs_dir2_sf.c
41135 --- linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
41136 +++ linux-3.0.3/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
41137 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41138 }
41139
41140 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41141 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41142 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41143 + char name[sfep->namelen];
41144 + memcpy(name, sfep->name, sfep->namelen);
41145 + if (filldir(dirent, name, sfep->namelen,
41146 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41147 + *offset = off & 0x7fffffff;
41148 + return 0;
41149 + }
41150 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41151 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41152 *offset = off & 0x7fffffff;
41153 return 0;
41154 diff -urNp linux-3.0.3/grsecurity/gracl_alloc.c linux-3.0.3/grsecurity/gracl_alloc.c
41155 --- linux-3.0.3/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41156 +++ linux-3.0.3/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
41157 @@ -0,0 +1,105 @@
41158 +#include <linux/kernel.h>
41159 +#include <linux/mm.h>
41160 +#include <linux/slab.h>
41161 +#include <linux/vmalloc.h>
41162 +#include <linux/gracl.h>
41163 +#include <linux/grsecurity.h>
41164 +
41165 +static unsigned long alloc_stack_next = 1;
41166 +static unsigned long alloc_stack_size = 1;
41167 +static void **alloc_stack;
41168 +
41169 +static __inline__ int
41170 +alloc_pop(void)
41171 +{
41172 + if (alloc_stack_next == 1)
41173 + return 0;
41174 +
41175 + kfree(alloc_stack[alloc_stack_next - 2]);
41176 +
41177 + alloc_stack_next--;
41178 +
41179 + return 1;
41180 +}
41181 +
41182 +static __inline__ int
41183 +alloc_push(void *buf)
41184 +{
41185 + if (alloc_stack_next >= alloc_stack_size)
41186 + return 1;
41187 +
41188 + alloc_stack[alloc_stack_next - 1] = buf;
41189 +
41190 + alloc_stack_next++;
41191 +
41192 + return 0;
41193 +}
41194 +
41195 +void *
41196 +acl_alloc(unsigned long len)
41197 +{
41198 + void *ret = NULL;
41199 +
41200 + if (!len || len > PAGE_SIZE)
41201 + goto out;
41202 +
41203 + ret = kmalloc(len, GFP_KERNEL);
41204 +
41205 + if (ret) {
41206 + if (alloc_push(ret)) {
41207 + kfree(ret);
41208 + ret = NULL;
41209 + }
41210 + }
41211 +
41212 +out:
41213 + return ret;
41214 +}
41215 +
41216 +void *
41217 +acl_alloc_num(unsigned long num, unsigned long len)
41218 +{
41219 + if (!len || (num > (PAGE_SIZE / len)))
41220 + return NULL;
41221 +
41222 + return acl_alloc(num * len);
41223 +}
41224 +
41225 +void
41226 +acl_free_all(void)
41227 +{
41228 + if (gr_acl_is_enabled() || !alloc_stack)
41229 + return;
41230 +
41231 + while (alloc_pop()) ;
41232 +
41233 + if (alloc_stack) {
41234 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41235 + kfree(alloc_stack);
41236 + else
41237 + vfree(alloc_stack);
41238 + }
41239 +
41240 + alloc_stack = NULL;
41241 + alloc_stack_size = 1;
41242 + alloc_stack_next = 1;
41243 +
41244 + return;
41245 +}
41246 +
41247 +int
41248 +acl_alloc_stack_init(unsigned long size)
41249 +{
41250 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41251 + alloc_stack =
41252 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41253 + else
41254 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41255 +
41256 + alloc_stack_size = size;
41257 +
41258 + if (!alloc_stack)
41259 + return 0;
41260 + else
41261 + return 1;
41262 +}
41263 diff -urNp linux-3.0.3/grsecurity/gracl.c linux-3.0.3/grsecurity/gracl.c
41264 --- linux-3.0.3/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41265 +++ linux-3.0.3/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
41266 @@ -0,0 +1,4106 @@
41267 +#include <linux/kernel.h>
41268 +#include <linux/module.h>
41269 +#include <linux/sched.h>
41270 +#include <linux/mm.h>
41271 +#include <linux/file.h>
41272 +#include <linux/fs.h>
41273 +#include <linux/namei.h>
41274 +#include <linux/mount.h>
41275 +#include <linux/tty.h>
41276 +#include <linux/proc_fs.h>
41277 +#include <linux/lglock.h>
41278 +#include <linux/slab.h>
41279 +#include <linux/vmalloc.h>
41280 +#include <linux/types.h>
41281 +#include <linux/sysctl.h>
41282 +#include <linux/netdevice.h>
41283 +#include <linux/ptrace.h>
41284 +#include <linux/gracl.h>
41285 +#include <linux/gralloc.h>
41286 +#include <linux/grsecurity.h>
41287 +#include <linux/grinternal.h>
41288 +#include <linux/pid_namespace.h>
41289 +#include <linux/fdtable.h>
41290 +#include <linux/percpu.h>
41291 +
41292 +#include <asm/uaccess.h>
41293 +#include <asm/errno.h>
41294 +#include <asm/mman.h>
41295 +
41296 +static struct acl_role_db acl_role_set;
41297 +static struct name_db name_set;
41298 +static struct inodev_db inodev_set;
41299 +
41300 +/* for keeping track of userspace pointers used for subjects, so we
41301 + can share references in the kernel as well
41302 +*/
41303 +
41304 +static struct path real_root;
41305 +
41306 +static struct acl_subj_map_db subj_map_set;
41307 +
41308 +static struct acl_role_label *default_role;
41309 +
41310 +static struct acl_role_label *role_list;
41311 +
41312 +static u16 acl_sp_role_value;
41313 +
41314 +extern char *gr_shared_page[4];
41315 +static DEFINE_MUTEX(gr_dev_mutex);
41316 +DEFINE_RWLOCK(gr_inode_lock);
41317 +
41318 +struct gr_arg *gr_usermode;
41319 +
41320 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41321 +
41322 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41323 +extern void gr_clear_learn_entries(void);
41324 +
41325 +#ifdef CONFIG_GRKERNSEC_RESLOG
41326 +extern void gr_log_resource(const struct task_struct *task,
41327 + const int res, const unsigned long wanted, const int gt);
41328 +#endif
41329 +
41330 +unsigned char *gr_system_salt;
41331 +unsigned char *gr_system_sum;
41332 +
41333 +static struct sprole_pw **acl_special_roles = NULL;
41334 +static __u16 num_sprole_pws = 0;
41335 +
41336 +static struct acl_role_label *kernel_role = NULL;
41337 +
41338 +static unsigned int gr_auth_attempts = 0;
41339 +static unsigned long gr_auth_expires = 0UL;
41340 +
41341 +#ifdef CONFIG_NET
41342 +extern struct vfsmount *sock_mnt;
41343 +#endif
41344 +
41345 +extern struct vfsmount *pipe_mnt;
41346 +extern struct vfsmount *shm_mnt;
41347 +#ifdef CONFIG_HUGETLBFS
41348 +extern struct vfsmount *hugetlbfs_vfsmount;
41349 +#endif
41350 +
41351 +static struct acl_object_label *fakefs_obj_rw;
41352 +static struct acl_object_label *fakefs_obj_rwx;
41353 +
41354 +extern int gr_init_uidset(void);
41355 +extern void gr_free_uidset(void);
41356 +extern void gr_remove_uid(uid_t uid);
41357 +extern int gr_find_uid(uid_t uid);
41358 +
41359 +DECLARE_BRLOCK(vfsmount_lock);
41360 +
41361 +__inline__ int
41362 +gr_acl_is_enabled(void)
41363 +{
41364 + return (gr_status & GR_READY);
41365 +}
41366 +
41367 +#ifdef CONFIG_BTRFS_FS
41368 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41369 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41370 +#endif
41371 +
41372 +static inline dev_t __get_dev(const struct dentry *dentry)
41373 +{
41374 +#ifdef CONFIG_BTRFS_FS
41375 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41376 + return get_btrfs_dev_from_inode(dentry->d_inode);
41377 + else
41378 +#endif
41379 + return dentry->d_inode->i_sb->s_dev;
41380 +}
41381 +
41382 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41383 +{
41384 + return __get_dev(dentry);
41385 +}
41386 +
41387 +static char gr_task_roletype_to_char(struct task_struct *task)
41388 +{
41389 + switch (task->role->roletype &
41390 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41391 + GR_ROLE_SPECIAL)) {
41392 + case GR_ROLE_DEFAULT:
41393 + return 'D';
41394 + case GR_ROLE_USER:
41395 + return 'U';
41396 + case GR_ROLE_GROUP:
41397 + return 'G';
41398 + case GR_ROLE_SPECIAL:
41399 + return 'S';
41400 + }
41401 +
41402 + return 'X';
41403 +}
41404 +
41405 +char gr_roletype_to_char(void)
41406 +{
41407 + return gr_task_roletype_to_char(current);
41408 +}
41409 +
41410 +__inline__ int
41411 +gr_acl_tpe_check(void)
41412 +{
41413 + if (unlikely(!(gr_status & GR_READY)))
41414 + return 0;
41415 + if (current->role->roletype & GR_ROLE_TPE)
41416 + return 1;
41417 + else
41418 + return 0;
41419 +}
41420 +
41421 +int
41422 +gr_handle_rawio(const struct inode *inode)
41423 +{
41424 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41425 + if (inode && S_ISBLK(inode->i_mode) &&
41426 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41427 + !capable(CAP_SYS_RAWIO))
41428 + return 1;
41429 +#endif
41430 + return 0;
41431 +}
41432 +
41433 +static int
41434 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41435 +{
41436 + if (likely(lena != lenb))
41437 + return 0;
41438 +
41439 + return !memcmp(a, b, lena);
41440 +}
41441 +
41442 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41443 +{
41444 + *buflen -= namelen;
41445 + if (*buflen < 0)
41446 + return -ENAMETOOLONG;
41447 + *buffer -= namelen;
41448 + memcpy(*buffer, str, namelen);
41449 + return 0;
41450 +}
41451 +
41452 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41453 +{
41454 + return prepend(buffer, buflen, name->name, name->len);
41455 +}
41456 +
41457 +static int prepend_path(const struct path *path, struct path *root,
41458 + char **buffer, int *buflen)
41459 +{
41460 + struct dentry *dentry = path->dentry;
41461 + struct vfsmount *vfsmnt = path->mnt;
41462 + bool slash = false;
41463 + int error = 0;
41464 +
41465 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41466 + struct dentry * parent;
41467 +
41468 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41469 + /* Global root? */
41470 + if (vfsmnt->mnt_parent == vfsmnt) {
41471 + goto out;
41472 + }
41473 + dentry = vfsmnt->mnt_mountpoint;
41474 + vfsmnt = vfsmnt->mnt_parent;
41475 + continue;
41476 + }
41477 + parent = dentry->d_parent;
41478 + prefetch(parent);
41479 + spin_lock(&dentry->d_lock);
41480 + error = prepend_name(buffer, buflen, &dentry->d_name);
41481 + spin_unlock(&dentry->d_lock);
41482 + if (!error)
41483 + error = prepend(buffer, buflen, "/", 1);
41484 + if (error)
41485 + break;
41486 +
41487 + slash = true;
41488 + dentry = parent;
41489 + }
41490 +
41491 +out:
41492 + if (!error && !slash)
41493 + error = prepend(buffer, buflen, "/", 1);
41494 +
41495 + return error;
41496 +}
41497 +
41498 +/* this must be called with vfsmount_lock and rename_lock held */
41499 +
41500 +static char *__our_d_path(const struct path *path, struct path *root,
41501 + char *buf, int buflen)
41502 +{
41503 + char *res = buf + buflen;
41504 + int error;
41505 +
41506 + prepend(&res, &buflen, "\0", 1);
41507 + error = prepend_path(path, root, &res, &buflen);
41508 + if (error)
41509 + return ERR_PTR(error);
41510 +
41511 + return res;
41512 +}
41513 +
41514 +static char *
41515 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41516 +{
41517 + char *retval;
41518 +
41519 + retval = __our_d_path(path, root, buf, buflen);
41520 + if (unlikely(IS_ERR(retval)))
41521 + retval = strcpy(buf, "<path too long>");
41522 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41523 + retval[1] = '\0';
41524 +
41525 + return retval;
41526 +}
41527 +
41528 +static char *
41529 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41530 + char *buf, int buflen)
41531 +{
41532 + struct path path;
41533 + char *res;
41534 +
41535 + path.dentry = (struct dentry *)dentry;
41536 + path.mnt = (struct vfsmount *)vfsmnt;
41537 +
41538 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41539 + by the RBAC system */
41540 + res = gen_full_path(&path, &real_root, buf, buflen);
41541 +
41542 + return res;
41543 +}
41544 +
41545 +static char *
41546 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41547 + char *buf, int buflen)
41548 +{
41549 + char *res;
41550 + struct path path;
41551 + struct path root;
41552 + struct task_struct *reaper = &init_task;
41553 +
41554 + path.dentry = (struct dentry *)dentry;
41555 + path.mnt = (struct vfsmount *)vfsmnt;
41556 +
41557 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41558 + get_fs_root(reaper->fs, &root);
41559 +
41560 + write_seqlock(&rename_lock);
41561 + br_read_lock(vfsmount_lock);
41562 + res = gen_full_path(&path, &root, buf, buflen);
41563 + br_read_unlock(vfsmount_lock);
41564 + write_sequnlock(&rename_lock);
41565 +
41566 + path_put(&root);
41567 + return res;
41568 +}
41569 +
41570 +static char *
41571 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41572 +{
41573 + char *ret;
41574 + write_seqlock(&rename_lock);
41575 + br_read_lock(vfsmount_lock);
41576 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41577 + PAGE_SIZE);
41578 + br_read_unlock(vfsmount_lock);
41579 + write_sequnlock(&rename_lock);
41580 + return ret;
41581 +}
41582 +
41583 +char *
41584 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41585 +{
41586 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41587 + PAGE_SIZE);
41588 +}
41589 +
41590 +char *
41591 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41592 +{
41593 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41594 + PAGE_SIZE);
41595 +}
41596 +
41597 +char *
41598 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41599 +{
41600 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41601 + PAGE_SIZE);
41602 +}
41603 +
41604 +char *
41605 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41606 +{
41607 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41608 + PAGE_SIZE);
41609 +}
41610 +
41611 +char *
41612 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41613 +{
41614 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41615 + PAGE_SIZE);
41616 +}
41617 +
41618 +__inline__ __u32
41619 +to_gr_audit(const __u32 reqmode)
41620 +{
41621 + /* masks off auditable permission flags, then shifts them to create
41622 + auditing flags, and adds the special case of append auditing if
41623 + we're requesting write */
41624 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41625 +}
41626 +
41627 +struct acl_subject_label *
41628 +lookup_subject_map(const struct acl_subject_label *userp)
41629 +{
41630 + unsigned int index = shash(userp, subj_map_set.s_size);
41631 + struct subject_map *match;
41632 +
41633 + match = subj_map_set.s_hash[index];
41634 +
41635 + while (match && match->user != userp)
41636 + match = match->next;
41637 +
41638 + if (match != NULL)
41639 + return match->kernel;
41640 + else
41641 + return NULL;
41642 +}
41643 +
41644 +static void
41645 +insert_subj_map_entry(struct subject_map *subjmap)
41646 +{
41647 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41648 + struct subject_map **curr;
41649 +
41650 + subjmap->prev = NULL;
41651 +
41652 + curr = &subj_map_set.s_hash[index];
41653 + if (*curr != NULL)
41654 + (*curr)->prev = subjmap;
41655 +
41656 + subjmap->next = *curr;
41657 + *curr = subjmap;
41658 +
41659 + return;
41660 +}
41661 +
41662 +static struct acl_role_label *
41663 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41664 + const gid_t gid)
41665 +{
41666 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41667 + struct acl_role_label *match;
41668 + struct role_allowed_ip *ipp;
41669 + unsigned int x;
41670 + u32 curr_ip = task->signal->curr_ip;
41671 +
41672 + task->signal->saved_ip = curr_ip;
41673 +
41674 + match = acl_role_set.r_hash[index];
41675 +
41676 + while (match) {
41677 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41678 + for (x = 0; x < match->domain_child_num; x++) {
41679 + if (match->domain_children[x] == uid)
41680 + goto found;
41681 + }
41682 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41683 + break;
41684 + match = match->next;
41685 + }
41686 +found:
41687 + if (match == NULL) {
41688 + try_group:
41689 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41690 + match = acl_role_set.r_hash[index];
41691 +
41692 + while (match) {
41693 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41694 + for (x = 0; x < match->domain_child_num; x++) {
41695 + if (match->domain_children[x] == gid)
41696 + goto found2;
41697 + }
41698 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41699 + break;
41700 + match = match->next;
41701 + }
41702 +found2:
41703 + if (match == NULL)
41704 + match = default_role;
41705 + if (match->allowed_ips == NULL)
41706 + return match;
41707 + else {
41708 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41709 + if (likely
41710 + ((ntohl(curr_ip) & ipp->netmask) ==
41711 + (ntohl(ipp->addr) & ipp->netmask)))
41712 + return match;
41713 + }
41714 + match = default_role;
41715 + }
41716 + } else if (match->allowed_ips == NULL) {
41717 + return match;
41718 + } else {
41719 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41720 + if (likely
41721 + ((ntohl(curr_ip) & ipp->netmask) ==
41722 + (ntohl(ipp->addr) & ipp->netmask)))
41723 + return match;
41724 + }
41725 + goto try_group;
41726 + }
41727 +
41728 + return match;
41729 +}
41730 +
41731 +struct acl_subject_label *
41732 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41733 + const struct acl_role_label *role)
41734 +{
41735 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41736 + struct acl_subject_label *match;
41737 +
41738 + match = role->subj_hash[index];
41739 +
41740 + while (match && (match->inode != ino || match->device != dev ||
41741 + (match->mode & GR_DELETED))) {
41742 + match = match->next;
41743 + }
41744 +
41745 + if (match && !(match->mode & GR_DELETED))
41746 + return match;
41747 + else
41748 + return NULL;
41749 +}
41750 +
41751 +struct acl_subject_label *
41752 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41753 + const struct acl_role_label *role)
41754 +{
41755 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41756 + struct acl_subject_label *match;
41757 +
41758 + match = role->subj_hash[index];
41759 +
41760 + while (match && (match->inode != ino || match->device != dev ||
41761 + !(match->mode & GR_DELETED))) {
41762 + match = match->next;
41763 + }
41764 +
41765 + if (match && (match->mode & GR_DELETED))
41766 + return match;
41767 + else
41768 + return NULL;
41769 +}
41770 +
41771 +static struct acl_object_label *
41772 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41773 + const struct acl_subject_label *subj)
41774 +{
41775 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41776 + struct acl_object_label *match;
41777 +
41778 + match = subj->obj_hash[index];
41779 +
41780 + while (match && (match->inode != ino || match->device != dev ||
41781 + (match->mode & GR_DELETED))) {
41782 + match = match->next;
41783 + }
41784 +
41785 + if (match && !(match->mode & GR_DELETED))
41786 + return match;
41787 + else
41788 + return NULL;
41789 +}
41790 +
41791 +static struct acl_object_label *
41792 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41793 + const struct acl_subject_label *subj)
41794 +{
41795 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41796 + struct acl_object_label *match;
41797 +
41798 + match = subj->obj_hash[index];
41799 +
41800 + while (match && (match->inode != ino || match->device != dev ||
41801 + !(match->mode & GR_DELETED))) {
41802 + match = match->next;
41803 + }
41804 +
41805 + if (match && (match->mode & GR_DELETED))
41806 + return match;
41807 +
41808 + match = subj->obj_hash[index];
41809 +
41810 + while (match && (match->inode != ino || match->device != dev ||
41811 + (match->mode & GR_DELETED))) {
41812 + match = match->next;
41813 + }
41814 +
41815 + if (match && !(match->mode & GR_DELETED))
41816 + return match;
41817 + else
41818 + return NULL;
41819 +}
41820 +
41821 +static struct name_entry *
41822 +lookup_name_entry(const char *name)
41823 +{
41824 + unsigned int len = strlen(name);
41825 + unsigned int key = full_name_hash(name, len);
41826 + unsigned int index = key % name_set.n_size;
41827 + struct name_entry *match;
41828 +
41829 + match = name_set.n_hash[index];
41830 +
41831 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41832 + match = match->next;
41833 +
41834 + return match;
41835 +}
41836 +
41837 +static struct name_entry *
41838 +lookup_name_entry_create(const char *name)
41839 +{
41840 + unsigned int len = strlen(name);
41841 + unsigned int key = full_name_hash(name, len);
41842 + unsigned int index = key % name_set.n_size;
41843 + struct name_entry *match;
41844 +
41845 + match = name_set.n_hash[index];
41846 +
41847 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41848 + !match->deleted))
41849 + match = match->next;
41850 +
41851 + if (match && match->deleted)
41852 + return match;
41853 +
41854 + match = name_set.n_hash[index];
41855 +
41856 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41857 + match->deleted))
41858 + match = match->next;
41859 +
41860 + if (match && !match->deleted)
41861 + return match;
41862 + else
41863 + return NULL;
41864 +}
41865 +
41866 +static struct inodev_entry *
41867 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
41868 +{
41869 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
41870 + struct inodev_entry *match;
41871 +
41872 + match = inodev_set.i_hash[index];
41873 +
41874 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
41875 + match = match->next;
41876 +
41877 + return match;
41878 +}
41879 +
41880 +static void
41881 +insert_inodev_entry(struct inodev_entry *entry)
41882 +{
41883 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
41884 + inodev_set.i_size);
41885 + struct inodev_entry **curr;
41886 +
41887 + entry->prev = NULL;
41888 +
41889 + curr = &inodev_set.i_hash[index];
41890 + if (*curr != NULL)
41891 + (*curr)->prev = entry;
41892 +
41893 + entry->next = *curr;
41894 + *curr = entry;
41895 +
41896 + return;
41897 +}
41898 +
41899 +static void
41900 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
41901 +{
41902 + unsigned int index =
41903 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
41904 + struct acl_role_label **curr;
41905 + struct acl_role_label *tmp;
41906 +
41907 + curr = &acl_role_set.r_hash[index];
41908 +
41909 + /* if role was already inserted due to domains and already has
41910 + a role in the same bucket as it attached, then we need to
41911 + combine these two buckets
41912 + */
41913 + if (role->next) {
41914 + tmp = role->next;
41915 + while (tmp->next)
41916 + tmp = tmp->next;
41917 + tmp->next = *curr;
41918 + } else
41919 + role->next = *curr;
41920 + *curr = role;
41921 +
41922 + return;
41923 +}
41924 +
41925 +static void
41926 +insert_acl_role_label(struct acl_role_label *role)
41927 +{
41928 + int i;
41929 +
41930 + if (role_list == NULL) {
41931 + role_list = role;
41932 + role->prev = NULL;
41933 + } else {
41934 + role->prev = role_list;
41935 + role_list = role;
41936 + }
41937 +
41938 + /* used for hash chains */
41939 + role->next = NULL;
41940 +
41941 + if (role->roletype & GR_ROLE_DOMAIN) {
41942 + for (i = 0; i < role->domain_child_num; i++)
41943 + __insert_acl_role_label(role, role->domain_children[i]);
41944 + } else
41945 + __insert_acl_role_label(role, role->uidgid);
41946 +}
41947 +
41948 +static int
41949 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
41950 +{
41951 + struct name_entry **curr, *nentry;
41952 + struct inodev_entry *ientry;
41953 + unsigned int len = strlen(name);
41954 + unsigned int key = full_name_hash(name, len);
41955 + unsigned int index = key % name_set.n_size;
41956 +
41957 + curr = &name_set.n_hash[index];
41958 +
41959 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
41960 + curr = &((*curr)->next);
41961 +
41962 + if (*curr != NULL)
41963 + return 1;
41964 +
41965 + nentry = acl_alloc(sizeof (struct name_entry));
41966 + if (nentry == NULL)
41967 + return 0;
41968 + ientry = acl_alloc(sizeof (struct inodev_entry));
41969 + if (ientry == NULL)
41970 + return 0;
41971 + ientry->nentry = nentry;
41972 +
41973 + nentry->key = key;
41974 + nentry->name = name;
41975 + nentry->inode = inode;
41976 + nentry->device = device;
41977 + nentry->len = len;
41978 + nentry->deleted = deleted;
41979 +
41980 + nentry->prev = NULL;
41981 + curr = &name_set.n_hash[index];
41982 + if (*curr != NULL)
41983 + (*curr)->prev = nentry;
41984 + nentry->next = *curr;
41985 + *curr = nentry;
41986 +
41987 + /* insert us into the table searchable by inode/dev */
41988 + insert_inodev_entry(ientry);
41989 +
41990 + return 1;
41991 +}
41992 +
41993 +static void
41994 +insert_acl_obj_label(struct acl_object_label *obj,
41995 + struct acl_subject_label *subj)
41996 +{
41997 + unsigned int index =
41998 + fhash(obj->inode, obj->device, subj->obj_hash_size);
41999 + struct acl_object_label **curr;
42000 +
42001 +
42002 + obj->prev = NULL;
42003 +
42004 + curr = &subj->obj_hash[index];
42005 + if (*curr != NULL)
42006 + (*curr)->prev = obj;
42007 +
42008 + obj->next = *curr;
42009 + *curr = obj;
42010 +
42011 + return;
42012 +}
42013 +
42014 +static void
42015 +insert_acl_subj_label(struct acl_subject_label *obj,
42016 + struct acl_role_label *role)
42017 +{
42018 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42019 + struct acl_subject_label **curr;
42020 +
42021 + obj->prev = NULL;
42022 +
42023 + curr = &role->subj_hash[index];
42024 + if (*curr != NULL)
42025 + (*curr)->prev = obj;
42026 +
42027 + obj->next = *curr;
42028 + *curr = obj;
42029 +
42030 + return;
42031 +}
42032 +
42033 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42034 +
42035 +static void *
42036 +create_table(__u32 * len, int elementsize)
42037 +{
42038 + unsigned int table_sizes[] = {
42039 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42040 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42041 + 4194301, 8388593, 16777213, 33554393, 67108859
42042 + };
42043 + void *newtable = NULL;
42044 + unsigned int pwr = 0;
42045 +
42046 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42047 + table_sizes[pwr] <= *len)
42048 + pwr++;
42049 +
42050 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42051 + return newtable;
42052 +
42053 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42054 + newtable =
42055 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42056 + else
42057 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42058 +
42059 + *len = table_sizes[pwr];
42060 +
42061 + return newtable;
42062 +}
42063 +
42064 +static int
42065 +init_variables(const struct gr_arg *arg)
42066 +{
42067 + struct task_struct *reaper = &init_task;
42068 + unsigned int stacksize;
42069 +
42070 + subj_map_set.s_size = arg->role_db.num_subjects;
42071 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42072 + name_set.n_size = arg->role_db.num_objects;
42073 + inodev_set.i_size = arg->role_db.num_objects;
42074 +
42075 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42076 + !name_set.n_size || !inodev_set.i_size)
42077 + return 1;
42078 +
42079 + if (!gr_init_uidset())
42080 + return 1;
42081 +
42082 + /* set up the stack that holds allocation info */
42083 +
42084 + stacksize = arg->role_db.num_pointers + 5;
42085 +
42086 + if (!acl_alloc_stack_init(stacksize))
42087 + return 1;
42088 +
42089 + /* grab reference for the real root dentry and vfsmount */
42090 + get_fs_root(reaper->fs, &real_root);
42091 +
42092 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42093 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42094 +#endif
42095 +
42096 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42097 + if (fakefs_obj_rw == NULL)
42098 + return 1;
42099 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42100 +
42101 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42102 + if (fakefs_obj_rwx == NULL)
42103 + return 1;
42104 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42105 +
42106 + subj_map_set.s_hash =
42107 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42108 + acl_role_set.r_hash =
42109 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42110 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42111 + inodev_set.i_hash =
42112 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42113 +
42114 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42115 + !name_set.n_hash || !inodev_set.i_hash)
42116 + return 1;
42117 +
42118 + memset(subj_map_set.s_hash, 0,
42119 + sizeof(struct subject_map *) * subj_map_set.s_size);
42120 + memset(acl_role_set.r_hash, 0,
42121 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42122 + memset(name_set.n_hash, 0,
42123 + sizeof (struct name_entry *) * name_set.n_size);
42124 + memset(inodev_set.i_hash, 0,
42125 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42126 +
42127 + return 0;
42128 +}
42129 +
42130 +/* free information not needed after startup
42131 + currently contains user->kernel pointer mappings for subjects
42132 +*/
42133 +
42134 +static void
42135 +free_init_variables(void)
42136 +{
42137 + __u32 i;
42138 +
42139 + if (subj_map_set.s_hash) {
42140 + for (i = 0; i < subj_map_set.s_size; i++) {
42141 + if (subj_map_set.s_hash[i]) {
42142 + kfree(subj_map_set.s_hash[i]);
42143 + subj_map_set.s_hash[i] = NULL;
42144 + }
42145 + }
42146 +
42147 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42148 + PAGE_SIZE)
42149 + kfree(subj_map_set.s_hash);
42150 + else
42151 + vfree(subj_map_set.s_hash);
42152 + }
42153 +
42154 + return;
42155 +}
42156 +
42157 +static void
42158 +free_variables(void)
42159 +{
42160 + struct acl_subject_label *s;
42161 + struct acl_role_label *r;
42162 + struct task_struct *task, *task2;
42163 + unsigned int x;
42164 +
42165 + gr_clear_learn_entries();
42166 +
42167 + read_lock(&tasklist_lock);
42168 + do_each_thread(task2, task) {
42169 + task->acl_sp_role = 0;
42170 + task->acl_role_id = 0;
42171 + task->acl = NULL;
42172 + task->role = NULL;
42173 + } while_each_thread(task2, task);
42174 + read_unlock(&tasklist_lock);
42175 +
42176 + /* release the reference to the real root dentry and vfsmount */
42177 + path_put(&real_root);
42178 +
42179 + /* free all object hash tables */
42180 +
42181 + FOR_EACH_ROLE_START(r)
42182 + if (r->subj_hash == NULL)
42183 + goto next_role;
42184 + FOR_EACH_SUBJECT_START(r, s, x)
42185 + if (s->obj_hash == NULL)
42186 + break;
42187 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42188 + kfree(s->obj_hash);
42189 + else
42190 + vfree(s->obj_hash);
42191 + FOR_EACH_SUBJECT_END(s, x)
42192 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42193 + if (s->obj_hash == NULL)
42194 + break;
42195 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42196 + kfree(s->obj_hash);
42197 + else
42198 + vfree(s->obj_hash);
42199 + FOR_EACH_NESTED_SUBJECT_END(s)
42200 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42201 + kfree(r->subj_hash);
42202 + else
42203 + vfree(r->subj_hash);
42204 + r->subj_hash = NULL;
42205 +next_role:
42206 + FOR_EACH_ROLE_END(r)
42207 +
42208 + acl_free_all();
42209 +
42210 + if (acl_role_set.r_hash) {
42211 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42212 + PAGE_SIZE)
42213 + kfree(acl_role_set.r_hash);
42214 + else
42215 + vfree(acl_role_set.r_hash);
42216 + }
42217 + if (name_set.n_hash) {
42218 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42219 + PAGE_SIZE)
42220 + kfree(name_set.n_hash);
42221 + else
42222 + vfree(name_set.n_hash);
42223 + }
42224 +
42225 + if (inodev_set.i_hash) {
42226 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42227 + PAGE_SIZE)
42228 + kfree(inodev_set.i_hash);
42229 + else
42230 + vfree(inodev_set.i_hash);
42231 + }
42232 +
42233 + gr_free_uidset();
42234 +
42235 + memset(&name_set, 0, sizeof (struct name_db));
42236 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42237 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42238 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42239 +
42240 + default_role = NULL;
42241 + role_list = NULL;
42242 +
42243 + return;
42244 +}
42245 +
42246 +static __u32
42247 +count_user_objs(struct acl_object_label *userp)
42248 +{
42249 + struct acl_object_label o_tmp;
42250 + __u32 num = 0;
42251 +
42252 + while (userp) {
42253 + if (copy_from_user(&o_tmp, userp,
42254 + sizeof (struct acl_object_label)))
42255 + break;
42256 +
42257 + userp = o_tmp.prev;
42258 + num++;
42259 + }
42260 +
42261 + return num;
42262 +}
42263 +
42264 +static struct acl_subject_label *
42265 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42266 +
42267 +static int
42268 +copy_user_glob(struct acl_object_label *obj)
42269 +{
42270 + struct acl_object_label *g_tmp, **guser;
42271 + unsigned int len;
42272 + char *tmp;
42273 +
42274 + if (obj->globbed == NULL)
42275 + return 0;
42276 +
42277 + guser = &obj->globbed;
42278 + while (*guser) {
42279 + g_tmp = (struct acl_object_label *)
42280 + acl_alloc(sizeof (struct acl_object_label));
42281 + if (g_tmp == NULL)
42282 + return -ENOMEM;
42283 +
42284 + if (copy_from_user(g_tmp, *guser,
42285 + sizeof (struct acl_object_label)))
42286 + return -EFAULT;
42287 +
42288 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42289 +
42290 + if (!len || len >= PATH_MAX)
42291 + return -EINVAL;
42292 +
42293 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42294 + return -ENOMEM;
42295 +
42296 + if (copy_from_user(tmp, g_tmp->filename, len))
42297 + return -EFAULT;
42298 + tmp[len-1] = '\0';
42299 + g_tmp->filename = tmp;
42300 +
42301 + *guser = g_tmp;
42302 + guser = &(g_tmp->next);
42303 + }
42304 +
42305 + return 0;
42306 +}
42307 +
42308 +static int
42309 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42310 + struct acl_role_label *role)
42311 +{
42312 + struct acl_object_label *o_tmp;
42313 + unsigned int len;
42314 + int ret;
42315 + char *tmp;
42316 +
42317 + while (userp) {
42318 + if ((o_tmp = (struct acl_object_label *)
42319 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42320 + return -ENOMEM;
42321 +
42322 + if (copy_from_user(o_tmp, userp,
42323 + sizeof (struct acl_object_label)))
42324 + return -EFAULT;
42325 +
42326 + userp = o_tmp->prev;
42327 +
42328 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42329 +
42330 + if (!len || len >= PATH_MAX)
42331 + return -EINVAL;
42332 +
42333 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42334 + return -ENOMEM;
42335 +
42336 + if (copy_from_user(tmp, o_tmp->filename, len))
42337 + return -EFAULT;
42338 + tmp[len-1] = '\0';
42339 + o_tmp->filename = tmp;
42340 +
42341 + insert_acl_obj_label(o_tmp, subj);
42342 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42343 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42344 + return -ENOMEM;
42345 +
42346 + ret = copy_user_glob(o_tmp);
42347 + if (ret)
42348 + return ret;
42349 +
42350 + if (o_tmp->nested) {
42351 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42352 + if (IS_ERR(o_tmp->nested))
42353 + return PTR_ERR(o_tmp->nested);
42354 +
42355 + /* insert into nested subject list */
42356 + o_tmp->nested->next = role->hash->first;
42357 + role->hash->first = o_tmp->nested;
42358 + }
42359 + }
42360 +
42361 + return 0;
42362 +}
42363 +
42364 +static __u32
42365 +count_user_subjs(struct acl_subject_label *userp)
42366 +{
42367 + struct acl_subject_label s_tmp;
42368 + __u32 num = 0;
42369 +
42370 + while (userp) {
42371 + if (copy_from_user(&s_tmp, userp,
42372 + sizeof (struct acl_subject_label)))
42373 + break;
42374 +
42375 + userp = s_tmp.prev;
42376 + /* do not count nested subjects against this count, since
42377 + they are not included in the hash table, but are
42378 + attached to objects. We have already counted
42379 + the subjects in userspace for the allocation
42380 + stack
42381 + */
42382 + if (!(s_tmp.mode & GR_NESTED))
42383 + num++;
42384 + }
42385 +
42386 + return num;
42387 +}
42388 +
42389 +static int
42390 +copy_user_allowedips(struct acl_role_label *rolep)
42391 +{
42392 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42393 +
42394 + ruserip = rolep->allowed_ips;
42395 +
42396 + while (ruserip) {
42397 + rlast = rtmp;
42398 +
42399 + if ((rtmp = (struct role_allowed_ip *)
42400 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42401 + return -ENOMEM;
42402 +
42403 + if (copy_from_user(rtmp, ruserip,
42404 + sizeof (struct role_allowed_ip)))
42405 + return -EFAULT;
42406 +
42407 + ruserip = rtmp->prev;
42408 +
42409 + if (!rlast) {
42410 + rtmp->prev = NULL;
42411 + rolep->allowed_ips = rtmp;
42412 + } else {
42413 + rlast->next = rtmp;
42414 + rtmp->prev = rlast;
42415 + }
42416 +
42417 + if (!ruserip)
42418 + rtmp->next = NULL;
42419 + }
42420 +
42421 + return 0;
42422 +}
42423 +
42424 +static int
42425 +copy_user_transitions(struct acl_role_label *rolep)
42426 +{
42427 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42428 +
42429 + unsigned int len;
42430 + char *tmp;
42431 +
42432 + rusertp = rolep->transitions;
42433 +
42434 + while (rusertp) {
42435 + rlast = rtmp;
42436 +
42437 + if ((rtmp = (struct role_transition *)
42438 + acl_alloc(sizeof (struct role_transition))) == NULL)
42439 + return -ENOMEM;
42440 +
42441 + if (copy_from_user(rtmp, rusertp,
42442 + sizeof (struct role_transition)))
42443 + return -EFAULT;
42444 +
42445 + rusertp = rtmp->prev;
42446 +
42447 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42448 +
42449 + if (!len || len >= GR_SPROLE_LEN)
42450 + return -EINVAL;
42451 +
42452 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42453 + return -ENOMEM;
42454 +
42455 + if (copy_from_user(tmp, rtmp->rolename, len))
42456 + return -EFAULT;
42457 + tmp[len-1] = '\0';
42458 + rtmp->rolename = tmp;
42459 +
42460 + if (!rlast) {
42461 + rtmp->prev = NULL;
42462 + rolep->transitions = rtmp;
42463 + } else {
42464 + rlast->next = rtmp;
42465 + rtmp->prev = rlast;
42466 + }
42467 +
42468 + if (!rusertp)
42469 + rtmp->next = NULL;
42470 + }
42471 +
42472 + return 0;
42473 +}
42474 +
42475 +static struct acl_subject_label *
42476 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42477 +{
42478 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42479 + unsigned int len;
42480 + char *tmp;
42481 + __u32 num_objs;
42482 + struct acl_ip_label **i_tmp, *i_utmp2;
42483 + struct gr_hash_struct ghash;
42484 + struct subject_map *subjmap;
42485 + unsigned int i_num;
42486 + int err;
42487 +
42488 + s_tmp = lookup_subject_map(userp);
42489 +
42490 + /* we've already copied this subject into the kernel, just return
42491 + the reference to it, and don't copy it over again
42492 + */
42493 + if (s_tmp)
42494 + return(s_tmp);
42495 +
42496 + if ((s_tmp = (struct acl_subject_label *)
42497 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42498 + return ERR_PTR(-ENOMEM);
42499 +
42500 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42501 + if (subjmap == NULL)
42502 + return ERR_PTR(-ENOMEM);
42503 +
42504 + subjmap->user = userp;
42505 + subjmap->kernel = s_tmp;
42506 + insert_subj_map_entry(subjmap);
42507 +
42508 + if (copy_from_user(s_tmp, userp,
42509 + sizeof (struct acl_subject_label)))
42510 + return ERR_PTR(-EFAULT);
42511 +
42512 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42513 +
42514 + if (!len || len >= PATH_MAX)
42515 + return ERR_PTR(-EINVAL);
42516 +
42517 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42518 + return ERR_PTR(-ENOMEM);
42519 +
42520 + if (copy_from_user(tmp, s_tmp->filename, len))
42521 + return ERR_PTR(-EFAULT);
42522 + tmp[len-1] = '\0';
42523 + s_tmp->filename = tmp;
42524 +
42525 + if (!strcmp(s_tmp->filename, "/"))
42526 + role->root_label = s_tmp;
42527 +
42528 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42529 + return ERR_PTR(-EFAULT);
42530 +
42531 + /* copy user and group transition tables */
42532 +
42533 + if (s_tmp->user_trans_num) {
42534 + uid_t *uidlist;
42535 +
42536 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42537 + if (uidlist == NULL)
42538 + return ERR_PTR(-ENOMEM);
42539 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42540 + return ERR_PTR(-EFAULT);
42541 +
42542 + s_tmp->user_transitions = uidlist;
42543 + }
42544 +
42545 + if (s_tmp->group_trans_num) {
42546 + gid_t *gidlist;
42547 +
42548 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42549 + if (gidlist == NULL)
42550 + return ERR_PTR(-ENOMEM);
42551 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42552 + return ERR_PTR(-EFAULT);
42553 +
42554 + s_tmp->group_transitions = gidlist;
42555 + }
42556 +
42557 + /* set up object hash table */
42558 + num_objs = count_user_objs(ghash.first);
42559 +
42560 + s_tmp->obj_hash_size = num_objs;
42561 + s_tmp->obj_hash =
42562 + (struct acl_object_label **)
42563 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42564 +
42565 + if (!s_tmp->obj_hash)
42566 + return ERR_PTR(-ENOMEM);
42567 +
42568 + memset(s_tmp->obj_hash, 0,
42569 + s_tmp->obj_hash_size *
42570 + sizeof (struct acl_object_label *));
42571 +
42572 + /* add in objects */
42573 + err = copy_user_objs(ghash.first, s_tmp, role);
42574 +
42575 + if (err)
42576 + return ERR_PTR(err);
42577 +
42578 + /* set pointer for parent subject */
42579 + if (s_tmp->parent_subject) {
42580 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42581 +
42582 + if (IS_ERR(s_tmp2))
42583 + return s_tmp2;
42584 +
42585 + s_tmp->parent_subject = s_tmp2;
42586 + }
42587 +
42588 + /* add in ip acls */
42589 +
42590 + if (!s_tmp->ip_num) {
42591 + s_tmp->ips = NULL;
42592 + goto insert;
42593 + }
42594 +
42595 + i_tmp =
42596 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42597 + sizeof (struct acl_ip_label *));
42598 +
42599 + if (!i_tmp)
42600 + return ERR_PTR(-ENOMEM);
42601 +
42602 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42603 + *(i_tmp + i_num) =
42604 + (struct acl_ip_label *)
42605 + acl_alloc(sizeof (struct acl_ip_label));
42606 + if (!*(i_tmp + i_num))
42607 + return ERR_PTR(-ENOMEM);
42608 +
42609 + if (copy_from_user
42610 + (&i_utmp2, s_tmp->ips + i_num,
42611 + sizeof (struct acl_ip_label *)))
42612 + return ERR_PTR(-EFAULT);
42613 +
42614 + if (copy_from_user
42615 + (*(i_tmp + i_num), i_utmp2,
42616 + sizeof (struct acl_ip_label)))
42617 + return ERR_PTR(-EFAULT);
42618 +
42619 + if ((*(i_tmp + i_num))->iface == NULL)
42620 + continue;
42621 +
42622 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42623 + if (!len || len >= IFNAMSIZ)
42624 + return ERR_PTR(-EINVAL);
42625 + tmp = acl_alloc(len);
42626 + if (tmp == NULL)
42627 + return ERR_PTR(-ENOMEM);
42628 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42629 + return ERR_PTR(-EFAULT);
42630 + (*(i_tmp + i_num))->iface = tmp;
42631 + }
42632 +
42633 + s_tmp->ips = i_tmp;
42634 +
42635 +insert:
42636 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42637 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42638 + return ERR_PTR(-ENOMEM);
42639 +
42640 + return s_tmp;
42641 +}
42642 +
42643 +static int
42644 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42645 +{
42646 + struct acl_subject_label s_pre;
42647 + struct acl_subject_label * ret;
42648 + int err;
42649 +
42650 + while (userp) {
42651 + if (copy_from_user(&s_pre, userp,
42652 + sizeof (struct acl_subject_label)))
42653 + return -EFAULT;
42654 +
42655 + /* do not add nested subjects here, add
42656 + while parsing objects
42657 + */
42658 +
42659 + if (s_pre.mode & GR_NESTED) {
42660 + userp = s_pre.prev;
42661 + continue;
42662 + }
42663 +
42664 + ret = do_copy_user_subj(userp, role);
42665 +
42666 + err = PTR_ERR(ret);
42667 + if (IS_ERR(ret))
42668 + return err;
42669 +
42670 + insert_acl_subj_label(ret, role);
42671 +
42672 + userp = s_pre.prev;
42673 + }
42674 +
42675 + return 0;
42676 +}
42677 +
42678 +static int
42679 +copy_user_acl(struct gr_arg *arg)
42680 +{
42681 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42682 + struct sprole_pw *sptmp;
42683 + struct gr_hash_struct *ghash;
42684 + uid_t *domainlist;
42685 + unsigned int r_num;
42686 + unsigned int len;
42687 + char *tmp;
42688 + int err = 0;
42689 + __u16 i;
42690 + __u32 num_subjs;
42691 +
42692 + /* we need a default and kernel role */
42693 + if (arg->role_db.num_roles < 2)
42694 + return -EINVAL;
42695 +
42696 + /* copy special role authentication info from userspace */
42697 +
42698 + num_sprole_pws = arg->num_sprole_pws;
42699 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42700 +
42701 + if (!acl_special_roles) {
42702 + err = -ENOMEM;
42703 + goto cleanup;
42704 + }
42705 +
42706 + for (i = 0; i < num_sprole_pws; i++) {
42707 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42708 + if (!sptmp) {
42709 + err = -ENOMEM;
42710 + goto cleanup;
42711 + }
42712 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42713 + sizeof (struct sprole_pw))) {
42714 + err = -EFAULT;
42715 + goto cleanup;
42716 + }
42717 +
42718 + len =
42719 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42720 +
42721 + if (!len || len >= GR_SPROLE_LEN) {
42722 + err = -EINVAL;
42723 + goto cleanup;
42724 + }
42725 +
42726 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42727 + err = -ENOMEM;
42728 + goto cleanup;
42729 + }
42730 +
42731 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42732 + err = -EFAULT;
42733 + goto cleanup;
42734 + }
42735 + tmp[len-1] = '\0';
42736 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42737 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42738 +#endif
42739 + sptmp->rolename = tmp;
42740 + acl_special_roles[i] = sptmp;
42741 + }
42742 +
42743 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42744 +
42745 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42746 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42747 +
42748 + if (!r_tmp) {
42749 + err = -ENOMEM;
42750 + goto cleanup;
42751 + }
42752 +
42753 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42754 + sizeof (struct acl_role_label *))) {
42755 + err = -EFAULT;
42756 + goto cleanup;
42757 + }
42758 +
42759 + if (copy_from_user(r_tmp, r_utmp2,
42760 + sizeof (struct acl_role_label))) {
42761 + err = -EFAULT;
42762 + goto cleanup;
42763 + }
42764 +
42765 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42766 +
42767 + if (!len || len >= PATH_MAX) {
42768 + err = -EINVAL;
42769 + goto cleanup;
42770 + }
42771 +
42772 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42773 + err = -ENOMEM;
42774 + goto cleanup;
42775 + }
42776 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
42777 + err = -EFAULT;
42778 + goto cleanup;
42779 + }
42780 + tmp[len-1] = '\0';
42781 + r_tmp->rolename = tmp;
42782 +
42783 + if (!strcmp(r_tmp->rolename, "default")
42784 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42785 + default_role = r_tmp;
42786 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42787 + kernel_role = r_tmp;
42788 + }
42789 +
42790 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42791 + err = -ENOMEM;
42792 + goto cleanup;
42793 + }
42794 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42795 + err = -EFAULT;
42796 + goto cleanup;
42797 + }
42798 +
42799 + r_tmp->hash = ghash;
42800 +
42801 + num_subjs = count_user_subjs(r_tmp->hash->first);
42802 +
42803 + r_tmp->subj_hash_size = num_subjs;
42804 + r_tmp->subj_hash =
42805 + (struct acl_subject_label **)
42806 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42807 +
42808 + if (!r_tmp->subj_hash) {
42809 + err = -ENOMEM;
42810 + goto cleanup;
42811 + }
42812 +
42813 + err = copy_user_allowedips(r_tmp);
42814 + if (err)
42815 + goto cleanup;
42816 +
42817 + /* copy domain info */
42818 + if (r_tmp->domain_children != NULL) {
42819 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42820 + if (domainlist == NULL) {
42821 + err = -ENOMEM;
42822 + goto cleanup;
42823 + }
42824 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42825 + err = -EFAULT;
42826 + goto cleanup;
42827 + }
42828 + r_tmp->domain_children = domainlist;
42829 + }
42830 +
42831 + err = copy_user_transitions(r_tmp);
42832 + if (err)
42833 + goto cleanup;
42834 +
42835 + memset(r_tmp->subj_hash, 0,
42836 + r_tmp->subj_hash_size *
42837 + sizeof (struct acl_subject_label *));
42838 +
42839 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42840 +
42841 + if (err)
42842 + goto cleanup;
42843 +
42844 + /* set nested subject list to null */
42845 + r_tmp->hash->first = NULL;
42846 +
42847 + insert_acl_role_label(r_tmp);
42848 + }
42849 +
42850 + goto return_err;
42851 + cleanup:
42852 + free_variables();
42853 + return_err:
42854 + return err;
42855 +
42856 +}
42857 +
42858 +static int
42859 +gracl_init(struct gr_arg *args)
42860 +{
42861 + int error = 0;
42862 +
42863 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
42864 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
42865 +
42866 + if (init_variables(args)) {
42867 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
42868 + error = -ENOMEM;
42869 + free_variables();
42870 + goto out;
42871 + }
42872 +
42873 + error = copy_user_acl(args);
42874 + free_init_variables();
42875 + if (error) {
42876 + free_variables();
42877 + goto out;
42878 + }
42879 +
42880 + if ((error = gr_set_acls(0))) {
42881 + free_variables();
42882 + goto out;
42883 + }
42884 +
42885 + pax_open_kernel();
42886 + gr_status |= GR_READY;
42887 + pax_close_kernel();
42888 +
42889 + out:
42890 + return error;
42891 +}
42892 +
42893 +/* derived from glibc fnmatch() 0: match, 1: no match*/
42894 +
42895 +static int
42896 +glob_match(const char *p, const char *n)
42897 +{
42898 + char c;
42899 +
42900 + while ((c = *p++) != '\0') {
42901 + switch (c) {
42902 + case '?':
42903 + if (*n == '\0')
42904 + return 1;
42905 + else if (*n == '/')
42906 + return 1;
42907 + break;
42908 + case '\\':
42909 + if (*n != c)
42910 + return 1;
42911 + break;
42912 + case '*':
42913 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
42914 + if (*n == '/')
42915 + return 1;
42916 + else if (c == '?') {
42917 + if (*n == '\0')
42918 + return 1;
42919 + else
42920 + ++n;
42921 + }
42922 + }
42923 + if (c == '\0') {
42924 + return 0;
42925 + } else {
42926 + const char *endp;
42927 +
42928 + if ((endp = strchr(n, '/')) == NULL)
42929 + endp = n + strlen(n);
42930 +
42931 + if (c == '[') {
42932 + for (--p; n < endp; ++n)
42933 + if (!glob_match(p, n))
42934 + return 0;
42935 + } else if (c == '/') {
42936 + while (*n != '\0' && *n != '/')
42937 + ++n;
42938 + if (*n == '/' && !glob_match(p, n + 1))
42939 + return 0;
42940 + } else {
42941 + for (--p; n < endp; ++n)
42942 + if (*n == c && !glob_match(p, n))
42943 + return 0;
42944 + }
42945 +
42946 + return 1;
42947 + }
42948 + case '[':
42949 + {
42950 + int not;
42951 + char cold;
42952 +
42953 + if (*n == '\0' || *n == '/')
42954 + return 1;
42955 +
42956 + not = (*p == '!' || *p == '^');
42957 + if (not)
42958 + ++p;
42959 +
42960 + c = *p++;
42961 + for (;;) {
42962 + unsigned char fn = (unsigned char)*n;
42963 +
42964 + if (c == '\0')
42965 + return 1;
42966 + else {
42967 + if (c == fn)
42968 + goto matched;
42969 + cold = c;
42970 + c = *p++;
42971 +
42972 + if (c == '-' && *p != ']') {
42973 + unsigned char cend = *p++;
42974 +
42975 + if (cend == '\0')
42976 + return 1;
42977 +
42978 + if (cold <= fn && fn <= cend)
42979 + goto matched;
42980 +
42981 + c = *p++;
42982 + }
42983 + }
42984 +
42985 + if (c == ']')
42986 + break;
42987 + }
42988 + if (!not)
42989 + return 1;
42990 + break;
42991 + matched:
42992 + while (c != ']') {
42993 + if (c == '\0')
42994 + return 1;
42995 +
42996 + c = *p++;
42997 + }
42998 + if (not)
42999 + return 1;
43000 + }
43001 + break;
43002 + default:
43003 + if (c != *n)
43004 + return 1;
43005 + }
43006 +
43007 + ++n;
43008 + }
43009 +
43010 + if (*n == '\0')
43011 + return 0;
43012 +
43013 + if (*n == '/')
43014 + return 0;
43015 +
43016 + return 1;
43017 +}
43018 +
43019 +static struct acl_object_label *
43020 +chk_glob_label(struct acl_object_label *globbed,
43021 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43022 +{
43023 + struct acl_object_label *tmp;
43024 +
43025 + if (*path == NULL)
43026 + *path = gr_to_filename_nolock(dentry, mnt);
43027 +
43028 + tmp = globbed;
43029 +
43030 + while (tmp) {
43031 + if (!glob_match(tmp->filename, *path))
43032 + return tmp;
43033 + tmp = tmp->next;
43034 + }
43035 +
43036 + return NULL;
43037 +}
43038 +
43039 +static struct acl_object_label *
43040 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43041 + const ino_t curr_ino, const dev_t curr_dev,
43042 + const struct acl_subject_label *subj, char **path, const int checkglob)
43043 +{
43044 + struct acl_subject_label *tmpsubj;
43045 + struct acl_object_label *retval;
43046 + struct acl_object_label *retval2;
43047 +
43048 + tmpsubj = (struct acl_subject_label *) subj;
43049 + read_lock(&gr_inode_lock);
43050 + do {
43051 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43052 + if (retval) {
43053 + if (checkglob && retval->globbed) {
43054 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43055 + (struct vfsmount *)orig_mnt, path);
43056 + if (retval2)
43057 + retval = retval2;
43058 + }
43059 + break;
43060 + }
43061 + } while ((tmpsubj = tmpsubj->parent_subject));
43062 + read_unlock(&gr_inode_lock);
43063 +
43064 + return retval;
43065 +}
43066 +
43067 +static __inline__ struct acl_object_label *
43068 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43069 + struct dentry *curr_dentry,
43070 + const struct acl_subject_label *subj, char **path, const int checkglob)
43071 +{
43072 + int newglob = checkglob;
43073 + ino_t inode;
43074 + dev_t device;
43075 +
43076 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43077 + as we don't want a / * rule to match instead of the / object
43078 + don't do this for create lookups that call this function though, since they're looking up
43079 + on the parent and thus need globbing checks on all paths
43080 + */
43081 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43082 + newglob = GR_NO_GLOB;
43083 +
43084 + spin_lock(&curr_dentry->d_lock);
43085 + inode = curr_dentry->d_inode->i_ino;
43086 + device = __get_dev(curr_dentry);
43087 + spin_unlock(&curr_dentry->d_lock);
43088 +
43089 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43090 +}
43091 +
43092 +static struct acl_object_label *
43093 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43094 + const struct acl_subject_label *subj, char *path, const int checkglob)
43095 +{
43096 + struct dentry *dentry = (struct dentry *) l_dentry;
43097 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43098 + struct acl_object_label *retval;
43099 + struct dentry *parent;
43100 +
43101 + write_seqlock(&rename_lock);
43102 + br_read_lock(vfsmount_lock);
43103 +
43104 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43105 +#ifdef CONFIG_NET
43106 + mnt == sock_mnt ||
43107 +#endif
43108 +#ifdef CONFIG_HUGETLBFS
43109 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43110 +#endif
43111 + /* ignore Eric Biederman */
43112 + IS_PRIVATE(l_dentry->d_inode))) {
43113 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43114 + goto out;
43115 + }
43116 +
43117 + for (;;) {
43118 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43119 + break;
43120 +
43121 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43122 + if (mnt->mnt_parent == mnt)
43123 + break;
43124 +
43125 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43126 + if (retval != NULL)
43127 + goto out;
43128 +
43129 + dentry = mnt->mnt_mountpoint;
43130 + mnt = mnt->mnt_parent;
43131 + continue;
43132 + }
43133 +
43134 + parent = dentry->d_parent;
43135 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43136 + if (retval != NULL)
43137 + goto out;
43138 +
43139 + dentry = parent;
43140 + }
43141 +
43142 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43143 +
43144 + /* real_root is pinned so we don't have to hold a reference */
43145 + if (retval == NULL)
43146 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43147 +out:
43148 + br_read_unlock(vfsmount_lock);
43149 + write_sequnlock(&rename_lock);
43150 +
43151 + BUG_ON(retval == NULL);
43152 +
43153 + return retval;
43154 +}
43155 +
43156 +static __inline__ struct acl_object_label *
43157 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43158 + const struct acl_subject_label *subj)
43159 +{
43160 + char *path = NULL;
43161 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43162 +}
43163 +
43164 +static __inline__ struct acl_object_label *
43165 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43166 + const struct acl_subject_label *subj)
43167 +{
43168 + char *path = NULL;
43169 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43170 +}
43171 +
43172 +static __inline__ struct acl_object_label *
43173 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43174 + const struct acl_subject_label *subj, char *path)
43175 +{
43176 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43177 +}
43178 +
43179 +static struct acl_subject_label *
43180 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43181 + const struct acl_role_label *role)
43182 +{
43183 + struct dentry *dentry = (struct dentry *) l_dentry;
43184 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43185 + struct acl_subject_label *retval;
43186 + struct dentry *parent;
43187 +
43188 + write_seqlock(&rename_lock);
43189 + br_read_lock(vfsmount_lock);
43190 +
43191 + for (;;) {
43192 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43193 + break;
43194 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43195 + if (mnt->mnt_parent == mnt)
43196 + break;
43197 +
43198 + spin_lock(&dentry->d_lock);
43199 + read_lock(&gr_inode_lock);
43200 + retval =
43201 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43202 + __get_dev(dentry), role);
43203 + read_unlock(&gr_inode_lock);
43204 + spin_unlock(&dentry->d_lock);
43205 + if (retval != NULL)
43206 + goto out;
43207 +
43208 + dentry = mnt->mnt_mountpoint;
43209 + mnt = mnt->mnt_parent;
43210 + continue;
43211 + }
43212 +
43213 + spin_lock(&dentry->d_lock);
43214 + read_lock(&gr_inode_lock);
43215 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43216 + __get_dev(dentry), role);
43217 + read_unlock(&gr_inode_lock);
43218 + parent = dentry->d_parent;
43219 + spin_unlock(&dentry->d_lock);
43220 +
43221 + if (retval != NULL)
43222 + goto out;
43223 +
43224 + dentry = parent;
43225 + }
43226 +
43227 + spin_lock(&dentry->d_lock);
43228 + read_lock(&gr_inode_lock);
43229 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43230 + __get_dev(dentry), role);
43231 + read_unlock(&gr_inode_lock);
43232 + spin_unlock(&dentry->d_lock);
43233 +
43234 + if (unlikely(retval == NULL)) {
43235 + /* real_root is pinned, we don't need to hold a reference */
43236 + read_lock(&gr_inode_lock);
43237 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43238 + __get_dev(real_root.dentry), role);
43239 + read_unlock(&gr_inode_lock);
43240 + }
43241 +out:
43242 + br_read_unlock(vfsmount_lock);
43243 + write_sequnlock(&rename_lock);
43244 +
43245 + BUG_ON(retval == NULL);
43246 +
43247 + return retval;
43248 +}
43249 +
43250 +static void
43251 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43252 +{
43253 + struct task_struct *task = current;
43254 + const struct cred *cred = current_cred();
43255 +
43256 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43257 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43258 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43259 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43260 +
43261 + return;
43262 +}
43263 +
43264 +static void
43265 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43266 +{
43267 + struct task_struct *task = current;
43268 + const struct cred *cred = current_cred();
43269 +
43270 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43271 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43272 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43273 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43274 +
43275 + return;
43276 +}
43277 +
43278 +static void
43279 +gr_log_learn_id_change(const char type, const unsigned int real,
43280 + const unsigned int effective, const unsigned int fs)
43281 +{
43282 + struct task_struct *task = current;
43283 + const struct cred *cred = current_cred();
43284 +
43285 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43286 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43287 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43288 + type, real, effective, fs, &task->signal->saved_ip);
43289 +
43290 + return;
43291 +}
43292 +
43293 +__u32
43294 +gr_check_link(const struct dentry * new_dentry,
43295 + const struct dentry * parent_dentry,
43296 + const struct vfsmount * parent_mnt,
43297 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43298 +{
43299 + struct acl_object_label *obj;
43300 + __u32 oldmode, newmode;
43301 + __u32 needmode;
43302 +
43303 + if (unlikely(!(gr_status & GR_READY)))
43304 + return (GR_CREATE | GR_LINK);
43305 +
43306 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43307 + oldmode = obj->mode;
43308 +
43309 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43310 + oldmode |= (GR_CREATE | GR_LINK);
43311 +
43312 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43313 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43314 + needmode |= GR_SETID | GR_AUDIT_SETID;
43315 +
43316 + newmode =
43317 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43318 + oldmode | needmode);
43319 +
43320 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43321 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43322 + GR_INHERIT | GR_AUDIT_INHERIT);
43323 +
43324 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43325 + goto bad;
43326 +
43327 + if ((oldmode & needmode) != needmode)
43328 + goto bad;
43329 +
43330 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43331 + if ((newmode & needmode) != needmode)
43332 + goto bad;
43333 +
43334 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43335 + return newmode;
43336 +bad:
43337 + needmode = oldmode;
43338 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43339 + needmode |= GR_SETID;
43340 +
43341 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43342 + gr_log_learn(old_dentry, old_mnt, needmode);
43343 + return (GR_CREATE | GR_LINK);
43344 + } else if (newmode & GR_SUPPRESS)
43345 + return GR_SUPPRESS;
43346 + else
43347 + return 0;
43348 +}
43349 +
43350 +__u32
43351 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43352 + const struct vfsmount * mnt)
43353 +{
43354 + __u32 retval = mode;
43355 + struct acl_subject_label *curracl;
43356 + struct acl_object_label *currobj;
43357 +
43358 + if (unlikely(!(gr_status & GR_READY)))
43359 + return (mode & ~GR_AUDITS);
43360 +
43361 + curracl = current->acl;
43362 +
43363 + currobj = chk_obj_label(dentry, mnt, curracl);
43364 + retval = currobj->mode & mode;
43365 +
43366 + /* if we're opening a specified transfer file for writing
43367 + (e.g. /dev/initctl), then transfer our role to init
43368 + */
43369 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43370 + current->role->roletype & GR_ROLE_PERSIST)) {
43371 + struct task_struct *task = init_pid_ns.child_reaper;
43372 +
43373 + if (task->role != current->role) {
43374 + task->acl_sp_role = 0;
43375 + task->acl_role_id = current->acl_role_id;
43376 + task->role = current->role;
43377 + rcu_read_lock();
43378 + read_lock(&grsec_exec_file_lock);
43379 + gr_apply_subject_to_task(task);
43380 + read_unlock(&grsec_exec_file_lock);
43381 + rcu_read_unlock();
43382 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43383 + }
43384 + }
43385 +
43386 + if (unlikely
43387 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43388 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43389 + __u32 new_mode = mode;
43390 +
43391 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43392 +
43393 + retval = new_mode;
43394 +
43395 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43396 + new_mode |= GR_INHERIT;
43397 +
43398 + if (!(mode & GR_NOLEARN))
43399 + gr_log_learn(dentry, mnt, new_mode);
43400 + }
43401 +
43402 + return retval;
43403 +}
43404 +
43405 +__u32
43406 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43407 + const struct vfsmount * mnt, const __u32 mode)
43408 +{
43409 + struct name_entry *match;
43410 + struct acl_object_label *matchpo;
43411 + struct acl_subject_label *curracl;
43412 + char *path;
43413 + __u32 retval;
43414 +
43415 + if (unlikely(!(gr_status & GR_READY)))
43416 + return (mode & ~GR_AUDITS);
43417 +
43418 + preempt_disable();
43419 + path = gr_to_filename_rbac(new_dentry, mnt);
43420 + match = lookup_name_entry_create(path);
43421 +
43422 + if (!match)
43423 + goto check_parent;
43424 +
43425 + curracl = current->acl;
43426 +
43427 + read_lock(&gr_inode_lock);
43428 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43429 + read_unlock(&gr_inode_lock);
43430 +
43431 + if (matchpo) {
43432 + if ((matchpo->mode & mode) !=
43433 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43434 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43435 + __u32 new_mode = mode;
43436 +
43437 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43438 +
43439 + gr_log_learn(new_dentry, mnt, new_mode);
43440 +
43441 + preempt_enable();
43442 + return new_mode;
43443 + }
43444 + preempt_enable();
43445 + return (matchpo->mode & mode);
43446 + }
43447 +
43448 + check_parent:
43449 + curracl = current->acl;
43450 +
43451 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43452 + retval = matchpo->mode & mode;
43453 +
43454 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43455 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43456 + __u32 new_mode = mode;
43457 +
43458 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43459 +
43460 + gr_log_learn(new_dentry, mnt, new_mode);
43461 + preempt_enable();
43462 + return new_mode;
43463 + }
43464 +
43465 + preempt_enable();
43466 + return retval;
43467 +}
43468 +
43469 +int
43470 +gr_check_hidden_task(const struct task_struct *task)
43471 +{
43472 + if (unlikely(!(gr_status & GR_READY)))
43473 + return 0;
43474 +
43475 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43476 + return 1;
43477 +
43478 + return 0;
43479 +}
43480 +
43481 +int
43482 +gr_check_protected_task(const struct task_struct *task)
43483 +{
43484 + if (unlikely(!(gr_status & GR_READY) || !task))
43485 + return 0;
43486 +
43487 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43488 + task->acl != current->acl)
43489 + return 1;
43490 +
43491 + return 0;
43492 +}
43493 +
43494 +int
43495 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43496 +{
43497 + struct task_struct *p;
43498 + int ret = 0;
43499 +
43500 + if (unlikely(!(gr_status & GR_READY) || !pid))
43501 + return ret;
43502 +
43503 + read_lock(&tasklist_lock);
43504 + do_each_pid_task(pid, type, p) {
43505 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43506 + p->acl != current->acl) {
43507 + ret = 1;
43508 + goto out;
43509 + }
43510 + } while_each_pid_task(pid, type, p);
43511 +out:
43512 + read_unlock(&tasklist_lock);
43513 +
43514 + return ret;
43515 +}
43516 +
43517 +void
43518 +gr_copy_label(struct task_struct *tsk)
43519 +{
43520 + tsk->signal->used_accept = 0;
43521 + tsk->acl_sp_role = 0;
43522 + tsk->acl_role_id = current->acl_role_id;
43523 + tsk->acl = current->acl;
43524 + tsk->role = current->role;
43525 + tsk->signal->curr_ip = current->signal->curr_ip;
43526 + tsk->signal->saved_ip = current->signal->saved_ip;
43527 + if (current->exec_file)
43528 + get_file(current->exec_file);
43529 + tsk->exec_file = current->exec_file;
43530 + tsk->is_writable = current->is_writable;
43531 + if (unlikely(current->signal->used_accept)) {
43532 + current->signal->curr_ip = 0;
43533 + current->signal->saved_ip = 0;
43534 + }
43535 +
43536 + return;
43537 +}
43538 +
43539 +static void
43540 +gr_set_proc_res(struct task_struct *task)
43541 +{
43542 + struct acl_subject_label *proc;
43543 + unsigned short i;
43544 +
43545 + proc = task->acl;
43546 +
43547 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43548 + return;
43549 +
43550 + for (i = 0; i < RLIM_NLIMITS; i++) {
43551 + if (!(proc->resmask & (1 << i)))
43552 + continue;
43553 +
43554 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43555 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43556 + }
43557 +
43558 + return;
43559 +}
43560 +
43561 +extern int __gr_process_user_ban(struct user_struct *user);
43562 +
43563 +int
43564 +gr_check_user_change(int real, int effective, int fs)
43565 +{
43566 + unsigned int i;
43567 + __u16 num;
43568 + uid_t *uidlist;
43569 + int curuid;
43570 + int realok = 0;
43571 + int effectiveok = 0;
43572 + int fsok = 0;
43573 +
43574 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43575 + struct user_struct *user;
43576 +
43577 + if (real == -1)
43578 + goto skipit;
43579 +
43580 + user = find_user(real);
43581 + if (user == NULL)
43582 + goto skipit;
43583 +
43584 + if (__gr_process_user_ban(user)) {
43585 + /* for find_user */
43586 + free_uid(user);
43587 + return 1;
43588 + }
43589 +
43590 + /* for find_user */
43591 + free_uid(user);
43592 +
43593 +skipit:
43594 +#endif
43595 +
43596 + if (unlikely(!(gr_status & GR_READY)))
43597 + return 0;
43598 +
43599 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43600 + gr_log_learn_id_change('u', real, effective, fs);
43601 +
43602 + num = current->acl->user_trans_num;
43603 + uidlist = current->acl->user_transitions;
43604 +
43605 + if (uidlist == NULL)
43606 + return 0;
43607 +
43608 + if (real == -1)
43609 + realok = 1;
43610 + if (effective == -1)
43611 + effectiveok = 1;
43612 + if (fs == -1)
43613 + fsok = 1;
43614 +
43615 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43616 + for (i = 0; i < num; i++) {
43617 + curuid = (int)uidlist[i];
43618 + if (real == curuid)
43619 + realok = 1;
43620 + if (effective == curuid)
43621 + effectiveok = 1;
43622 + if (fs == curuid)
43623 + fsok = 1;
43624 + }
43625 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43626 + for (i = 0; i < num; i++) {
43627 + curuid = (int)uidlist[i];
43628 + if (real == curuid)
43629 + break;
43630 + if (effective == curuid)
43631 + break;
43632 + if (fs == curuid)
43633 + break;
43634 + }
43635 + /* not in deny list */
43636 + if (i == num) {
43637 + realok = 1;
43638 + effectiveok = 1;
43639 + fsok = 1;
43640 + }
43641 + }
43642 +
43643 + if (realok && effectiveok && fsok)
43644 + return 0;
43645 + else {
43646 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43647 + return 1;
43648 + }
43649 +}
43650 +
43651 +int
43652 +gr_check_group_change(int real, int effective, int fs)
43653 +{
43654 + unsigned int i;
43655 + __u16 num;
43656 + gid_t *gidlist;
43657 + int curgid;
43658 + int realok = 0;
43659 + int effectiveok = 0;
43660 + int fsok = 0;
43661 +
43662 + if (unlikely(!(gr_status & GR_READY)))
43663 + return 0;
43664 +
43665 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43666 + gr_log_learn_id_change('g', real, effective, fs);
43667 +
43668 + num = current->acl->group_trans_num;
43669 + gidlist = current->acl->group_transitions;
43670 +
43671 + if (gidlist == NULL)
43672 + return 0;
43673 +
43674 + if (real == -1)
43675 + realok = 1;
43676 + if (effective == -1)
43677 + effectiveok = 1;
43678 + if (fs == -1)
43679 + fsok = 1;
43680 +
43681 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43682 + for (i = 0; i < num; i++) {
43683 + curgid = (int)gidlist[i];
43684 + if (real == curgid)
43685 + realok = 1;
43686 + if (effective == curgid)
43687 + effectiveok = 1;
43688 + if (fs == curgid)
43689 + fsok = 1;
43690 + }
43691 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43692 + for (i = 0; i < num; i++) {
43693 + curgid = (int)gidlist[i];
43694 + if (real == curgid)
43695 + break;
43696 + if (effective == curgid)
43697 + break;
43698 + if (fs == curgid)
43699 + break;
43700 + }
43701 + /* not in deny list */
43702 + if (i == num) {
43703 + realok = 1;
43704 + effectiveok = 1;
43705 + fsok = 1;
43706 + }
43707 + }
43708 +
43709 + if (realok && effectiveok && fsok)
43710 + return 0;
43711 + else {
43712 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43713 + return 1;
43714 + }
43715 +}
43716 +
43717 +void
43718 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43719 +{
43720 + struct acl_role_label *role = task->role;
43721 + struct acl_subject_label *subj = NULL;
43722 + struct acl_object_label *obj;
43723 + struct file *filp;
43724 +
43725 + if (unlikely(!(gr_status & GR_READY)))
43726 + return;
43727 +
43728 + filp = task->exec_file;
43729 +
43730 + /* kernel process, we'll give them the kernel role */
43731 + if (unlikely(!filp)) {
43732 + task->role = kernel_role;
43733 + task->acl = kernel_role->root_label;
43734 + return;
43735 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43736 + role = lookup_acl_role_label(task, uid, gid);
43737 +
43738 + /* perform subject lookup in possibly new role
43739 + we can use this result below in the case where role == task->role
43740 + */
43741 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43742 +
43743 + /* if we changed uid/gid, but result in the same role
43744 + and are using inheritance, don't lose the inherited subject
43745 + if current subject is other than what normal lookup
43746 + would result in, we arrived via inheritance, don't
43747 + lose subject
43748 + */
43749 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43750 + (subj == task->acl)))
43751 + task->acl = subj;
43752 +
43753 + task->role = role;
43754 +
43755 + task->is_writable = 0;
43756 +
43757 + /* ignore additional mmap checks for processes that are writable
43758 + by the default ACL */
43759 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43760 + if (unlikely(obj->mode & GR_WRITE))
43761 + task->is_writable = 1;
43762 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43763 + if (unlikely(obj->mode & GR_WRITE))
43764 + task->is_writable = 1;
43765 +
43766 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43767 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43768 +#endif
43769 +
43770 + gr_set_proc_res(task);
43771 +
43772 + return;
43773 +}
43774 +
43775 +int
43776 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43777 + const int unsafe_share)
43778 +{
43779 + struct task_struct *task = current;
43780 + struct acl_subject_label *newacl;
43781 + struct acl_object_label *obj;
43782 + __u32 retmode;
43783 +
43784 + if (unlikely(!(gr_status & GR_READY)))
43785 + return 0;
43786 +
43787 + newacl = chk_subj_label(dentry, mnt, task->role);
43788 +
43789 + task_lock(task);
43790 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43791 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43792 + !(task->role->roletype & GR_ROLE_GOD) &&
43793 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43794 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43795 + task_unlock(task);
43796 + if (unsafe_share)
43797 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43798 + else
43799 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43800 + return -EACCES;
43801 + }
43802 + task_unlock(task);
43803 +
43804 + obj = chk_obj_label(dentry, mnt, task->acl);
43805 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43806 +
43807 + if (!(task->acl->mode & GR_INHERITLEARN) &&
43808 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43809 + if (obj->nested)
43810 + task->acl = obj->nested;
43811 + else
43812 + task->acl = newacl;
43813 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43814 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43815 +
43816 + task->is_writable = 0;
43817 +
43818 + /* ignore additional mmap checks for processes that are writable
43819 + by the default ACL */
43820 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
43821 + if (unlikely(obj->mode & GR_WRITE))
43822 + task->is_writable = 1;
43823 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
43824 + if (unlikely(obj->mode & GR_WRITE))
43825 + task->is_writable = 1;
43826 +
43827 + gr_set_proc_res(task);
43828 +
43829 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43830 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43831 +#endif
43832 + return 0;
43833 +}
43834 +
43835 +/* always called with valid inodev ptr */
43836 +static void
43837 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43838 +{
43839 + struct acl_object_label *matchpo;
43840 + struct acl_subject_label *matchps;
43841 + struct acl_subject_label *subj;
43842 + struct acl_role_label *role;
43843 + unsigned int x;
43844 +
43845 + FOR_EACH_ROLE_START(role)
43846 + FOR_EACH_SUBJECT_START(role, subj, x)
43847 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43848 + matchpo->mode |= GR_DELETED;
43849 + FOR_EACH_SUBJECT_END(subj,x)
43850 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
43851 + if (subj->inode == ino && subj->device == dev)
43852 + subj->mode |= GR_DELETED;
43853 + FOR_EACH_NESTED_SUBJECT_END(subj)
43854 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
43855 + matchps->mode |= GR_DELETED;
43856 + FOR_EACH_ROLE_END(role)
43857 +
43858 + inodev->nentry->deleted = 1;
43859 +
43860 + return;
43861 +}
43862 +
43863 +void
43864 +gr_handle_delete(const ino_t ino, const dev_t dev)
43865 +{
43866 + struct inodev_entry *inodev;
43867 +
43868 + if (unlikely(!(gr_status & GR_READY)))
43869 + return;
43870 +
43871 + write_lock(&gr_inode_lock);
43872 + inodev = lookup_inodev_entry(ino, dev);
43873 + if (inodev != NULL)
43874 + do_handle_delete(inodev, ino, dev);
43875 + write_unlock(&gr_inode_lock);
43876 +
43877 + return;
43878 +}
43879 +
43880 +static void
43881 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
43882 + const ino_t newinode, const dev_t newdevice,
43883 + struct acl_subject_label *subj)
43884 +{
43885 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
43886 + struct acl_object_label *match;
43887 +
43888 + match = subj->obj_hash[index];
43889 +
43890 + while (match && (match->inode != oldinode ||
43891 + match->device != olddevice ||
43892 + !(match->mode & GR_DELETED)))
43893 + match = match->next;
43894 +
43895 + if (match && (match->inode == oldinode)
43896 + && (match->device == olddevice)
43897 + && (match->mode & GR_DELETED)) {
43898 + if (match->prev == NULL) {
43899 + subj->obj_hash[index] = match->next;
43900 + if (match->next != NULL)
43901 + match->next->prev = NULL;
43902 + } else {
43903 + match->prev->next = match->next;
43904 + if (match->next != NULL)
43905 + match->next->prev = match->prev;
43906 + }
43907 + match->prev = NULL;
43908 + match->next = NULL;
43909 + match->inode = newinode;
43910 + match->device = newdevice;
43911 + match->mode &= ~GR_DELETED;
43912 +
43913 + insert_acl_obj_label(match, subj);
43914 + }
43915 +
43916 + return;
43917 +}
43918 +
43919 +static void
43920 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
43921 + const ino_t newinode, const dev_t newdevice,
43922 + struct acl_role_label *role)
43923 +{
43924 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
43925 + struct acl_subject_label *match;
43926 +
43927 + match = role->subj_hash[index];
43928 +
43929 + while (match && (match->inode != oldinode ||
43930 + match->device != olddevice ||
43931 + !(match->mode & GR_DELETED)))
43932 + match = match->next;
43933 +
43934 + if (match && (match->inode == oldinode)
43935 + && (match->device == olddevice)
43936 + && (match->mode & GR_DELETED)) {
43937 + if (match->prev == NULL) {
43938 + role->subj_hash[index] = match->next;
43939 + if (match->next != NULL)
43940 + match->next->prev = NULL;
43941 + } else {
43942 + match->prev->next = match->next;
43943 + if (match->next != NULL)
43944 + match->next->prev = match->prev;
43945 + }
43946 + match->prev = NULL;
43947 + match->next = NULL;
43948 + match->inode = newinode;
43949 + match->device = newdevice;
43950 + match->mode &= ~GR_DELETED;
43951 +
43952 + insert_acl_subj_label(match, role);
43953 + }
43954 +
43955 + return;
43956 +}
43957 +
43958 +static void
43959 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
43960 + const ino_t newinode, const dev_t newdevice)
43961 +{
43962 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
43963 + struct inodev_entry *match;
43964 +
43965 + match = inodev_set.i_hash[index];
43966 +
43967 + while (match && (match->nentry->inode != oldinode ||
43968 + match->nentry->device != olddevice || !match->nentry->deleted))
43969 + match = match->next;
43970 +
43971 + if (match && (match->nentry->inode == oldinode)
43972 + && (match->nentry->device == olddevice) &&
43973 + match->nentry->deleted) {
43974 + if (match->prev == NULL) {
43975 + inodev_set.i_hash[index] = match->next;
43976 + if (match->next != NULL)
43977 + match->next->prev = NULL;
43978 + } else {
43979 + match->prev->next = match->next;
43980 + if (match->next != NULL)
43981 + match->next->prev = match->prev;
43982 + }
43983 + match->prev = NULL;
43984 + match->next = NULL;
43985 + match->nentry->inode = newinode;
43986 + match->nentry->device = newdevice;
43987 + match->nentry->deleted = 0;
43988 +
43989 + insert_inodev_entry(match);
43990 + }
43991 +
43992 + return;
43993 +}
43994 +
43995 +static void
43996 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
43997 + const struct vfsmount *mnt)
43998 +{
43999 + struct acl_subject_label *subj;
44000 + struct acl_role_label *role;
44001 + unsigned int x;
44002 + ino_t ino = dentry->d_inode->i_ino;
44003 + dev_t dev = __get_dev(dentry);
44004 +
44005 + FOR_EACH_ROLE_START(role)
44006 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44007 +
44008 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44009 + if ((subj->inode == ino) && (subj->device == dev)) {
44010 + subj->inode = ino;
44011 + subj->device = dev;
44012 + }
44013 + FOR_EACH_NESTED_SUBJECT_END(subj)
44014 + FOR_EACH_SUBJECT_START(role, subj, x)
44015 + update_acl_obj_label(matchn->inode, matchn->device,
44016 + ino, dev, subj);
44017 + FOR_EACH_SUBJECT_END(subj,x)
44018 + FOR_EACH_ROLE_END(role)
44019 +
44020 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44021 +
44022 + return;
44023 +}
44024 +
44025 +void
44026 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44027 +{
44028 + struct name_entry *matchn;
44029 +
44030 + if (unlikely(!(gr_status & GR_READY)))
44031 + return;
44032 +
44033 + preempt_disable();
44034 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44035 +
44036 + if (unlikely((unsigned long)matchn)) {
44037 + write_lock(&gr_inode_lock);
44038 + do_handle_create(matchn, dentry, mnt);
44039 + write_unlock(&gr_inode_lock);
44040 + }
44041 + preempt_enable();
44042 +
44043 + return;
44044 +}
44045 +
44046 +void
44047 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44048 + struct dentry *old_dentry,
44049 + struct dentry *new_dentry,
44050 + struct vfsmount *mnt, const __u8 replace)
44051 +{
44052 + struct name_entry *matchn;
44053 + struct inodev_entry *inodev;
44054 + ino_t old_ino = old_dentry->d_inode->i_ino;
44055 + dev_t old_dev = __get_dev(old_dentry);
44056 +
44057 + /* vfs_rename swaps the name and parent link for old_dentry and
44058 + new_dentry
44059 + at this point, old_dentry has the new name, parent link, and inode
44060 + for the renamed file
44061 + if a file is being replaced by a rename, new_dentry has the inode
44062 + and name for the replaced file
44063 + */
44064 +
44065 + if (unlikely(!(gr_status & GR_READY)))
44066 + return;
44067 +
44068 + preempt_disable();
44069 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44070 +
44071 + /* we wouldn't have to check d_inode if it weren't for
44072 + NFS silly-renaming
44073 + */
44074 +
44075 + write_lock(&gr_inode_lock);
44076 + if (unlikely(replace && new_dentry->d_inode)) {
44077 + ino_t new_ino = new_dentry->d_inode->i_ino;
44078 + dev_t new_dev = __get_dev(new_dentry);
44079 +
44080 + inodev = lookup_inodev_entry(new_ino, new_dev);
44081 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44082 + do_handle_delete(inodev, new_ino, new_dev);
44083 + }
44084 +
44085 + inodev = lookup_inodev_entry(old_ino, old_dev);
44086 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44087 + do_handle_delete(inodev, old_ino, old_dev);
44088 +
44089 + if (unlikely((unsigned long)matchn))
44090 + do_handle_create(matchn, old_dentry, mnt);
44091 +
44092 + write_unlock(&gr_inode_lock);
44093 + preempt_enable();
44094 +
44095 + return;
44096 +}
44097 +
44098 +static int
44099 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44100 + unsigned char **sum)
44101 +{
44102 + struct acl_role_label *r;
44103 + struct role_allowed_ip *ipp;
44104 + struct role_transition *trans;
44105 + unsigned int i;
44106 + int found = 0;
44107 + u32 curr_ip = current->signal->curr_ip;
44108 +
44109 + current->signal->saved_ip = curr_ip;
44110 +
44111 + /* check transition table */
44112 +
44113 + for (trans = current->role->transitions; trans; trans = trans->next) {
44114 + if (!strcmp(rolename, trans->rolename)) {
44115 + found = 1;
44116 + break;
44117 + }
44118 + }
44119 +
44120 + if (!found)
44121 + return 0;
44122 +
44123 + /* handle special roles that do not require authentication
44124 + and check ip */
44125 +
44126 + FOR_EACH_ROLE_START(r)
44127 + if (!strcmp(rolename, r->rolename) &&
44128 + (r->roletype & GR_ROLE_SPECIAL)) {
44129 + found = 0;
44130 + if (r->allowed_ips != NULL) {
44131 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44132 + if ((ntohl(curr_ip) & ipp->netmask) ==
44133 + (ntohl(ipp->addr) & ipp->netmask))
44134 + found = 1;
44135 + }
44136 + } else
44137 + found = 2;
44138 + if (!found)
44139 + return 0;
44140 +
44141 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44142 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44143 + *salt = NULL;
44144 + *sum = NULL;
44145 + return 1;
44146 + }
44147 + }
44148 + FOR_EACH_ROLE_END(r)
44149 +
44150 + for (i = 0; i < num_sprole_pws; i++) {
44151 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44152 + *salt = acl_special_roles[i]->salt;
44153 + *sum = acl_special_roles[i]->sum;
44154 + return 1;
44155 + }
44156 + }
44157 +
44158 + return 0;
44159 +}
44160 +
44161 +static void
44162 +assign_special_role(char *rolename)
44163 +{
44164 + struct acl_object_label *obj;
44165 + struct acl_role_label *r;
44166 + struct acl_role_label *assigned = NULL;
44167 + struct task_struct *tsk;
44168 + struct file *filp;
44169 +
44170 + FOR_EACH_ROLE_START(r)
44171 + if (!strcmp(rolename, r->rolename) &&
44172 + (r->roletype & GR_ROLE_SPECIAL)) {
44173 + assigned = r;
44174 + break;
44175 + }
44176 + FOR_EACH_ROLE_END(r)
44177 +
44178 + if (!assigned)
44179 + return;
44180 +
44181 + read_lock(&tasklist_lock);
44182 + read_lock(&grsec_exec_file_lock);
44183 +
44184 + tsk = current->real_parent;
44185 + if (tsk == NULL)
44186 + goto out_unlock;
44187 +
44188 + filp = tsk->exec_file;
44189 + if (filp == NULL)
44190 + goto out_unlock;
44191 +
44192 + tsk->is_writable = 0;
44193 +
44194 + tsk->acl_sp_role = 1;
44195 + tsk->acl_role_id = ++acl_sp_role_value;
44196 + tsk->role = assigned;
44197 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44198 +
44199 + /* ignore additional mmap checks for processes that are writable
44200 + by the default ACL */
44201 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44202 + if (unlikely(obj->mode & GR_WRITE))
44203 + tsk->is_writable = 1;
44204 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44205 + if (unlikely(obj->mode & GR_WRITE))
44206 + tsk->is_writable = 1;
44207 +
44208 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44209 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44210 +#endif
44211 +
44212 +out_unlock:
44213 + read_unlock(&grsec_exec_file_lock);
44214 + read_unlock(&tasklist_lock);
44215 + return;
44216 +}
44217 +
44218 +int gr_check_secure_terminal(struct task_struct *task)
44219 +{
44220 + struct task_struct *p, *p2, *p3;
44221 + struct files_struct *files;
44222 + struct fdtable *fdt;
44223 + struct file *our_file = NULL, *file;
44224 + int i;
44225 +
44226 + if (task->signal->tty == NULL)
44227 + return 1;
44228 +
44229 + files = get_files_struct(task);
44230 + if (files != NULL) {
44231 + rcu_read_lock();
44232 + fdt = files_fdtable(files);
44233 + for (i=0; i < fdt->max_fds; i++) {
44234 + file = fcheck_files(files, i);
44235 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44236 + get_file(file);
44237 + our_file = file;
44238 + }
44239 + }
44240 + rcu_read_unlock();
44241 + put_files_struct(files);
44242 + }
44243 +
44244 + if (our_file == NULL)
44245 + return 1;
44246 +
44247 + read_lock(&tasklist_lock);
44248 + do_each_thread(p2, p) {
44249 + files = get_files_struct(p);
44250 + if (files == NULL ||
44251 + (p->signal && p->signal->tty == task->signal->tty)) {
44252 + if (files != NULL)
44253 + put_files_struct(files);
44254 + continue;
44255 + }
44256 + rcu_read_lock();
44257 + fdt = files_fdtable(files);
44258 + for (i=0; i < fdt->max_fds; i++) {
44259 + file = fcheck_files(files, i);
44260 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44261 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44262 + p3 = task;
44263 + while (p3->pid > 0) {
44264 + if (p3 == p)
44265 + break;
44266 + p3 = p3->real_parent;
44267 + }
44268 + if (p3 == p)
44269 + break;
44270 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44271 + gr_handle_alertkill(p);
44272 + rcu_read_unlock();
44273 + put_files_struct(files);
44274 + read_unlock(&tasklist_lock);
44275 + fput(our_file);
44276 + return 0;
44277 + }
44278 + }
44279 + rcu_read_unlock();
44280 + put_files_struct(files);
44281 + } while_each_thread(p2, p);
44282 + read_unlock(&tasklist_lock);
44283 +
44284 + fput(our_file);
44285 + return 1;
44286 +}
44287 +
44288 +ssize_t
44289 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44290 +{
44291 + struct gr_arg_wrapper uwrap;
44292 + unsigned char *sprole_salt = NULL;
44293 + unsigned char *sprole_sum = NULL;
44294 + int error = sizeof (struct gr_arg_wrapper);
44295 + int error2 = 0;
44296 +
44297 + mutex_lock(&gr_dev_mutex);
44298 +
44299 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44300 + error = -EPERM;
44301 + goto out;
44302 + }
44303 +
44304 + if (count != sizeof (struct gr_arg_wrapper)) {
44305 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44306 + error = -EINVAL;
44307 + goto out;
44308 + }
44309 +
44310 +
44311 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44312 + gr_auth_expires = 0;
44313 + gr_auth_attempts = 0;
44314 + }
44315 +
44316 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44317 + error = -EFAULT;
44318 + goto out;
44319 + }
44320 +
44321 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44322 + error = -EINVAL;
44323 + goto out;
44324 + }
44325 +
44326 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44327 + error = -EFAULT;
44328 + goto out;
44329 + }
44330 +
44331 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44332 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44333 + time_after(gr_auth_expires, get_seconds())) {
44334 + error = -EBUSY;
44335 + goto out;
44336 + }
44337 +
44338 + /* if non-root trying to do anything other than use a special role,
44339 + do not attempt authentication, do not count towards authentication
44340 + locking
44341 + */
44342 +
44343 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44344 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44345 + current_uid()) {
44346 + error = -EPERM;
44347 + goto out;
44348 + }
44349 +
44350 + /* ensure pw and special role name are null terminated */
44351 +
44352 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44353 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44354 +
44355 + /* Okay.
44356 + * We have our enough of the argument structure..(we have yet
44357 + * to copy_from_user the tables themselves) . Copy the tables
44358 + * only if we need them, i.e. for loading operations. */
44359 +
44360 + switch (gr_usermode->mode) {
44361 + case GR_STATUS:
44362 + if (gr_status & GR_READY) {
44363 + error = 1;
44364 + if (!gr_check_secure_terminal(current))
44365 + error = 3;
44366 + } else
44367 + error = 2;
44368 + goto out;
44369 + case GR_SHUTDOWN:
44370 + if ((gr_status & GR_READY)
44371 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44372 + pax_open_kernel();
44373 + gr_status &= ~GR_READY;
44374 + pax_close_kernel();
44375 +
44376 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44377 + free_variables();
44378 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44379 + memset(gr_system_salt, 0, GR_SALT_LEN);
44380 + memset(gr_system_sum, 0, GR_SHA_LEN);
44381 + } else if (gr_status & GR_READY) {
44382 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44383 + error = -EPERM;
44384 + } else {
44385 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44386 + error = -EAGAIN;
44387 + }
44388 + break;
44389 + case GR_ENABLE:
44390 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44391 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44392 + else {
44393 + if (gr_status & GR_READY)
44394 + error = -EAGAIN;
44395 + else
44396 + error = error2;
44397 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44398 + }
44399 + break;
44400 + case GR_RELOAD:
44401 + if (!(gr_status & GR_READY)) {
44402 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44403 + error = -EAGAIN;
44404 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44405 + preempt_disable();
44406 +
44407 + pax_open_kernel();
44408 + gr_status &= ~GR_READY;
44409 + pax_close_kernel();
44410 +
44411 + free_variables();
44412 + if (!(error2 = gracl_init(gr_usermode))) {
44413 + preempt_enable();
44414 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44415 + } else {
44416 + preempt_enable();
44417 + error = error2;
44418 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44419 + }
44420 + } else {
44421 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44422 + error = -EPERM;
44423 + }
44424 + break;
44425 + case GR_SEGVMOD:
44426 + if (unlikely(!(gr_status & GR_READY))) {
44427 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44428 + error = -EAGAIN;
44429 + break;
44430 + }
44431 +
44432 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44433 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44434 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44435 + struct acl_subject_label *segvacl;
44436 + segvacl =
44437 + lookup_acl_subj_label(gr_usermode->segv_inode,
44438 + gr_usermode->segv_device,
44439 + current->role);
44440 + if (segvacl) {
44441 + segvacl->crashes = 0;
44442 + segvacl->expires = 0;
44443 + }
44444 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44445 + gr_remove_uid(gr_usermode->segv_uid);
44446 + }
44447 + } else {
44448 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44449 + error = -EPERM;
44450 + }
44451 + break;
44452 + case GR_SPROLE:
44453 + case GR_SPROLEPAM:
44454 + if (unlikely(!(gr_status & GR_READY))) {
44455 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44456 + error = -EAGAIN;
44457 + break;
44458 + }
44459 +
44460 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44461 + current->role->expires = 0;
44462 + current->role->auth_attempts = 0;
44463 + }
44464 +
44465 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44466 + time_after(current->role->expires, get_seconds())) {
44467 + error = -EBUSY;
44468 + goto out;
44469 + }
44470 +
44471 + if (lookup_special_role_auth
44472 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44473 + && ((!sprole_salt && !sprole_sum)
44474 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44475 + char *p = "";
44476 + assign_special_role(gr_usermode->sp_role);
44477 + read_lock(&tasklist_lock);
44478 + if (current->real_parent)
44479 + p = current->real_parent->role->rolename;
44480 + read_unlock(&tasklist_lock);
44481 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44482 + p, acl_sp_role_value);
44483 + } else {
44484 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44485 + error = -EPERM;
44486 + if(!(current->role->auth_attempts++))
44487 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44488 +
44489 + goto out;
44490 + }
44491 + break;
44492 + case GR_UNSPROLE:
44493 + if (unlikely(!(gr_status & GR_READY))) {
44494 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44495 + error = -EAGAIN;
44496 + break;
44497 + }
44498 +
44499 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44500 + char *p = "";
44501 + int i = 0;
44502 +
44503 + read_lock(&tasklist_lock);
44504 + if (current->real_parent) {
44505 + p = current->real_parent->role->rolename;
44506 + i = current->real_parent->acl_role_id;
44507 + }
44508 + read_unlock(&tasklist_lock);
44509 +
44510 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44511 + gr_set_acls(1);
44512 + } else {
44513 + error = -EPERM;
44514 + goto out;
44515 + }
44516 + break;
44517 + default:
44518 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44519 + error = -EINVAL;
44520 + break;
44521 + }
44522 +
44523 + if (error != -EPERM)
44524 + goto out;
44525 +
44526 + if(!(gr_auth_attempts++))
44527 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44528 +
44529 + out:
44530 + mutex_unlock(&gr_dev_mutex);
44531 + return error;
44532 +}
44533 +
44534 +/* must be called with
44535 + rcu_read_lock();
44536 + read_lock(&tasklist_lock);
44537 + read_lock(&grsec_exec_file_lock);
44538 +*/
44539 +int gr_apply_subject_to_task(struct task_struct *task)
44540 +{
44541 + struct acl_object_label *obj;
44542 + char *tmpname;
44543 + struct acl_subject_label *tmpsubj;
44544 + struct file *filp;
44545 + struct name_entry *nmatch;
44546 +
44547 + filp = task->exec_file;
44548 + if (filp == NULL)
44549 + return 0;
44550 +
44551 + /* the following is to apply the correct subject
44552 + on binaries running when the RBAC system
44553 + is enabled, when the binaries have been
44554 + replaced or deleted since their execution
44555 + -----
44556 + when the RBAC system starts, the inode/dev
44557 + from exec_file will be one the RBAC system
44558 + is unaware of. It only knows the inode/dev
44559 + of the present file on disk, or the absence
44560 + of it.
44561 + */
44562 + preempt_disable();
44563 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44564 +
44565 + nmatch = lookup_name_entry(tmpname);
44566 + preempt_enable();
44567 + tmpsubj = NULL;
44568 + if (nmatch) {
44569 + if (nmatch->deleted)
44570 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44571 + else
44572 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44573 + if (tmpsubj != NULL)
44574 + task->acl = tmpsubj;
44575 + }
44576 + if (tmpsubj == NULL)
44577 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44578 + task->role);
44579 + if (task->acl) {
44580 + task->is_writable = 0;
44581 + /* ignore additional mmap checks for processes that are writable
44582 + by the default ACL */
44583 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44584 + if (unlikely(obj->mode & GR_WRITE))
44585 + task->is_writable = 1;
44586 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44587 + if (unlikely(obj->mode & GR_WRITE))
44588 + task->is_writable = 1;
44589 +
44590 + gr_set_proc_res(task);
44591 +
44592 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44593 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44594 +#endif
44595 + } else {
44596 + return 1;
44597 + }
44598 +
44599 + return 0;
44600 +}
44601 +
44602 +int
44603 +gr_set_acls(const int type)
44604 +{
44605 + struct task_struct *task, *task2;
44606 + struct acl_role_label *role = current->role;
44607 + __u16 acl_role_id = current->acl_role_id;
44608 + const struct cred *cred;
44609 + int ret;
44610 +
44611 + rcu_read_lock();
44612 + read_lock(&tasklist_lock);
44613 + read_lock(&grsec_exec_file_lock);
44614 + do_each_thread(task2, task) {
44615 + /* check to see if we're called from the exit handler,
44616 + if so, only replace ACLs that have inherited the admin
44617 + ACL */
44618 +
44619 + if (type && (task->role != role ||
44620 + task->acl_role_id != acl_role_id))
44621 + continue;
44622 +
44623 + task->acl_role_id = 0;
44624 + task->acl_sp_role = 0;
44625 +
44626 + if (task->exec_file) {
44627 + cred = __task_cred(task);
44628 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44629 + ret = gr_apply_subject_to_task(task);
44630 + if (ret) {
44631 + read_unlock(&grsec_exec_file_lock);
44632 + read_unlock(&tasklist_lock);
44633 + rcu_read_unlock();
44634 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44635 + return ret;
44636 + }
44637 + } else {
44638 + // it's a kernel process
44639 + task->role = kernel_role;
44640 + task->acl = kernel_role->root_label;
44641 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44642 + task->acl->mode &= ~GR_PROCFIND;
44643 +#endif
44644 + }
44645 + } while_each_thread(task2, task);
44646 + read_unlock(&grsec_exec_file_lock);
44647 + read_unlock(&tasklist_lock);
44648 + rcu_read_unlock();
44649 +
44650 + return 0;
44651 +}
44652 +
44653 +void
44654 +gr_learn_resource(const struct task_struct *task,
44655 + const int res, const unsigned long wanted, const int gt)
44656 +{
44657 + struct acl_subject_label *acl;
44658 + const struct cred *cred;
44659 +
44660 + if (unlikely((gr_status & GR_READY) &&
44661 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44662 + goto skip_reslog;
44663 +
44664 +#ifdef CONFIG_GRKERNSEC_RESLOG
44665 + gr_log_resource(task, res, wanted, gt);
44666 +#endif
44667 + skip_reslog:
44668 +
44669 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44670 + return;
44671 +
44672 + acl = task->acl;
44673 +
44674 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44675 + !(acl->resmask & (1 << (unsigned short) res))))
44676 + return;
44677 +
44678 + if (wanted >= acl->res[res].rlim_cur) {
44679 + unsigned long res_add;
44680 +
44681 + res_add = wanted;
44682 + switch (res) {
44683 + case RLIMIT_CPU:
44684 + res_add += GR_RLIM_CPU_BUMP;
44685 + break;
44686 + case RLIMIT_FSIZE:
44687 + res_add += GR_RLIM_FSIZE_BUMP;
44688 + break;
44689 + case RLIMIT_DATA:
44690 + res_add += GR_RLIM_DATA_BUMP;
44691 + break;
44692 + case RLIMIT_STACK:
44693 + res_add += GR_RLIM_STACK_BUMP;
44694 + break;
44695 + case RLIMIT_CORE:
44696 + res_add += GR_RLIM_CORE_BUMP;
44697 + break;
44698 + case RLIMIT_RSS:
44699 + res_add += GR_RLIM_RSS_BUMP;
44700 + break;
44701 + case RLIMIT_NPROC:
44702 + res_add += GR_RLIM_NPROC_BUMP;
44703 + break;
44704 + case RLIMIT_NOFILE:
44705 + res_add += GR_RLIM_NOFILE_BUMP;
44706 + break;
44707 + case RLIMIT_MEMLOCK:
44708 + res_add += GR_RLIM_MEMLOCK_BUMP;
44709 + break;
44710 + case RLIMIT_AS:
44711 + res_add += GR_RLIM_AS_BUMP;
44712 + break;
44713 + case RLIMIT_LOCKS:
44714 + res_add += GR_RLIM_LOCKS_BUMP;
44715 + break;
44716 + case RLIMIT_SIGPENDING:
44717 + res_add += GR_RLIM_SIGPENDING_BUMP;
44718 + break;
44719 + case RLIMIT_MSGQUEUE:
44720 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44721 + break;
44722 + case RLIMIT_NICE:
44723 + res_add += GR_RLIM_NICE_BUMP;
44724 + break;
44725 + case RLIMIT_RTPRIO:
44726 + res_add += GR_RLIM_RTPRIO_BUMP;
44727 + break;
44728 + case RLIMIT_RTTIME:
44729 + res_add += GR_RLIM_RTTIME_BUMP;
44730 + break;
44731 + }
44732 +
44733 + acl->res[res].rlim_cur = res_add;
44734 +
44735 + if (wanted > acl->res[res].rlim_max)
44736 + acl->res[res].rlim_max = res_add;
44737 +
44738 + /* only log the subject filename, since resource logging is supported for
44739 + single-subject learning only */
44740 + rcu_read_lock();
44741 + cred = __task_cred(task);
44742 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44743 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44744 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44745 + "", (unsigned long) res, &task->signal->saved_ip);
44746 + rcu_read_unlock();
44747 + }
44748 +
44749 + return;
44750 +}
44751 +
44752 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44753 +void
44754 +pax_set_initial_flags(struct linux_binprm *bprm)
44755 +{
44756 + struct task_struct *task = current;
44757 + struct acl_subject_label *proc;
44758 + unsigned long flags;
44759 +
44760 + if (unlikely(!(gr_status & GR_READY)))
44761 + return;
44762 +
44763 + flags = pax_get_flags(task);
44764 +
44765 + proc = task->acl;
44766 +
44767 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44768 + flags &= ~MF_PAX_PAGEEXEC;
44769 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44770 + flags &= ~MF_PAX_SEGMEXEC;
44771 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44772 + flags &= ~MF_PAX_RANDMMAP;
44773 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44774 + flags &= ~MF_PAX_EMUTRAMP;
44775 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44776 + flags &= ~MF_PAX_MPROTECT;
44777 +
44778 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44779 + flags |= MF_PAX_PAGEEXEC;
44780 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44781 + flags |= MF_PAX_SEGMEXEC;
44782 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44783 + flags |= MF_PAX_RANDMMAP;
44784 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44785 + flags |= MF_PAX_EMUTRAMP;
44786 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44787 + flags |= MF_PAX_MPROTECT;
44788 +
44789 + pax_set_flags(task, flags);
44790 +
44791 + return;
44792 +}
44793 +#endif
44794 +
44795 +#ifdef CONFIG_SYSCTL
44796 +/* Eric Biederman likes breaking userland ABI and every inode-based security
44797 + system to save 35kb of memory */
44798 +
44799 +/* we modify the passed in filename, but adjust it back before returning */
44800 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44801 +{
44802 + struct name_entry *nmatch;
44803 + char *p, *lastp = NULL;
44804 + struct acl_object_label *obj = NULL, *tmp;
44805 + struct acl_subject_label *tmpsubj;
44806 + char c = '\0';
44807 +
44808 + read_lock(&gr_inode_lock);
44809 +
44810 + p = name + len - 1;
44811 + do {
44812 + nmatch = lookup_name_entry(name);
44813 + if (lastp != NULL)
44814 + *lastp = c;
44815 +
44816 + if (nmatch == NULL)
44817 + goto next_component;
44818 + tmpsubj = current->acl;
44819 + do {
44820 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44821 + if (obj != NULL) {
44822 + tmp = obj->globbed;
44823 + while (tmp) {
44824 + if (!glob_match(tmp->filename, name)) {
44825 + obj = tmp;
44826 + goto found_obj;
44827 + }
44828 + tmp = tmp->next;
44829 + }
44830 + goto found_obj;
44831 + }
44832 + } while ((tmpsubj = tmpsubj->parent_subject));
44833 +next_component:
44834 + /* end case */
44835 + if (p == name)
44836 + break;
44837 +
44838 + while (*p != '/')
44839 + p--;
44840 + if (p == name)
44841 + lastp = p + 1;
44842 + else {
44843 + lastp = p;
44844 + p--;
44845 + }
44846 + c = *lastp;
44847 + *lastp = '\0';
44848 + } while (1);
44849 +found_obj:
44850 + read_unlock(&gr_inode_lock);
44851 + /* obj returned will always be non-null */
44852 + return obj;
44853 +}
44854 +
44855 +/* returns 0 when allowing, non-zero on error
44856 + op of 0 is used for readdir, so we don't log the names of hidden files
44857 +*/
44858 +__u32
44859 +gr_handle_sysctl(const struct ctl_table *table, const int op)
44860 +{
44861 + struct ctl_table *tmp;
44862 + const char *proc_sys = "/proc/sys";
44863 + char *path;
44864 + struct acl_object_label *obj;
44865 + unsigned short len = 0, pos = 0, depth = 0, i;
44866 + __u32 err = 0;
44867 + __u32 mode = 0;
44868 +
44869 + if (unlikely(!(gr_status & GR_READY)))
44870 + return 0;
44871 +
44872 + /* for now, ignore operations on non-sysctl entries if it's not a
44873 + readdir*/
44874 + if (table->child != NULL && op != 0)
44875 + return 0;
44876 +
44877 + mode |= GR_FIND;
44878 + /* it's only a read if it's an entry, read on dirs is for readdir */
44879 + if (op & MAY_READ)
44880 + mode |= GR_READ;
44881 + if (op & MAY_WRITE)
44882 + mode |= GR_WRITE;
44883 +
44884 + preempt_disable();
44885 +
44886 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
44887 +
44888 + /* it's only a read/write if it's an actual entry, not a dir
44889 + (which are opened for readdir)
44890 + */
44891 +
44892 + /* convert the requested sysctl entry into a pathname */
44893 +
44894 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
44895 + len += strlen(tmp->procname);
44896 + len++;
44897 + depth++;
44898 + }
44899 +
44900 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
44901 + /* deny */
44902 + goto out;
44903 + }
44904 +
44905 + memset(path, 0, PAGE_SIZE);
44906 +
44907 + memcpy(path, proc_sys, strlen(proc_sys));
44908 +
44909 + pos += strlen(proc_sys);
44910 +
44911 + for (; depth > 0; depth--) {
44912 + path[pos] = '/';
44913 + pos++;
44914 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
44915 + if (depth == i) {
44916 + memcpy(path + pos, tmp->procname,
44917 + strlen(tmp->procname));
44918 + pos += strlen(tmp->procname);
44919 + }
44920 + i++;
44921 + }
44922 + }
44923 +
44924 + obj = gr_lookup_by_name(path, pos);
44925 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
44926 +
44927 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
44928 + ((err & mode) != mode))) {
44929 + __u32 new_mode = mode;
44930 +
44931 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
44932 +
44933 + err = 0;
44934 + gr_log_learn_sysctl(path, new_mode);
44935 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
44936 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
44937 + err = -ENOENT;
44938 + } else if (!(err & GR_FIND)) {
44939 + err = -ENOENT;
44940 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
44941 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
44942 + path, (mode & GR_READ) ? " reading" : "",
44943 + (mode & GR_WRITE) ? " writing" : "");
44944 + err = -EACCES;
44945 + } else if ((err & mode) != mode) {
44946 + err = -EACCES;
44947 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
44948 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
44949 + path, (mode & GR_READ) ? " reading" : "",
44950 + (mode & GR_WRITE) ? " writing" : "");
44951 + err = 0;
44952 + } else
44953 + err = 0;
44954 +
44955 + out:
44956 + preempt_enable();
44957 +
44958 + return err;
44959 +}
44960 +#endif
44961 +
44962 +int
44963 +gr_handle_proc_ptrace(struct task_struct *task)
44964 +{
44965 + struct file *filp;
44966 + struct task_struct *tmp = task;
44967 + struct task_struct *curtemp = current;
44968 + __u32 retmode;
44969 +
44970 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
44971 + if (unlikely(!(gr_status & GR_READY)))
44972 + return 0;
44973 +#endif
44974 +
44975 + read_lock(&tasklist_lock);
44976 + read_lock(&grsec_exec_file_lock);
44977 + filp = task->exec_file;
44978 +
44979 + while (tmp->pid > 0) {
44980 + if (tmp == curtemp)
44981 + break;
44982 + tmp = tmp->real_parent;
44983 + }
44984 +
44985 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
44986 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
44987 + read_unlock(&grsec_exec_file_lock);
44988 + read_unlock(&tasklist_lock);
44989 + return 1;
44990 + }
44991 +
44992 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
44993 + if (!(gr_status & GR_READY)) {
44994 + read_unlock(&grsec_exec_file_lock);
44995 + read_unlock(&tasklist_lock);
44996 + return 0;
44997 + }
44998 +#endif
44999 +
45000 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45001 + read_unlock(&grsec_exec_file_lock);
45002 + read_unlock(&tasklist_lock);
45003 +
45004 + if (retmode & GR_NOPTRACE)
45005 + return 1;
45006 +
45007 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45008 + && (current->acl != task->acl || (current->acl != current->role->root_label
45009 + && current->pid != task->pid)))
45010 + return 1;
45011 +
45012 + return 0;
45013 +}
45014 +
45015 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45016 +{
45017 + if (unlikely(!(gr_status & GR_READY)))
45018 + return;
45019 +
45020 + if (!(current->role->roletype & GR_ROLE_GOD))
45021 + return;
45022 +
45023 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45024 + p->role->rolename, gr_task_roletype_to_char(p),
45025 + p->acl->filename);
45026 +}
45027 +
45028 +int
45029 +gr_handle_ptrace(struct task_struct *task, const long request)
45030 +{
45031 + struct task_struct *tmp = task;
45032 + struct task_struct *curtemp = current;
45033 + __u32 retmode;
45034 +
45035 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45036 + if (unlikely(!(gr_status & GR_READY)))
45037 + return 0;
45038 +#endif
45039 +
45040 + read_lock(&tasklist_lock);
45041 + while (tmp->pid > 0) {
45042 + if (tmp == curtemp)
45043 + break;
45044 + tmp = tmp->real_parent;
45045 + }
45046 +
45047 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45048 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45049 + read_unlock(&tasklist_lock);
45050 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45051 + return 1;
45052 + }
45053 + read_unlock(&tasklist_lock);
45054 +
45055 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45056 + if (!(gr_status & GR_READY))
45057 + return 0;
45058 +#endif
45059 +
45060 + read_lock(&grsec_exec_file_lock);
45061 + if (unlikely(!task->exec_file)) {
45062 + read_unlock(&grsec_exec_file_lock);
45063 + return 0;
45064 + }
45065 +
45066 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45067 + read_unlock(&grsec_exec_file_lock);
45068 +
45069 + if (retmode & GR_NOPTRACE) {
45070 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45071 + return 1;
45072 + }
45073 +
45074 + if (retmode & GR_PTRACERD) {
45075 + switch (request) {
45076 + case PTRACE_POKETEXT:
45077 + case PTRACE_POKEDATA:
45078 + case PTRACE_POKEUSR:
45079 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45080 + case PTRACE_SETREGS:
45081 + case PTRACE_SETFPREGS:
45082 +#endif
45083 +#ifdef CONFIG_X86
45084 + case PTRACE_SETFPXREGS:
45085 +#endif
45086 +#ifdef CONFIG_ALTIVEC
45087 + case PTRACE_SETVRREGS:
45088 +#endif
45089 + return 1;
45090 + default:
45091 + return 0;
45092 + }
45093 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45094 + !(current->role->roletype & GR_ROLE_GOD) &&
45095 + (current->acl != task->acl)) {
45096 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45097 + return 1;
45098 + }
45099 +
45100 + return 0;
45101 +}
45102 +
45103 +static int is_writable_mmap(const struct file *filp)
45104 +{
45105 + struct task_struct *task = current;
45106 + struct acl_object_label *obj, *obj2;
45107 +
45108 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45109 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45110 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45111 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45112 + task->role->root_label);
45113 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45114 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45115 + return 1;
45116 + }
45117 + }
45118 + return 0;
45119 +}
45120 +
45121 +int
45122 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45123 +{
45124 + __u32 mode;
45125 +
45126 + if (unlikely(!file || !(prot & PROT_EXEC)))
45127 + return 1;
45128 +
45129 + if (is_writable_mmap(file))
45130 + return 0;
45131 +
45132 + mode =
45133 + gr_search_file(file->f_path.dentry,
45134 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45135 + file->f_path.mnt);
45136 +
45137 + if (!gr_tpe_allow(file))
45138 + return 0;
45139 +
45140 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45141 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45142 + return 0;
45143 + } else if (unlikely(!(mode & GR_EXEC))) {
45144 + return 0;
45145 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45146 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45147 + return 1;
45148 + }
45149 +
45150 + return 1;
45151 +}
45152 +
45153 +int
45154 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45155 +{
45156 + __u32 mode;
45157 +
45158 + if (unlikely(!file || !(prot & PROT_EXEC)))
45159 + return 1;
45160 +
45161 + if (is_writable_mmap(file))
45162 + return 0;
45163 +
45164 + mode =
45165 + gr_search_file(file->f_path.dentry,
45166 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45167 + file->f_path.mnt);
45168 +
45169 + if (!gr_tpe_allow(file))
45170 + return 0;
45171 +
45172 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45173 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45174 + return 0;
45175 + } else if (unlikely(!(mode & GR_EXEC))) {
45176 + return 0;
45177 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45178 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45179 + return 1;
45180 + }
45181 +
45182 + return 1;
45183 +}
45184 +
45185 +void
45186 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45187 +{
45188 + unsigned long runtime;
45189 + unsigned long cputime;
45190 + unsigned int wday, cday;
45191 + __u8 whr, chr;
45192 + __u8 wmin, cmin;
45193 + __u8 wsec, csec;
45194 + struct timespec timeval;
45195 +
45196 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45197 + !(task->acl->mode & GR_PROCACCT)))
45198 + return;
45199 +
45200 + do_posix_clock_monotonic_gettime(&timeval);
45201 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45202 + wday = runtime / (3600 * 24);
45203 + runtime -= wday * (3600 * 24);
45204 + whr = runtime / 3600;
45205 + runtime -= whr * 3600;
45206 + wmin = runtime / 60;
45207 + runtime -= wmin * 60;
45208 + wsec = runtime;
45209 +
45210 + cputime = (task->utime + task->stime) / HZ;
45211 + cday = cputime / (3600 * 24);
45212 + cputime -= cday * (3600 * 24);
45213 + chr = cputime / 3600;
45214 + cputime -= chr * 3600;
45215 + cmin = cputime / 60;
45216 + cputime -= cmin * 60;
45217 + csec = cputime;
45218 +
45219 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45220 +
45221 + return;
45222 +}
45223 +
45224 +void gr_set_kernel_label(struct task_struct *task)
45225 +{
45226 + if (gr_status & GR_READY) {
45227 + task->role = kernel_role;
45228 + task->acl = kernel_role->root_label;
45229 + }
45230 + return;
45231 +}
45232 +
45233 +#ifdef CONFIG_TASKSTATS
45234 +int gr_is_taskstats_denied(int pid)
45235 +{
45236 + struct task_struct *task;
45237 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45238 + const struct cred *cred;
45239 +#endif
45240 + int ret = 0;
45241 +
45242 + /* restrict taskstats viewing to un-chrooted root users
45243 + who have the 'view' subject flag if the RBAC system is enabled
45244 + */
45245 +
45246 + rcu_read_lock();
45247 + read_lock(&tasklist_lock);
45248 + task = find_task_by_vpid(pid);
45249 + if (task) {
45250 +#ifdef CONFIG_GRKERNSEC_CHROOT
45251 + if (proc_is_chrooted(task))
45252 + ret = -EACCES;
45253 +#endif
45254 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45255 + cred = __task_cred(task);
45256 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45257 + if (cred->uid != 0)
45258 + ret = -EACCES;
45259 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45260 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45261 + ret = -EACCES;
45262 +#endif
45263 +#endif
45264 + if (gr_status & GR_READY) {
45265 + if (!(task->acl->mode & GR_VIEW))
45266 + ret = -EACCES;
45267 + }
45268 + } else
45269 + ret = -ENOENT;
45270 +
45271 + read_unlock(&tasklist_lock);
45272 + rcu_read_unlock();
45273 +
45274 + return ret;
45275 +}
45276 +#endif
45277 +
45278 +/* AUXV entries are filled via a descendant of search_binary_handler
45279 + after we've already applied the subject for the target
45280 +*/
45281 +int gr_acl_enable_at_secure(void)
45282 +{
45283 + if (unlikely(!(gr_status & GR_READY)))
45284 + return 0;
45285 +
45286 + if (current->acl->mode & GR_ATSECURE)
45287 + return 1;
45288 +
45289 + return 0;
45290 +}
45291 +
45292 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45293 +{
45294 + struct task_struct *task = current;
45295 + struct dentry *dentry = file->f_path.dentry;
45296 + struct vfsmount *mnt = file->f_path.mnt;
45297 + struct acl_object_label *obj, *tmp;
45298 + struct acl_subject_label *subj;
45299 + unsigned int bufsize;
45300 + int is_not_root;
45301 + char *path;
45302 + dev_t dev = __get_dev(dentry);
45303 +
45304 + if (unlikely(!(gr_status & GR_READY)))
45305 + return 1;
45306 +
45307 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45308 + return 1;
45309 +
45310 + /* ignore Eric Biederman */
45311 + if (IS_PRIVATE(dentry->d_inode))
45312 + return 1;
45313 +
45314 + subj = task->acl;
45315 + do {
45316 + obj = lookup_acl_obj_label(ino, dev, subj);
45317 + if (obj != NULL)
45318 + return (obj->mode & GR_FIND) ? 1 : 0;
45319 + } while ((subj = subj->parent_subject));
45320 +
45321 + /* this is purely an optimization since we're looking for an object
45322 + for the directory we're doing a readdir on
45323 + if it's possible for any globbed object to match the entry we're
45324 + filling into the directory, then the object we find here will be
45325 + an anchor point with attached globbed objects
45326 + */
45327 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45328 + if (obj->globbed == NULL)
45329 + return (obj->mode & GR_FIND) ? 1 : 0;
45330 +
45331 + is_not_root = ((obj->filename[0] == '/') &&
45332 + (obj->filename[1] == '\0')) ? 0 : 1;
45333 + bufsize = PAGE_SIZE - namelen - is_not_root;
45334 +
45335 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45336 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45337 + return 1;
45338 +
45339 + preempt_disable();
45340 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45341 + bufsize);
45342 +
45343 + bufsize = strlen(path);
45344 +
45345 + /* if base is "/", don't append an additional slash */
45346 + if (is_not_root)
45347 + *(path + bufsize) = '/';
45348 + memcpy(path + bufsize + is_not_root, name, namelen);
45349 + *(path + bufsize + namelen + is_not_root) = '\0';
45350 +
45351 + tmp = obj->globbed;
45352 + while (tmp) {
45353 + if (!glob_match(tmp->filename, path)) {
45354 + preempt_enable();
45355 + return (tmp->mode & GR_FIND) ? 1 : 0;
45356 + }
45357 + tmp = tmp->next;
45358 + }
45359 + preempt_enable();
45360 + return (obj->mode & GR_FIND) ? 1 : 0;
45361 +}
45362 +
45363 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45364 +EXPORT_SYMBOL(gr_acl_is_enabled);
45365 +#endif
45366 +EXPORT_SYMBOL(gr_learn_resource);
45367 +EXPORT_SYMBOL(gr_set_kernel_label);
45368 +#ifdef CONFIG_SECURITY
45369 +EXPORT_SYMBOL(gr_check_user_change);
45370 +EXPORT_SYMBOL(gr_check_group_change);
45371 +#endif
45372 +
45373 diff -urNp linux-3.0.3/grsecurity/gracl_cap.c linux-3.0.3/grsecurity/gracl_cap.c
45374 --- linux-3.0.3/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45375 +++ linux-3.0.3/grsecurity/gracl_cap.c 2011-08-23 21:48:14.000000000 -0400
45376 @@ -0,0 +1,139 @@
45377 +#include <linux/kernel.h>
45378 +#include <linux/module.h>
45379 +#include <linux/sched.h>
45380 +#include <linux/gracl.h>
45381 +#include <linux/grsecurity.h>
45382 +#include <linux/grinternal.h>
45383 +
45384 +static const char *captab_log[] = {
45385 + "CAP_CHOWN",
45386 + "CAP_DAC_OVERRIDE",
45387 + "CAP_DAC_READ_SEARCH",
45388 + "CAP_FOWNER",
45389 + "CAP_FSETID",
45390 + "CAP_KILL",
45391 + "CAP_SETGID",
45392 + "CAP_SETUID",
45393 + "CAP_SETPCAP",
45394 + "CAP_LINUX_IMMUTABLE",
45395 + "CAP_NET_BIND_SERVICE",
45396 + "CAP_NET_BROADCAST",
45397 + "CAP_NET_ADMIN",
45398 + "CAP_NET_RAW",
45399 + "CAP_IPC_LOCK",
45400 + "CAP_IPC_OWNER",
45401 + "CAP_SYS_MODULE",
45402 + "CAP_SYS_RAWIO",
45403 + "CAP_SYS_CHROOT",
45404 + "CAP_SYS_PTRACE",
45405 + "CAP_SYS_PACCT",
45406 + "CAP_SYS_ADMIN",
45407 + "CAP_SYS_BOOT",
45408 + "CAP_SYS_NICE",
45409 + "CAP_SYS_RESOURCE",
45410 + "CAP_SYS_TIME",
45411 + "CAP_SYS_TTY_CONFIG",
45412 + "CAP_MKNOD",
45413 + "CAP_LEASE",
45414 + "CAP_AUDIT_WRITE",
45415 + "CAP_AUDIT_CONTROL",
45416 + "CAP_SETFCAP",
45417 + "CAP_MAC_OVERRIDE",
45418 + "CAP_MAC_ADMIN",
45419 + "CAP_SYSLOG"
45420 +};
45421 +
45422 +EXPORT_SYMBOL(gr_is_capable);
45423 +EXPORT_SYMBOL(gr_is_capable_nolog);
45424 +
45425 +int
45426 +gr_is_capable(const int cap)
45427 +{
45428 + struct task_struct *task = current;
45429 + const struct cred *cred = current_cred();
45430 + struct acl_subject_label *curracl;
45431 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45432 + kernel_cap_t cap_audit = __cap_empty_set;
45433 +
45434 + if (!gr_acl_is_enabled())
45435 + return 1;
45436 +
45437 + curracl = task->acl;
45438 +
45439 + cap_drop = curracl->cap_lower;
45440 + cap_mask = curracl->cap_mask;
45441 + cap_audit = curracl->cap_invert_audit;
45442 +
45443 + while ((curracl = curracl->parent_subject)) {
45444 + /* if the cap isn't specified in the current computed mask but is specified in the
45445 + current level subject, and is lowered in the current level subject, then add
45446 + it to the set of dropped capabilities
45447 + otherwise, add the current level subject's mask to the current computed mask
45448 + */
45449 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45450 + cap_raise(cap_mask, cap);
45451 + if (cap_raised(curracl->cap_lower, cap))
45452 + cap_raise(cap_drop, cap);
45453 + if (cap_raised(curracl->cap_invert_audit, cap))
45454 + cap_raise(cap_audit, cap);
45455 + }
45456 + }
45457 +
45458 + if (!cap_raised(cap_drop, cap)) {
45459 + if (cap_raised(cap_audit, cap))
45460 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45461 + return 1;
45462 + }
45463 +
45464 + curracl = task->acl;
45465 +
45466 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45467 + && cap_raised(cred->cap_effective, cap)) {
45468 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45469 + task->role->roletype, cred->uid,
45470 + cred->gid, task->exec_file ?
45471 + gr_to_filename(task->exec_file->f_path.dentry,
45472 + task->exec_file->f_path.mnt) : curracl->filename,
45473 + curracl->filename, 0UL,
45474 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45475 + return 1;
45476 + }
45477 +
45478 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45479 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45480 + return 0;
45481 +}
45482 +
45483 +int
45484 +gr_is_capable_nolog(const int cap)
45485 +{
45486 + struct acl_subject_label *curracl;
45487 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45488 +
45489 + if (!gr_acl_is_enabled())
45490 + return 1;
45491 +
45492 + curracl = current->acl;
45493 +
45494 + cap_drop = curracl->cap_lower;
45495 + cap_mask = curracl->cap_mask;
45496 +
45497 + while ((curracl = curracl->parent_subject)) {
45498 + /* if the cap isn't specified in the current computed mask but is specified in the
45499 + current level subject, and is lowered in the current level subject, then add
45500 + it to the set of dropped capabilities
45501 + otherwise, add the current level subject's mask to the current computed mask
45502 + */
45503 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45504 + cap_raise(cap_mask, cap);
45505 + if (cap_raised(curracl->cap_lower, cap))
45506 + cap_raise(cap_drop, cap);
45507 + }
45508 + }
45509 +
45510 + if (!cap_raised(cap_drop, cap))
45511 + return 1;
45512 +
45513 + return 0;
45514 +}
45515 +
45516 diff -urNp linux-3.0.3/grsecurity/gracl_fs.c linux-3.0.3/grsecurity/gracl_fs.c
45517 --- linux-3.0.3/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45518 +++ linux-3.0.3/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
45519 @@ -0,0 +1,431 @@
45520 +#include <linux/kernel.h>
45521 +#include <linux/sched.h>
45522 +#include <linux/types.h>
45523 +#include <linux/fs.h>
45524 +#include <linux/file.h>
45525 +#include <linux/stat.h>
45526 +#include <linux/grsecurity.h>
45527 +#include <linux/grinternal.h>
45528 +#include <linux/gracl.h>
45529 +
45530 +__u32
45531 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45532 + const struct vfsmount * mnt)
45533 +{
45534 + __u32 mode;
45535 +
45536 + if (unlikely(!dentry->d_inode))
45537 + return GR_FIND;
45538 +
45539 + mode =
45540 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45541 +
45542 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45543 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45544 + return mode;
45545 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45546 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45547 + return 0;
45548 + } else if (unlikely(!(mode & GR_FIND)))
45549 + return 0;
45550 +
45551 + return GR_FIND;
45552 +}
45553 +
45554 +__u32
45555 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45556 + const int fmode)
45557 +{
45558 + __u32 reqmode = GR_FIND;
45559 + __u32 mode;
45560 +
45561 + if (unlikely(!dentry->d_inode))
45562 + return reqmode;
45563 +
45564 + if (unlikely(fmode & O_APPEND))
45565 + reqmode |= GR_APPEND;
45566 + else if (unlikely(fmode & FMODE_WRITE))
45567 + reqmode |= GR_WRITE;
45568 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45569 + reqmode |= GR_READ;
45570 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45571 + reqmode &= ~GR_READ;
45572 + mode =
45573 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45574 + mnt);
45575 +
45576 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45577 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45578 + reqmode & GR_READ ? " reading" : "",
45579 + reqmode & GR_WRITE ? " writing" : reqmode &
45580 + GR_APPEND ? " appending" : "");
45581 + return reqmode;
45582 + } else
45583 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45584 + {
45585 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45586 + reqmode & GR_READ ? " reading" : "",
45587 + reqmode & GR_WRITE ? " writing" : reqmode &
45588 + GR_APPEND ? " appending" : "");
45589 + return 0;
45590 + } else if (unlikely((mode & reqmode) != reqmode))
45591 + return 0;
45592 +
45593 + return reqmode;
45594 +}
45595 +
45596 +__u32
45597 +gr_acl_handle_creat(const struct dentry * dentry,
45598 + const struct dentry * p_dentry,
45599 + const struct vfsmount * p_mnt, const int fmode,
45600 + const int imode)
45601 +{
45602 + __u32 reqmode = GR_WRITE | GR_CREATE;
45603 + __u32 mode;
45604 +
45605 + if (unlikely(fmode & O_APPEND))
45606 + reqmode |= GR_APPEND;
45607 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45608 + reqmode |= GR_READ;
45609 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45610 + reqmode |= GR_SETID;
45611 +
45612 + mode =
45613 + gr_check_create(dentry, p_dentry, p_mnt,
45614 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45615 +
45616 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45617 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45618 + reqmode & GR_READ ? " reading" : "",
45619 + reqmode & GR_WRITE ? " writing" : reqmode &
45620 + GR_APPEND ? " appending" : "");
45621 + return reqmode;
45622 + } else
45623 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45624 + {
45625 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45626 + reqmode & GR_READ ? " reading" : "",
45627 + reqmode & GR_WRITE ? " writing" : reqmode &
45628 + GR_APPEND ? " appending" : "");
45629 + return 0;
45630 + } else if (unlikely((mode & reqmode) != reqmode))
45631 + return 0;
45632 +
45633 + return reqmode;
45634 +}
45635 +
45636 +__u32
45637 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45638 + const int fmode)
45639 +{
45640 + __u32 mode, reqmode = GR_FIND;
45641 +
45642 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45643 + reqmode |= GR_EXEC;
45644 + if (fmode & S_IWOTH)
45645 + reqmode |= GR_WRITE;
45646 + if (fmode & S_IROTH)
45647 + reqmode |= GR_READ;
45648 +
45649 + mode =
45650 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45651 + mnt);
45652 +
45653 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45654 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45655 + reqmode & GR_READ ? " reading" : "",
45656 + reqmode & GR_WRITE ? " writing" : "",
45657 + reqmode & GR_EXEC ? " executing" : "");
45658 + return reqmode;
45659 + } else
45660 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45661 + {
45662 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45663 + reqmode & GR_READ ? " reading" : "",
45664 + reqmode & GR_WRITE ? " writing" : "",
45665 + reqmode & GR_EXEC ? " executing" : "");
45666 + return 0;
45667 + } else if (unlikely((mode & reqmode) != reqmode))
45668 + return 0;
45669 +
45670 + return reqmode;
45671 +}
45672 +
45673 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45674 +{
45675 + __u32 mode;
45676 +
45677 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45678 +
45679 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45680 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45681 + return mode;
45682 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45683 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45684 + return 0;
45685 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45686 + return 0;
45687 +
45688 + return (reqmode);
45689 +}
45690 +
45691 +__u32
45692 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45693 +{
45694 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45695 +}
45696 +
45697 +__u32
45698 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45699 +{
45700 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45701 +}
45702 +
45703 +__u32
45704 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45705 +{
45706 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45707 +}
45708 +
45709 +__u32
45710 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45711 +{
45712 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45713 +}
45714 +
45715 +__u32
45716 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45717 + mode_t mode)
45718 +{
45719 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45720 + return 1;
45721 +
45722 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45723 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45724 + GR_FCHMOD_ACL_MSG);
45725 + } else {
45726 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45727 + }
45728 +}
45729 +
45730 +__u32
45731 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45732 + mode_t mode)
45733 +{
45734 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45735 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45736 + GR_CHMOD_ACL_MSG);
45737 + } else {
45738 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45739 + }
45740 +}
45741 +
45742 +__u32
45743 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45744 +{
45745 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45746 +}
45747 +
45748 +__u32
45749 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45750 +{
45751 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45752 +}
45753 +
45754 +__u32
45755 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45756 +{
45757 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45758 +}
45759 +
45760 +__u32
45761 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45762 +{
45763 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45764 + GR_UNIXCONNECT_ACL_MSG);
45765 +}
45766 +
45767 +/* hardlinks require at minimum create permission,
45768 + any additional privilege required is based on the
45769 + privilege of the file being linked to
45770 +*/
45771 +__u32
45772 +gr_acl_handle_link(const struct dentry * new_dentry,
45773 + const struct dentry * parent_dentry,
45774 + const struct vfsmount * parent_mnt,
45775 + const struct dentry * old_dentry,
45776 + const struct vfsmount * old_mnt, const char *to)
45777 +{
45778 + __u32 mode;
45779 + __u32 needmode = GR_CREATE | GR_LINK;
45780 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45781 +
45782 + mode =
45783 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45784 + old_mnt);
45785 +
45786 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45787 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45788 + return mode;
45789 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45790 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45791 + return 0;
45792 + } else if (unlikely((mode & needmode) != needmode))
45793 + return 0;
45794 +
45795 + return 1;
45796 +}
45797 +
45798 +__u32
45799 +gr_acl_handle_symlink(const struct dentry * new_dentry,
45800 + const struct dentry * parent_dentry,
45801 + const struct vfsmount * parent_mnt, const char *from)
45802 +{
45803 + __u32 needmode = GR_WRITE | GR_CREATE;
45804 + __u32 mode;
45805 +
45806 + mode =
45807 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45808 + GR_CREATE | GR_AUDIT_CREATE |
45809 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45810 +
45811 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45812 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45813 + return mode;
45814 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45815 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45816 + return 0;
45817 + } else if (unlikely((mode & needmode) != needmode))
45818 + return 0;
45819 +
45820 + return (GR_WRITE | GR_CREATE);
45821 +}
45822 +
45823 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45824 +{
45825 + __u32 mode;
45826 +
45827 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45828 +
45829 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45830 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45831 + return mode;
45832 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45833 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45834 + return 0;
45835 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45836 + return 0;
45837 +
45838 + return (reqmode);
45839 +}
45840 +
45841 +__u32
45842 +gr_acl_handle_mknod(const struct dentry * new_dentry,
45843 + const struct dentry * parent_dentry,
45844 + const struct vfsmount * parent_mnt,
45845 + const int mode)
45846 +{
45847 + __u32 reqmode = GR_WRITE | GR_CREATE;
45848 + if (unlikely(mode & (S_ISUID | S_ISGID)))
45849 + reqmode |= GR_SETID;
45850 +
45851 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45852 + reqmode, GR_MKNOD_ACL_MSG);
45853 +}
45854 +
45855 +__u32
45856 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
45857 + const struct dentry *parent_dentry,
45858 + const struct vfsmount *parent_mnt)
45859 +{
45860 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
45861 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
45862 +}
45863 +
45864 +#define RENAME_CHECK_SUCCESS(old, new) \
45865 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
45866 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
45867 +
45868 +int
45869 +gr_acl_handle_rename(struct dentry *new_dentry,
45870 + struct dentry *parent_dentry,
45871 + const struct vfsmount *parent_mnt,
45872 + struct dentry *old_dentry,
45873 + struct inode *old_parent_inode,
45874 + struct vfsmount *old_mnt, const char *newname)
45875 +{
45876 + __u32 comp1, comp2;
45877 + int error = 0;
45878 +
45879 + if (unlikely(!gr_acl_is_enabled()))
45880 + return 0;
45881 +
45882 + if (!new_dentry->d_inode) {
45883 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
45884 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
45885 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
45886 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
45887 + GR_DELETE | GR_AUDIT_DELETE |
45888 + GR_AUDIT_READ | GR_AUDIT_WRITE |
45889 + GR_SUPPRESS, old_mnt);
45890 + } else {
45891 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
45892 + GR_CREATE | GR_DELETE |
45893 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
45894 + GR_AUDIT_READ | GR_AUDIT_WRITE |
45895 + GR_SUPPRESS, parent_mnt);
45896 + comp2 =
45897 + gr_search_file(old_dentry,
45898 + GR_READ | GR_WRITE | GR_AUDIT_READ |
45899 + GR_DELETE | GR_AUDIT_DELETE |
45900 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
45901 + }
45902 +
45903 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
45904 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
45905 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
45906 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
45907 + && !(comp2 & GR_SUPPRESS)) {
45908 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
45909 + error = -EACCES;
45910 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
45911 + error = -EACCES;
45912 +
45913 + return error;
45914 +}
45915 +
45916 +void
45917 +gr_acl_handle_exit(void)
45918 +{
45919 + u16 id;
45920 + char *rolename;
45921 + struct file *exec_file;
45922 +
45923 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
45924 + !(current->role->roletype & GR_ROLE_PERSIST))) {
45925 + id = current->acl_role_id;
45926 + rolename = current->role->rolename;
45927 + gr_set_acls(1);
45928 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
45929 + }
45930 +
45931 + write_lock(&grsec_exec_file_lock);
45932 + exec_file = current->exec_file;
45933 + current->exec_file = NULL;
45934 + write_unlock(&grsec_exec_file_lock);
45935 +
45936 + if (exec_file)
45937 + fput(exec_file);
45938 +}
45939 +
45940 +int
45941 +gr_acl_handle_procpidmem(const struct task_struct *task)
45942 +{
45943 + if (unlikely(!gr_acl_is_enabled()))
45944 + return 0;
45945 +
45946 + if (task != current && task->acl->mode & GR_PROTPROCFD)
45947 + return -EACCES;
45948 +
45949 + return 0;
45950 +}
45951 diff -urNp linux-3.0.3/grsecurity/gracl_ip.c linux-3.0.3/grsecurity/gracl_ip.c
45952 --- linux-3.0.3/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
45953 +++ linux-3.0.3/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
45954 @@ -0,0 +1,381 @@
45955 +#include <linux/kernel.h>
45956 +#include <asm/uaccess.h>
45957 +#include <asm/errno.h>
45958 +#include <net/sock.h>
45959 +#include <linux/file.h>
45960 +#include <linux/fs.h>
45961 +#include <linux/net.h>
45962 +#include <linux/in.h>
45963 +#include <linux/skbuff.h>
45964 +#include <linux/ip.h>
45965 +#include <linux/udp.h>
45966 +#include <linux/types.h>
45967 +#include <linux/sched.h>
45968 +#include <linux/netdevice.h>
45969 +#include <linux/inetdevice.h>
45970 +#include <linux/gracl.h>
45971 +#include <linux/grsecurity.h>
45972 +#include <linux/grinternal.h>
45973 +
45974 +#define GR_BIND 0x01
45975 +#define GR_CONNECT 0x02
45976 +#define GR_INVERT 0x04
45977 +#define GR_BINDOVERRIDE 0x08
45978 +#define GR_CONNECTOVERRIDE 0x10
45979 +#define GR_SOCK_FAMILY 0x20
45980 +
45981 +static const char * gr_protocols[IPPROTO_MAX] = {
45982 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
45983 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
45984 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
45985 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
45986 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
45987 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
45988 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
45989 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
45990 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
45991 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
45992 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
45993 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
45994 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
45995 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
45996 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
45997 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
45998 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
45999 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46000 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46001 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46002 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46003 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46004 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46005 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46006 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46007 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46008 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46009 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46010 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46011 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46012 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46013 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46014 + };
46015 +
46016 +static const char * gr_socktypes[SOCK_MAX] = {
46017 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46018 + "unknown:7", "unknown:8", "unknown:9", "packet"
46019 + };
46020 +
46021 +static const char * gr_sockfamilies[AF_MAX+1] = {
46022 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46023 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46024 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46025 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46026 + };
46027 +
46028 +const char *
46029 +gr_proto_to_name(unsigned char proto)
46030 +{
46031 + return gr_protocols[proto];
46032 +}
46033 +
46034 +const char *
46035 +gr_socktype_to_name(unsigned char type)
46036 +{
46037 + return gr_socktypes[type];
46038 +}
46039 +
46040 +const char *
46041 +gr_sockfamily_to_name(unsigned char family)
46042 +{
46043 + return gr_sockfamilies[family];
46044 +}
46045 +
46046 +int
46047 +gr_search_socket(const int domain, const int type, const int protocol)
46048 +{
46049 + struct acl_subject_label *curr;
46050 + const struct cred *cred = current_cred();
46051 +
46052 + if (unlikely(!gr_acl_is_enabled()))
46053 + goto exit;
46054 +
46055 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46056 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46057 + goto exit; // let the kernel handle it
46058 +
46059 + curr = current->acl;
46060 +
46061 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46062 + /* the family is allowed, if this is PF_INET allow it only if
46063 + the extra sock type/protocol checks pass */
46064 + if (domain == PF_INET)
46065 + goto inet_check;
46066 + goto exit;
46067 + } else {
46068 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46069 + __u32 fakeip = 0;
46070 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46071 + current->role->roletype, cred->uid,
46072 + cred->gid, current->exec_file ?
46073 + gr_to_filename(current->exec_file->f_path.dentry,
46074 + current->exec_file->f_path.mnt) :
46075 + curr->filename, curr->filename,
46076 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46077 + &current->signal->saved_ip);
46078 + goto exit;
46079 + }
46080 + goto exit_fail;
46081 + }
46082 +
46083 +inet_check:
46084 + /* the rest of this checking is for IPv4 only */
46085 + if (!curr->ips)
46086 + goto exit;
46087 +
46088 + if ((curr->ip_type & (1 << type)) &&
46089 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46090 + goto exit;
46091 +
46092 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46093 + /* we don't place acls on raw sockets , and sometimes
46094 + dgram/ip sockets are opened for ioctl and not
46095 + bind/connect, so we'll fake a bind learn log */
46096 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46097 + __u32 fakeip = 0;
46098 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46099 + current->role->roletype, cred->uid,
46100 + cred->gid, current->exec_file ?
46101 + gr_to_filename(current->exec_file->f_path.dentry,
46102 + current->exec_file->f_path.mnt) :
46103 + curr->filename, curr->filename,
46104 + &fakeip, 0, type,
46105 + protocol, GR_CONNECT, &current->signal->saved_ip);
46106 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46107 + __u32 fakeip = 0;
46108 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46109 + current->role->roletype, cred->uid,
46110 + cred->gid, current->exec_file ?
46111 + gr_to_filename(current->exec_file->f_path.dentry,
46112 + current->exec_file->f_path.mnt) :
46113 + curr->filename, curr->filename,
46114 + &fakeip, 0, type,
46115 + protocol, GR_BIND, &current->signal->saved_ip);
46116 + }
46117 + /* we'll log when they use connect or bind */
46118 + goto exit;
46119 + }
46120 +
46121 +exit_fail:
46122 + if (domain == PF_INET)
46123 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46124 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46125 + else
46126 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46127 + gr_socktype_to_name(type), protocol);
46128 +
46129 + return 0;
46130 +exit:
46131 + return 1;
46132 +}
46133 +
46134 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46135 +{
46136 + if ((ip->mode & mode) &&
46137 + (ip_port >= ip->low) &&
46138 + (ip_port <= ip->high) &&
46139 + ((ntohl(ip_addr) & our_netmask) ==
46140 + (ntohl(our_addr) & our_netmask))
46141 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46142 + && (ip->type & (1 << type))) {
46143 + if (ip->mode & GR_INVERT)
46144 + return 2; // specifically denied
46145 + else
46146 + return 1; // allowed
46147 + }
46148 +
46149 + return 0; // not specifically allowed, may continue parsing
46150 +}
46151 +
46152 +static int
46153 +gr_search_connectbind(const int full_mode, struct sock *sk,
46154 + struct sockaddr_in *addr, const int type)
46155 +{
46156 + char iface[IFNAMSIZ] = {0};
46157 + struct acl_subject_label *curr;
46158 + struct acl_ip_label *ip;
46159 + struct inet_sock *isk;
46160 + struct net_device *dev;
46161 + struct in_device *idev;
46162 + unsigned long i;
46163 + int ret;
46164 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46165 + __u32 ip_addr = 0;
46166 + __u32 our_addr;
46167 + __u32 our_netmask;
46168 + char *p;
46169 + __u16 ip_port = 0;
46170 + const struct cred *cred = current_cred();
46171 +
46172 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46173 + return 0;
46174 +
46175 + curr = current->acl;
46176 + isk = inet_sk(sk);
46177 +
46178 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46179 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46180 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46181 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46182 + struct sockaddr_in saddr;
46183 + int err;
46184 +
46185 + saddr.sin_family = AF_INET;
46186 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46187 + saddr.sin_port = isk->inet_sport;
46188 +
46189 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46190 + if (err)
46191 + return err;
46192 +
46193 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46194 + if (err)
46195 + return err;
46196 + }
46197 +
46198 + if (!curr->ips)
46199 + return 0;
46200 +
46201 + ip_addr = addr->sin_addr.s_addr;
46202 + ip_port = ntohs(addr->sin_port);
46203 +
46204 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46205 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46206 + current->role->roletype, cred->uid,
46207 + cred->gid, current->exec_file ?
46208 + gr_to_filename(current->exec_file->f_path.dentry,
46209 + current->exec_file->f_path.mnt) :
46210 + curr->filename, curr->filename,
46211 + &ip_addr, ip_port, type,
46212 + sk->sk_protocol, mode, &current->signal->saved_ip);
46213 + return 0;
46214 + }
46215 +
46216 + for (i = 0; i < curr->ip_num; i++) {
46217 + ip = *(curr->ips + i);
46218 + if (ip->iface != NULL) {
46219 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46220 + p = strchr(iface, ':');
46221 + if (p != NULL)
46222 + *p = '\0';
46223 + dev = dev_get_by_name(sock_net(sk), iface);
46224 + if (dev == NULL)
46225 + continue;
46226 + idev = in_dev_get(dev);
46227 + if (idev == NULL) {
46228 + dev_put(dev);
46229 + continue;
46230 + }
46231 + rcu_read_lock();
46232 + for_ifa(idev) {
46233 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46234 + our_addr = ifa->ifa_address;
46235 + our_netmask = 0xffffffff;
46236 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46237 + if (ret == 1) {
46238 + rcu_read_unlock();
46239 + in_dev_put(idev);
46240 + dev_put(dev);
46241 + return 0;
46242 + } else if (ret == 2) {
46243 + rcu_read_unlock();
46244 + in_dev_put(idev);
46245 + dev_put(dev);
46246 + goto denied;
46247 + }
46248 + }
46249 + } endfor_ifa(idev);
46250 + rcu_read_unlock();
46251 + in_dev_put(idev);
46252 + dev_put(dev);
46253 + } else {
46254 + our_addr = ip->addr;
46255 + our_netmask = ip->netmask;
46256 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46257 + if (ret == 1)
46258 + return 0;
46259 + else if (ret == 2)
46260 + goto denied;
46261 + }
46262 + }
46263 +
46264 +denied:
46265 + if (mode == GR_BIND)
46266 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46267 + else if (mode == GR_CONNECT)
46268 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46269 +
46270 + return -EACCES;
46271 +}
46272 +
46273 +int
46274 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46275 +{
46276 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46277 +}
46278 +
46279 +int
46280 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46281 +{
46282 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46283 +}
46284 +
46285 +int gr_search_listen(struct socket *sock)
46286 +{
46287 + struct sock *sk = sock->sk;
46288 + struct sockaddr_in addr;
46289 +
46290 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46291 + addr.sin_port = inet_sk(sk)->inet_sport;
46292 +
46293 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46294 +}
46295 +
46296 +int gr_search_accept(struct socket *sock)
46297 +{
46298 + struct sock *sk = sock->sk;
46299 + struct sockaddr_in addr;
46300 +
46301 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46302 + addr.sin_port = inet_sk(sk)->inet_sport;
46303 +
46304 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46305 +}
46306 +
46307 +int
46308 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46309 +{
46310 + if (addr)
46311 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46312 + else {
46313 + struct sockaddr_in sin;
46314 + const struct inet_sock *inet = inet_sk(sk);
46315 +
46316 + sin.sin_addr.s_addr = inet->inet_daddr;
46317 + sin.sin_port = inet->inet_dport;
46318 +
46319 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46320 + }
46321 +}
46322 +
46323 +int
46324 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46325 +{
46326 + struct sockaddr_in sin;
46327 +
46328 + if (unlikely(skb->len < sizeof (struct udphdr)))
46329 + return 0; // skip this packet
46330 +
46331 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46332 + sin.sin_port = udp_hdr(skb)->source;
46333 +
46334 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46335 +}
46336 diff -urNp linux-3.0.3/grsecurity/gracl_learn.c linux-3.0.3/grsecurity/gracl_learn.c
46337 --- linux-3.0.3/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46338 +++ linux-3.0.3/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
46339 @@ -0,0 +1,207 @@
46340 +#include <linux/kernel.h>
46341 +#include <linux/mm.h>
46342 +#include <linux/sched.h>
46343 +#include <linux/poll.h>
46344 +#include <linux/string.h>
46345 +#include <linux/file.h>
46346 +#include <linux/types.h>
46347 +#include <linux/vmalloc.h>
46348 +#include <linux/grinternal.h>
46349 +
46350 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46351 + size_t count, loff_t *ppos);
46352 +extern int gr_acl_is_enabled(void);
46353 +
46354 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46355 +static int gr_learn_attached;
46356 +
46357 +/* use a 512k buffer */
46358 +#define LEARN_BUFFER_SIZE (512 * 1024)
46359 +
46360 +static DEFINE_SPINLOCK(gr_learn_lock);
46361 +static DEFINE_MUTEX(gr_learn_user_mutex);
46362 +
46363 +/* we need to maintain two buffers, so that the kernel context of grlearn
46364 + uses a semaphore around the userspace copying, and the other kernel contexts
46365 + use a spinlock when copying into the buffer, since they cannot sleep
46366 +*/
46367 +static char *learn_buffer;
46368 +static char *learn_buffer_user;
46369 +static int learn_buffer_len;
46370 +static int learn_buffer_user_len;
46371 +
46372 +static ssize_t
46373 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46374 +{
46375 + DECLARE_WAITQUEUE(wait, current);
46376 + ssize_t retval = 0;
46377 +
46378 + add_wait_queue(&learn_wait, &wait);
46379 + set_current_state(TASK_INTERRUPTIBLE);
46380 + do {
46381 + mutex_lock(&gr_learn_user_mutex);
46382 + spin_lock(&gr_learn_lock);
46383 + if (learn_buffer_len)
46384 + break;
46385 + spin_unlock(&gr_learn_lock);
46386 + mutex_unlock(&gr_learn_user_mutex);
46387 + if (file->f_flags & O_NONBLOCK) {
46388 + retval = -EAGAIN;
46389 + goto out;
46390 + }
46391 + if (signal_pending(current)) {
46392 + retval = -ERESTARTSYS;
46393 + goto out;
46394 + }
46395 +
46396 + schedule();
46397 + } while (1);
46398 +
46399 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46400 + learn_buffer_user_len = learn_buffer_len;
46401 + retval = learn_buffer_len;
46402 + learn_buffer_len = 0;
46403 +
46404 + spin_unlock(&gr_learn_lock);
46405 +
46406 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46407 + retval = -EFAULT;
46408 +
46409 + mutex_unlock(&gr_learn_user_mutex);
46410 +out:
46411 + set_current_state(TASK_RUNNING);
46412 + remove_wait_queue(&learn_wait, &wait);
46413 + return retval;
46414 +}
46415 +
46416 +static unsigned int
46417 +poll_learn(struct file * file, poll_table * wait)
46418 +{
46419 + poll_wait(file, &learn_wait, wait);
46420 +
46421 + if (learn_buffer_len)
46422 + return (POLLIN | POLLRDNORM);
46423 +
46424 + return 0;
46425 +}
46426 +
46427 +void
46428 +gr_clear_learn_entries(void)
46429 +{
46430 + char *tmp;
46431 +
46432 + mutex_lock(&gr_learn_user_mutex);
46433 + spin_lock(&gr_learn_lock);
46434 + tmp = learn_buffer;
46435 + learn_buffer = NULL;
46436 + spin_unlock(&gr_learn_lock);
46437 + if (tmp)
46438 + vfree(tmp);
46439 + if (learn_buffer_user != NULL) {
46440 + vfree(learn_buffer_user);
46441 + learn_buffer_user = NULL;
46442 + }
46443 + learn_buffer_len = 0;
46444 + mutex_unlock(&gr_learn_user_mutex);
46445 +
46446 + return;
46447 +}
46448 +
46449 +void
46450 +gr_add_learn_entry(const char *fmt, ...)
46451 +{
46452 + va_list args;
46453 + unsigned int len;
46454 +
46455 + if (!gr_learn_attached)
46456 + return;
46457 +
46458 + spin_lock(&gr_learn_lock);
46459 +
46460 + /* leave a gap at the end so we know when it's "full" but don't have to
46461 + compute the exact length of the string we're trying to append
46462 + */
46463 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46464 + spin_unlock(&gr_learn_lock);
46465 + wake_up_interruptible(&learn_wait);
46466 + return;
46467 + }
46468 + if (learn_buffer == NULL) {
46469 + spin_unlock(&gr_learn_lock);
46470 + return;
46471 + }
46472 +
46473 + va_start(args, fmt);
46474 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46475 + va_end(args);
46476 +
46477 + learn_buffer_len += len + 1;
46478 +
46479 + spin_unlock(&gr_learn_lock);
46480 + wake_up_interruptible(&learn_wait);
46481 +
46482 + return;
46483 +}
46484 +
46485 +static int
46486 +open_learn(struct inode *inode, struct file *file)
46487 +{
46488 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46489 + return -EBUSY;
46490 + if (file->f_mode & FMODE_READ) {
46491 + int retval = 0;
46492 + mutex_lock(&gr_learn_user_mutex);
46493 + if (learn_buffer == NULL)
46494 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46495 + if (learn_buffer_user == NULL)
46496 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46497 + if (learn_buffer == NULL) {
46498 + retval = -ENOMEM;
46499 + goto out_error;
46500 + }
46501 + if (learn_buffer_user == NULL) {
46502 + retval = -ENOMEM;
46503 + goto out_error;
46504 + }
46505 + learn_buffer_len = 0;
46506 + learn_buffer_user_len = 0;
46507 + gr_learn_attached = 1;
46508 +out_error:
46509 + mutex_unlock(&gr_learn_user_mutex);
46510 + return retval;
46511 + }
46512 + return 0;
46513 +}
46514 +
46515 +static int
46516 +close_learn(struct inode *inode, struct file *file)
46517 +{
46518 + if (file->f_mode & FMODE_READ) {
46519 + char *tmp = NULL;
46520 + mutex_lock(&gr_learn_user_mutex);
46521 + spin_lock(&gr_learn_lock);
46522 + tmp = learn_buffer;
46523 + learn_buffer = NULL;
46524 + spin_unlock(&gr_learn_lock);
46525 + if (tmp)
46526 + vfree(tmp);
46527 + if (learn_buffer_user != NULL) {
46528 + vfree(learn_buffer_user);
46529 + learn_buffer_user = NULL;
46530 + }
46531 + learn_buffer_len = 0;
46532 + learn_buffer_user_len = 0;
46533 + gr_learn_attached = 0;
46534 + mutex_unlock(&gr_learn_user_mutex);
46535 + }
46536 +
46537 + return 0;
46538 +}
46539 +
46540 +const struct file_operations grsec_fops = {
46541 + .read = read_learn,
46542 + .write = write_grsec_handler,
46543 + .open = open_learn,
46544 + .release = close_learn,
46545 + .poll = poll_learn,
46546 +};
46547 diff -urNp linux-3.0.3/grsecurity/gracl_res.c linux-3.0.3/grsecurity/gracl_res.c
46548 --- linux-3.0.3/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46549 +++ linux-3.0.3/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
46550 @@ -0,0 +1,68 @@
46551 +#include <linux/kernel.h>
46552 +#include <linux/sched.h>
46553 +#include <linux/gracl.h>
46554 +#include <linux/grinternal.h>
46555 +
46556 +static const char *restab_log[] = {
46557 + [RLIMIT_CPU] = "RLIMIT_CPU",
46558 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46559 + [RLIMIT_DATA] = "RLIMIT_DATA",
46560 + [RLIMIT_STACK] = "RLIMIT_STACK",
46561 + [RLIMIT_CORE] = "RLIMIT_CORE",
46562 + [RLIMIT_RSS] = "RLIMIT_RSS",
46563 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46564 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46565 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46566 + [RLIMIT_AS] = "RLIMIT_AS",
46567 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46568 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46569 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46570 + [RLIMIT_NICE] = "RLIMIT_NICE",
46571 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46572 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46573 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46574 +};
46575 +
46576 +void
46577 +gr_log_resource(const struct task_struct *task,
46578 + const int res, const unsigned long wanted, const int gt)
46579 +{
46580 + const struct cred *cred;
46581 + unsigned long rlim;
46582 +
46583 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46584 + return;
46585 +
46586 + // not yet supported resource
46587 + if (unlikely(!restab_log[res]))
46588 + return;
46589 +
46590 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46591 + rlim = task_rlimit_max(task, res);
46592 + else
46593 + rlim = task_rlimit(task, res);
46594 +
46595 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46596 + return;
46597 +
46598 + rcu_read_lock();
46599 + cred = __task_cred(task);
46600 +
46601 + if (res == RLIMIT_NPROC &&
46602 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46603 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46604 + goto out_rcu_unlock;
46605 + else if (res == RLIMIT_MEMLOCK &&
46606 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46607 + goto out_rcu_unlock;
46608 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46609 + goto out_rcu_unlock;
46610 + rcu_read_unlock();
46611 +
46612 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46613 +
46614 + return;
46615 +out_rcu_unlock:
46616 + rcu_read_unlock();
46617 + return;
46618 +}
46619 diff -urNp linux-3.0.3/grsecurity/gracl_segv.c linux-3.0.3/grsecurity/gracl_segv.c
46620 --- linux-3.0.3/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46621 +++ linux-3.0.3/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
46622 @@ -0,0 +1,299 @@
46623 +#include <linux/kernel.h>
46624 +#include <linux/mm.h>
46625 +#include <asm/uaccess.h>
46626 +#include <asm/errno.h>
46627 +#include <asm/mman.h>
46628 +#include <net/sock.h>
46629 +#include <linux/file.h>
46630 +#include <linux/fs.h>
46631 +#include <linux/net.h>
46632 +#include <linux/in.h>
46633 +#include <linux/slab.h>
46634 +#include <linux/types.h>
46635 +#include <linux/sched.h>
46636 +#include <linux/timer.h>
46637 +#include <linux/gracl.h>
46638 +#include <linux/grsecurity.h>
46639 +#include <linux/grinternal.h>
46640 +
46641 +static struct crash_uid *uid_set;
46642 +static unsigned short uid_used;
46643 +static DEFINE_SPINLOCK(gr_uid_lock);
46644 +extern rwlock_t gr_inode_lock;
46645 +extern struct acl_subject_label *
46646 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46647 + struct acl_role_label *role);
46648 +
46649 +#ifdef CONFIG_BTRFS_FS
46650 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46651 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46652 +#endif
46653 +
46654 +static inline dev_t __get_dev(const struct dentry *dentry)
46655 +{
46656 +#ifdef CONFIG_BTRFS_FS
46657 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46658 + return get_btrfs_dev_from_inode(dentry->d_inode);
46659 + else
46660 +#endif
46661 + return dentry->d_inode->i_sb->s_dev;
46662 +}
46663 +
46664 +int
46665 +gr_init_uidset(void)
46666 +{
46667 + uid_set =
46668 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46669 + uid_used = 0;
46670 +
46671 + return uid_set ? 1 : 0;
46672 +}
46673 +
46674 +void
46675 +gr_free_uidset(void)
46676 +{
46677 + if (uid_set)
46678 + kfree(uid_set);
46679 +
46680 + return;
46681 +}
46682 +
46683 +int
46684 +gr_find_uid(const uid_t uid)
46685 +{
46686 + struct crash_uid *tmp = uid_set;
46687 + uid_t buid;
46688 + int low = 0, high = uid_used - 1, mid;
46689 +
46690 + while (high >= low) {
46691 + mid = (low + high) >> 1;
46692 + buid = tmp[mid].uid;
46693 + if (buid == uid)
46694 + return mid;
46695 + if (buid > uid)
46696 + high = mid - 1;
46697 + if (buid < uid)
46698 + low = mid + 1;
46699 + }
46700 +
46701 + return -1;
46702 +}
46703 +
46704 +static __inline__ void
46705 +gr_insertsort(void)
46706 +{
46707 + unsigned short i, j;
46708 + struct crash_uid index;
46709 +
46710 + for (i = 1; i < uid_used; i++) {
46711 + index = uid_set[i];
46712 + j = i;
46713 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46714 + uid_set[j] = uid_set[j - 1];
46715 + j--;
46716 + }
46717 + uid_set[j] = index;
46718 + }
46719 +
46720 + return;
46721 +}
46722 +
46723 +static __inline__ void
46724 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46725 +{
46726 + int loc;
46727 +
46728 + if (uid_used == GR_UIDTABLE_MAX)
46729 + return;
46730 +
46731 + loc = gr_find_uid(uid);
46732 +
46733 + if (loc >= 0) {
46734 + uid_set[loc].expires = expires;
46735 + return;
46736 + }
46737 +
46738 + uid_set[uid_used].uid = uid;
46739 + uid_set[uid_used].expires = expires;
46740 + uid_used++;
46741 +
46742 + gr_insertsort();
46743 +
46744 + return;
46745 +}
46746 +
46747 +void
46748 +gr_remove_uid(const unsigned short loc)
46749 +{
46750 + unsigned short i;
46751 +
46752 + for (i = loc + 1; i < uid_used; i++)
46753 + uid_set[i - 1] = uid_set[i];
46754 +
46755 + uid_used--;
46756 +
46757 + return;
46758 +}
46759 +
46760 +int
46761 +gr_check_crash_uid(const uid_t uid)
46762 +{
46763 + int loc;
46764 + int ret = 0;
46765 +
46766 + if (unlikely(!gr_acl_is_enabled()))
46767 + return 0;
46768 +
46769 + spin_lock(&gr_uid_lock);
46770 + loc = gr_find_uid(uid);
46771 +
46772 + if (loc < 0)
46773 + goto out_unlock;
46774 +
46775 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
46776 + gr_remove_uid(loc);
46777 + else
46778 + ret = 1;
46779 +
46780 +out_unlock:
46781 + spin_unlock(&gr_uid_lock);
46782 + return ret;
46783 +}
46784 +
46785 +static __inline__ int
46786 +proc_is_setxid(const struct cred *cred)
46787 +{
46788 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
46789 + cred->uid != cred->fsuid)
46790 + return 1;
46791 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46792 + cred->gid != cred->fsgid)
46793 + return 1;
46794 +
46795 + return 0;
46796 +}
46797 +
46798 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
46799 +
46800 +void
46801 +gr_handle_crash(struct task_struct *task, const int sig)
46802 +{
46803 + struct acl_subject_label *curr;
46804 + struct acl_subject_label *curr2;
46805 + struct task_struct *tsk, *tsk2;
46806 + const struct cred *cred;
46807 + const struct cred *cred2;
46808 +
46809 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46810 + return;
46811 +
46812 + if (unlikely(!gr_acl_is_enabled()))
46813 + return;
46814 +
46815 + curr = task->acl;
46816 +
46817 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
46818 + return;
46819 +
46820 + if (time_before_eq(curr->expires, get_seconds())) {
46821 + curr->expires = 0;
46822 + curr->crashes = 0;
46823 + }
46824 +
46825 + curr->crashes++;
46826 +
46827 + if (!curr->expires)
46828 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46829 +
46830 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46831 + time_after(curr->expires, get_seconds())) {
46832 + rcu_read_lock();
46833 + cred = __task_cred(task);
46834 + if (cred->uid && proc_is_setxid(cred)) {
46835 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46836 + spin_lock(&gr_uid_lock);
46837 + gr_insert_uid(cred->uid, curr->expires);
46838 + spin_unlock(&gr_uid_lock);
46839 + curr->expires = 0;
46840 + curr->crashes = 0;
46841 + read_lock(&tasklist_lock);
46842 + do_each_thread(tsk2, tsk) {
46843 + cred2 = __task_cred(tsk);
46844 + if (tsk != task && cred2->uid == cred->uid)
46845 + gr_fake_force_sig(SIGKILL, tsk);
46846 + } while_each_thread(tsk2, tsk);
46847 + read_unlock(&tasklist_lock);
46848 + } else {
46849 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46850 + read_lock(&tasklist_lock);
46851 + do_each_thread(tsk2, tsk) {
46852 + if (likely(tsk != task)) {
46853 + curr2 = tsk->acl;
46854 +
46855 + if (curr2->device == curr->device &&
46856 + curr2->inode == curr->inode)
46857 + gr_fake_force_sig(SIGKILL, tsk);
46858 + }
46859 + } while_each_thread(tsk2, tsk);
46860 + read_unlock(&tasklist_lock);
46861 + }
46862 + rcu_read_unlock();
46863 + }
46864 +
46865 + return;
46866 +}
46867 +
46868 +int
46869 +gr_check_crash_exec(const struct file *filp)
46870 +{
46871 + struct acl_subject_label *curr;
46872 +
46873 + if (unlikely(!gr_acl_is_enabled()))
46874 + return 0;
46875 +
46876 + read_lock(&gr_inode_lock);
46877 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
46878 + __get_dev(filp->f_path.dentry),
46879 + current->role);
46880 + read_unlock(&gr_inode_lock);
46881 +
46882 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
46883 + (!curr->crashes && !curr->expires))
46884 + return 0;
46885 +
46886 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46887 + time_after(curr->expires, get_seconds()))
46888 + return 1;
46889 + else if (time_before_eq(curr->expires, get_seconds())) {
46890 + curr->crashes = 0;
46891 + curr->expires = 0;
46892 + }
46893 +
46894 + return 0;
46895 +}
46896 +
46897 +void
46898 +gr_handle_alertkill(struct task_struct *task)
46899 +{
46900 + struct acl_subject_label *curracl;
46901 + __u32 curr_ip;
46902 + struct task_struct *p, *p2;
46903 +
46904 + if (unlikely(!gr_acl_is_enabled()))
46905 + return;
46906 +
46907 + curracl = task->acl;
46908 + curr_ip = task->signal->curr_ip;
46909 +
46910 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
46911 + read_lock(&tasklist_lock);
46912 + do_each_thread(p2, p) {
46913 + if (p->signal->curr_ip == curr_ip)
46914 + gr_fake_force_sig(SIGKILL, p);
46915 + } while_each_thread(p2, p);
46916 + read_unlock(&tasklist_lock);
46917 + } else if (curracl->mode & GR_KILLPROC)
46918 + gr_fake_force_sig(SIGKILL, task);
46919 +
46920 + return;
46921 +}
46922 diff -urNp linux-3.0.3/grsecurity/gracl_shm.c linux-3.0.3/grsecurity/gracl_shm.c
46923 --- linux-3.0.3/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
46924 +++ linux-3.0.3/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
46925 @@ -0,0 +1,40 @@
46926 +#include <linux/kernel.h>
46927 +#include <linux/mm.h>
46928 +#include <linux/sched.h>
46929 +#include <linux/file.h>
46930 +#include <linux/ipc.h>
46931 +#include <linux/gracl.h>
46932 +#include <linux/grsecurity.h>
46933 +#include <linux/grinternal.h>
46934 +
46935 +int
46936 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
46937 + const time_t shm_createtime, const uid_t cuid, const int shmid)
46938 +{
46939 + struct task_struct *task;
46940 +
46941 + if (!gr_acl_is_enabled())
46942 + return 1;
46943 +
46944 + rcu_read_lock();
46945 + read_lock(&tasklist_lock);
46946 +
46947 + task = find_task_by_vpid(shm_cprid);
46948 +
46949 + if (unlikely(!task))
46950 + task = find_task_by_vpid(shm_lapid);
46951 +
46952 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
46953 + (task->pid == shm_lapid)) &&
46954 + (task->acl->mode & GR_PROTSHM) &&
46955 + (task->acl != current->acl))) {
46956 + read_unlock(&tasklist_lock);
46957 + rcu_read_unlock();
46958 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
46959 + return 0;
46960 + }
46961 + read_unlock(&tasklist_lock);
46962 + rcu_read_unlock();
46963 +
46964 + return 1;
46965 +}
46966 diff -urNp linux-3.0.3/grsecurity/grsec_chdir.c linux-3.0.3/grsecurity/grsec_chdir.c
46967 --- linux-3.0.3/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
46968 +++ linux-3.0.3/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
46969 @@ -0,0 +1,19 @@
46970 +#include <linux/kernel.h>
46971 +#include <linux/sched.h>
46972 +#include <linux/fs.h>
46973 +#include <linux/file.h>
46974 +#include <linux/grsecurity.h>
46975 +#include <linux/grinternal.h>
46976 +
46977 +void
46978 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
46979 +{
46980 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
46981 + if ((grsec_enable_chdir && grsec_enable_group &&
46982 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
46983 + !grsec_enable_group)) {
46984 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
46985 + }
46986 +#endif
46987 + return;
46988 +}
46989 diff -urNp linux-3.0.3/grsecurity/grsec_chroot.c linux-3.0.3/grsecurity/grsec_chroot.c
46990 --- linux-3.0.3/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
46991 +++ linux-3.0.3/grsecurity/grsec_chroot.c 2011-08-23 21:48:14.000000000 -0400
46992 @@ -0,0 +1,349 @@
46993 +#include <linux/kernel.h>
46994 +#include <linux/module.h>
46995 +#include <linux/sched.h>
46996 +#include <linux/file.h>
46997 +#include <linux/fs.h>
46998 +#include <linux/mount.h>
46999 +#include <linux/types.h>
47000 +#include <linux/pid_namespace.h>
47001 +#include <linux/grsecurity.h>
47002 +#include <linux/grinternal.h>
47003 +
47004 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47005 +{
47006 +#ifdef CONFIG_GRKERNSEC
47007 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47008 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47009 + task->gr_is_chrooted = 1;
47010 + else
47011 + task->gr_is_chrooted = 0;
47012 +
47013 + task->gr_chroot_dentry = path->dentry;
47014 +#endif
47015 + return;
47016 +}
47017 +
47018 +void gr_clear_chroot_entries(struct task_struct *task)
47019 +{
47020 +#ifdef CONFIG_GRKERNSEC
47021 + task->gr_is_chrooted = 0;
47022 + task->gr_chroot_dentry = NULL;
47023 +#endif
47024 + return;
47025 +}
47026 +
47027 +int
47028 +gr_handle_chroot_unix(const pid_t pid)
47029 +{
47030 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47031 + struct task_struct *p;
47032 +
47033 + if (unlikely(!grsec_enable_chroot_unix))
47034 + return 1;
47035 +
47036 + if (likely(!proc_is_chrooted(current)))
47037 + return 1;
47038 +
47039 + rcu_read_lock();
47040 + read_lock(&tasklist_lock);
47041 + p = find_task_by_vpid_unrestricted(pid);
47042 + if (unlikely(p && !have_same_root(current, p))) {
47043 + read_unlock(&tasklist_lock);
47044 + rcu_read_unlock();
47045 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47046 + return 0;
47047 + }
47048 + read_unlock(&tasklist_lock);
47049 + rcu_read_unlock();
47050 +#endif
47051 + return 1;
47052 +}
47053 +
47054 +int
47055 +gr_handle_chroot_nice(void)
47056 +{
47057 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47058 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47059 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47060 + return -EPERM;
47061 + }
47062 +#endif
47063 + return 0;
47064 +}
47065 +
47066 +int
47067 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47068 +{
47069 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47070 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47071 + && proc_is_chrooted(current)) {
47072 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47073 + return -EACCES;
47074 + }
47075 +#endif
47076 + return 0;
47077 +}
47078 +
47079 +int
47080 +gr_handle_chroot_rawio(const struct inode *inode)
47081 +{
47082 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47083 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47084 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47085 + return 1;
47086 +#endif
47087 + return 0;
47088 +}
47089 +
47090 +int
47091 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47092 +{
47093 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47094 + struct task_struct *p;
47095 + int ret = 0;
47096 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47097 + return ret;
47098 +
47099 + read_lock(&tasklist_lock);
47100 + do_each_pid_task(pid, type, p) {
47101 + if (!have_same_root(current, p)) {
47102 + ret = 1;
47103 + goto out;
47104 + }
47105 + } while_each_pid_task(pid, type, p);
47106 +out:
47107 + read_unlock(&tasklist_lock);
47108 + return ret;
47109 +#endif
47110 + return 0;
47111 +}
47112 +
47113 +int
47114 +gr_pid_is_chrooted(struct task_struct *p)
47115 +{
47116 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47117 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47118 + return 0;
47119 +
47120 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47121 + !have_same_root(current, p)) {
47122 + return 1;
47123 + }
47124 +#endif
47125 + return 0;
47126 +}
47127 +
47128 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47129 +
47130 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47131 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47132 +{
47133 + struct path path, currentroot;
47134 + int ret = 0;
47135 +
47136 + path.dentry = (struct dentry *)u_dentry;
47137 + path.mnt = (struct vfsmount *)u_mnt;
47138 + get_fs_root(current->fs, &currentroot);
47139 + if (path_is_under(&path, &currentroot))
47140 + ret = 1;
47141 + path_put(&currentroot);
47142 +
47143 + return ret;
47144 +}
47145 +#endif
47146 +
47147 +int
47148 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47149 +{
47150 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47151 + if (!grsec_enable_chroot_fchdir)
47152 + return 1;
47153 +
47154 + if (!proc_is_chrooted(current))
47155 + return 1;
47156 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47157 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47158 + return 0;
47159 + }
47160 +#endif
47161 + return 1;
47162 +}
47163 +
47164 +int
47165 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47166 + const time_t shm_createtime)
47167 +{
47168 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47169 + struct task_struct *p;
47170 + time_t starttime;
47171 +
47172 + if (unlikely(!grsec_enable_chroot_shmat))
47173 + return 1;
47174 +
47175 + if (likely(!proc_is_chrooted(current)))
47176 + return 1;
47177 +
47178 + rcu_read_lock();
47179 + read_lock(&tasklist_lock);
47180 +
47181 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47182 + starttime = p->start_time.tv_sec;
47183 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47184 + if (have_same_root(current, p)) {
47185 + goto allow;
47186 + } else {
47187 + read_unlock(&tasklist_lock);
47188 + rcu_read_unlock();
47189 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47190 + return 0;
47191 + }
47192 + }
47193 + /* creator exited, pid reuse, fall through to next check */
47194 + }
47195 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47196 + if (unlikely(!have_same_root(current, p))) {
47197 + read_unlock(&tasklist_lock);
47198 + rcu_read_unlock();
47199 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47200 + return 0;
47201 + }
47202 + }
47203 +
47204 +allow:
47205 + read_unlock(&tasklist_lock);
47206 + rcu_read_unlock();
47207 +#endif
47208 + return 1;
47209 +}
47210 +
47211 +void
47212 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47213 +{
47214 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47215 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47216 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47217 +#endif
47218 + return;
47219 +}
47220 +
47221 +int
47222 +gr_handle_chroot_mknod(const struct dentry *dentry,
47223 + const struct vfsmount *mnt, const int mode)
47224 +{
47225 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47226 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47227 + proc_is_chrooted(current)) {
47228 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47229 + return -EPERM;
47230 + }
47231 +#endif
47232 + return 0;
47233 +}
47234 +
47235 +int
47236 +gr_handle_chroot_mount(const struct dentry *dentry,
47237 + const struct vfsmount *mnt, const char *dev_name)
47238 +{
47239 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47240 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47241 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47242 + return -EPERM;
47243 + }
47244 +#endif
47245 + return 0;
47246 +}
47247 +
47248 +int
47249 +gr_handle_chroot_pivot(void)
47250 +{
47251 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47252 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47253 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47254 + return -EPERM;
47255 + }
47256 +#endif
47257 + return 0;
47258 +}
47259 +
47260 +int
47261 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47262 +{
47263 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47264 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47265 + !gr_is_outside_chroot(dentry, mnt)) {
47266 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47267 + return -EPERM;
47268 + }
47269 +#endif
47270 + return 0;
47271 +}
47272 +
47273 +int
47274 +gr_handle_chroot_caps(struct path *path)
47275 +{
47276 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47277 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47278 + (init_task.fs->root.dentry != path->dentry) &&
47279 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47280 +
47281 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47282 + const struct cred *old = current_cred();
47283 + struct cred *new = prepare_creds();
47284 + if (new == NULL)
47285 + return 1;
47286 +
47287 + new->cap_permitted = cap_drop(old->cap_permitted,
47288 + chroot_caps);
47289 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47290 + chroot_caps);
47291 + new->cap_effective = cap_drop(old->cap_effective,
47292 + chroot_caps);
47293 +
47294 + commit_creds(new);
47295 +
47296 + return 0;
47297 + }
47298 +#endif
47299 + return 0;
47300 +}
47301 +
47302 +int
47303 +gr_handle_chroot_sysctl(const int op)
47304 +{
47305 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47306 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47307 + proc_is_chrooted(current))
47308 + return -EACCES;
47309 +#endif
47310 + return 0;
47311 +}
47312 +
47313 +void
47314 +gr_handle_chroot_chdir(struct path *path)
47315 +{
47316 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47317 + if (grsec_enable_chroot_chdir)
47318 + set_fs_pwd(current->fs, path);
47319 +#endif
47320 + return;
47321 +}
47322 +
47323 +int
47324 +gr_handle_chroot_chmod(const struct dentry *dentry,
47325 + const struct vfsmount *mnt, const int mode)
47326 +{
47327 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47328 + /* allow chmod +s on directories, but not files */
47329 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47330 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47331 + proc_is_chrooted(current)) {
47332 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47333 + return -EPERM;
47334 + }
47335 +#endif
47336 + return 0;
47337 +}
47338 +
47339 +#ifdef CONFIG_SECURITY
47340 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47341 +#endif
47342 diff -urNp linux-3.0.3/grsecurity/grsec_disabled.c linux-3.0.3/grsecurity/grsec_disabled.c
47343 --- linux-3.0.3/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47344 +++ linux-3.0.3/grsecurity/grsec_disabled.c 2011-08-23 21:48:14.000000000 -0400
47345 @@ -0,0 +1,447 @@
47346 +#include <linux/kernel.h>
47347 +#include <linux/module.h>
47348 +#include <linux/sched.h>
47349 +#include <linux/file.h>
47350 +#include <linux/fs.h>
47351 +#include <linux/kdev_t.h>
47352 +#include <linux/net.h>
47353 +#include <linux/in.h>
47354 +#include <linux/ip.h>
47355 +#include <linux/skbuff.h>
47356 +#include <linux/sysctl.h>
47357 +
47358 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47359 +void
47360 +pax_set_initial_flags(struct linux_binprm *bprm)
47361 +{
47362 + return;
47363 +}
47364 +#endif
47365 +
47366 +#ifdef CONFIG_SYSCTL
47367 +__u32
47368 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47369 +{
47370 + return 0;
47371 +}
47372 +#endif
47373 +
47374 +#ifdef CONFIG_TASKSTATS
47375 +int gr_is_taskstats_denied(int pid)
47376 +{
47377 + return 0;
47378 +}
47379 +#endif
47380 +
47381 +int
47382 +gr_acl_is_enabled(void)
47383 +{
47384 + return 0;
47385 +}
47386 +
47387 +int
47388 +gr_handle_rawio(const struct inode *inode)
47389 +{
47390 + return 0;
47391 +}
47392 +
47393 +void
47394 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47395 +{
47396 + return;
47397 +}
47398 +
47399 +int
47400 +gr_handle_ptrace(struct task_struct *task, const long request)
47401 +{
47402 + return 0;
47403 +}
47404 +
47405 +int
47406 +gr_handle_proc_ptrace(struct task_struct *task)
47407 +{
47408 + return 0;
47409 +}
47410 +
47411 +void
47412 +gr_learn_resource(const struct task_struct *task,
47413 + const int res, const unsigned long wanted, const int gt)
47414 +{
47415 + return;
47416 +}
47417 +
47418 +int
47419 +gr_set_acls(const int type)
47420 +{
47421 + return 0;
47422 +}
47423 +
47424 +int
47425 +gr_check_hidden_task(const struct task_struct *tsk)
47426 +{
47427 + return 0;
47428 +}
47429 +
47430 +int
47431 +gr_check_protected_task(const struct task_struct *task)
47432 +{
47433 + return 0;
47434 +}
47435 +
47436 +int
47437 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47438 +{
47439 + return 0;
47440 +}
47441 +
47442 +void
47443 +gr_copy_label(struct task_struct *tsk)
47444 +{
47445 + return;
47446 +}
47447 +
47448 +void
47449 +gr_set_pax_flags(struct task_struct *task)
47450 +{
47451 + return;
47452 +}
47453 +
47454 +int
47455 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47456 + const int unsafe_share)
47457 +{
47458 + return 0;
47459 +}
47460 +
47461 +void
47462 +gr_handle_delete(const ino_t ino, const dev_t dev)
47463 +{
47464 + return;
47465 +}
47466 +
47467 +void
47468 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47469 +{
47470 + return;
47471 +}
47472 +
47473 +void
47474 +gr_handle_crash(struct task_struct *task, const int sig)
47475 +{
47476 + return;
47477 +}
47478 +
47479 +int
47480 +gr_check_crash_exec(const struct file *filp)
47481 +{
47482 + return 0;
47483 +}
47484 +
47485 +int
47486 +gr_check_crash_uid(const uid_t uid)
47487 +{
47488 + return 0;
47489 +}
47490 +
47491 +void
47492 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47493 + struct dentry *old_dentry,
47494 + struct dentry *new_dentry,
47495 + struct vfsmount *mnt, const __u8 replace)
47496 +{
47497 + return;
47498 +}
47499 +
47500 +int
47501 +gr_search_socket(const int family, const int type, const int protocol)
47502 +{
47503 + return 1;
47504 +}
47505 +
47506 +int
47507 +gr_search_connectbind(const int mode, const struct socket *sock,
47508 + const struct sockaddr_in *addr)
47509 +{
47510 + return 0;
47511 +}
47512 +
47513 +int
47514 +gr_is_capable(const int cap)
47515 +{
47516 + return 1;
47517 +}
47518 +
47519 +int
47520 +gr_is_capable_nolog(const int cap)
47521 +{
47522 + return 1;
47523 +}
47524 +
47525 +void
47526 +gr_handle_alertkill(struct task_struct *task)
47527 +{
47528 + return;
47529 +}
47530 +
47531 +__u32
47532 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47533 +{
47534 + return 1;
47535 +}
47536 +
47537 +__u32
47538 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47539 + const struct vfsmount * mnt)
47540 +{
47541 + return 1;
47542 +}
47543 +
47544 +__u32
47545 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47546 + const int fmode)
47547 +{
47548 + return 1;
47549 +}
47550 +
47551 +__u32
47552 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47553 +{
47554 + return 1;
47555 +}
47556 +
47557 +__u32
47558 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47559 +{
47560 + return 1;
47561 +}
47562 +
47563 +int
47564 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47565 + unsigned int *vm_flags)
47566 +{
47567 + return 1;
47568 +}
47569 +
47570 +__u32
47571 +gr_acl_handle_truncate(const struct dentry * dentry,
47572 + const struct vfsmount * mnt)
47573 +{
47574 + return 1;
47575 +}
47576 +
47577 +__u32
47578 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47579 +{
47580 + return 1;
47581 +}
47582 +
47583 +__u32
47584 +gr_acl_handle_access(const struct dentry * dentry,
47585 + const struct vfsmount * mnt, const int fmode)
47586 +{
47587 + return 1;
47588 +}
47589 +
47590 +__u32
47591 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47592 + mode_t mode)
47593 +{
47594 + return 1;
47595 +}
47596 +
47597 +__u32
47598 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47599 + mode_t mode)
47600 +{
47601 + return 1;
47602 +}
47603 +
47604 +__u32
47605 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47606 +{
47607 + return 1;
47608 +}
47609 +
47610 +__u32
47611 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47612 +{
47613 + return 1;
47614 +}
47615 +
47616 +void
47617 +grsecurity_init(void)
47618 +{
47619 + return;
47620 +}
47621 +
47622 +__u32
47623 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47624 + const struct dentry * parent_dentry,
47625 + const struct vfsmount * parent_mnt,
47626 + const int mode)
47627 +{
47628 + return 1;
47629 +}
47630 +
47631 +__u32
47632 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47633 + const struct dentry * parent_dentry,
47634 + const struct vfsmount * parent_mnt)
47635 +{
47636 + return 1;
47637 +}
47638 +
47639 +__u32
47640 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47641 + const struct dentry * parent_dentry,
47642 + const struct vfsmount * parent_mnt, const char *from)
47643 +{
47644 + return 1;
47645 +}
47646 +
47647 +__u32
47648 +gr_acl_handle_link(const struct dentry * new_dentry,
47649 + const struct dentry * parent_dentry,
47650 + const struct vfsmount * parent_mnt,
47651 + const struct dentry * old_dentry,
47652 + const struct vfsmount * old_mnt, const char *to)
47653 +{
47654 + return 1;
47655 +}
47656 +
47657 +int
47658 +gr_acl_handle_rename(const struct dentry *new_dentry,
47659 + const struct dentry *parent_dentry,
47660 + const struct vfsmount *parent_mnt,
47661 + const struct dentry *old_dentry,
47662 + const struct inode *old_parent_inode,
47663 + const struct vfsmount *old_mnt, const char *newname)
47664 +{
47665 + return 0;
47666 +}
47667 +
47668 +int
47669 +gr_acl_handle_filldir(const struct file *file, const char *name,
47670 + const int namelen, const ino_t ino)
47671 +{
47672 + return 1;
47673 +}
47674 +
47675 +int
47676 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47677 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47678 +{
47679 + return 1;
47680 +}
47681 +
47682 +int
47683 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47684 +{
47685 + return 0;
47686 +}
47687 +
47688 +int
47689 +gr_search_accept(const struct socket *sock)
47690 +{
47691 + return 0;
47692 +}
47693 +
47694 +int
47695 +gr_search_listen(const struct socket *sock)
47696 +{
47697 + return 0;
47698 +}
47699 +
47700 +int
47701 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47702 +{
47703 + return 0;
47704 +}
47705 +
47706 +__u32
47707 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47708 +{
47709 + return 1;
47710 +}
47711 +
47712 +__u32
47713 +gr_acl_handle_creat(const struct dentry * dentry,
47714 + const struct dentry * p_dentry,
47715 + const struct vfsmount * p_mnt, const int fmode,
47716 + const int imode)
47717 +{
47718 + return 1;
47719 +}
47720 +
47721 +void
47722 +gr_acl_handle_exit(void)
47723 +{
47724 + return;
47725 +}
47726 +
47727 +int
47728 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47729 +{
47730 + return 1;
47731 +}
47732 +
47733 +void
47734 +gr_set_role_label(const uid_t uid, const gid_t gid)
47735 +{
47736 + return;
47737 +}
47738 +
47739 +int
47740 +gr_acl_handle_procpidmem(const struct task_struct *task)
47741 +{
47742 + return 0;
47743 +}
47744 +
47745 +int
47746 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47747 +{
47748 + return 0;
47749 +}
47750 +
47751 +int
47752 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47753 +{
47754 + return 0;
47755 +}
47756 +
47757 +void
47758 +gr_set_kernel_label(struct task_struct *task)
47759 +{
47760 + return;
47761 +}
47762 +
47763 +int
47764 +gr_check_user_change(int real, int effective, int fs)
47765 +{
47766 + return 0;
47767 +}
47768 +
47769 +int
47770 +gr_check_group_change(int real, int effective, int fs)
47771 +{
47772 + return 0;
47773 +}
47774 +
47775 +int gr_acl_enable_at_secure(void)
47776 +{
47777 + return 0;
47778 +}
47779 +
47780 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47781 +{
47782 + return dentry->d_inode->i_sb->s_dev;
47783 +}
47784 +
47785 +EXPORT_SYMBOL(gr_is_capable);
47786 +EXPORT_SYMBOL(gr_is_capable_nolog);
47787 +EXPORT_SYMBOL(gr_learn_resource);
47788 +EXPORT_SYMBOL(gr_set_kernel_label);
47789 +#ifdef CONFIG_SECURITY
47790 +EXPORT_SYMBOL(gr_check_user_change);
47791 +EXPORT_SYMBOL(gr_check_group_change);
47792 +#endif
47793 diff -urNp linux-3.0.3/grsecurity/grsec_exec.c linux-3.0.3/grsecurity/grsec_exec.c
47794 --- linux-3.0.3/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47795 +++ linux-3.0.3/grsecurity/grsec_exec.c 2011-08-23 21:48:14.000000000 -0400
47796 @@ -0,0 +1,87 @@
47797 +#include <linux/kernel.h>
47798 +#include <linux/sched.h>
47799 +#include <linux/file.h>
47800 +#include <linux/binfmts.h>
47801 +#include <linux/fs.h>
47802 +#include <linux/types.h>
47803 +#include <linux/grdefs.h>
47804 +#include <linux/grsecurity.h>
47805 +#include <linux/grinternal.h>
47806 +#include <linux/capability.h>
47807 +
47808 +#include <asm/uaccess.h>
47809 +
47810 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47811 +static char gr_exec_arg_buf[132];
47812 +static DEFINE_MUTEX(gr_exec_arg_mutex);
47813 +#endif
47814 +
47815 +int
47816 +gr_handle_nproc(void)
47817 +{
47818 +#ifdef CONFIG_GRKERNSEC_EXECVE
47819 + const struct cred *cred = current_cred();
47820 + if (grsec_enable_execve && cred->user &&
47821 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
47822 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
47823 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
47824 + return -EAGAIN;
47825 + }
47826 +#endif
47827 + return 0;
47828 +}
47829 +
47830 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
47831 +
47832 +void
47833 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
47834 +{
47835 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47836 + char *grarg = gr_exec_arg_buf;
47837 + unsigned int i, x, execlen = 0;
47838 + char c;
47839 +
47840 + if (!((grsec_enable_execlog && grsec_enable_group &&
47841 + in_group_p(grsec_audit_gid))
47842 + || (grsec_enable_execlog && !grsec_enable_group)))
47843 + return;
47844 +
47845 + mutex_lock(&gr_exec_arg_mutex);
47846 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
47847 +
47848 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
47849 + const char __user *p;
47850 + unsigned int len;
47851 +
47852 + p = get_user_arg_ptr(argv, i);
47853 + if (IS_ERR(p))
47854 + goto log;
47855 +
47856 + len = strnlen_user(p, 128 - execlen);
47857 + if (len > 128 - execlen)
47858 + len = 128 - execlen;
47859 + else if (len > 0)
47860 + len--;
47861 + if (copy_from_user(grarg + execlen, p, len))
47862 + goto log;
47863 +
47864 + /* rewrite unprintable characters */
47865 + for (x = 0; x < len; x++) {
47866 + c = *(grarg + execlen + x);
47867 + if (c < 32 || c > 126)
47868 + *(grarg + execlen + x) = ' ';
47869 + }
47870 +
47871 + execlen += len;
47872 + *(grarg + execlen) = ' ';
47873 + *(grarg + execlen + 1) = '\0';
47874 + execlen++;
47875 + }
47876 +
47877 + log:
47878 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
47879 + bprm->file->f_path.mnt, grarg);
47880 + mutex_unlock(&gr_exec_arg_mutex);
47881 +#endif
47882 + return;
47883 +}
47884 diff -urNp linux-3.0.3/grsecurity/grsec_fifo.c linux-3.0.3/grsecurity/grsec_fifo.c
47885 --- linux-3.0.3/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
47886 +++ linux-3.0.3/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
47887 @@ -0,0 +1,24 @@
47888 +#include <linux/kernel.h>
47889 +#include <linux/sched.h>
47890 +#include <linux/fs.h>
47891 +#include <linux/file.h>
47892 +#include <linux/grinternal.h>
47893 +
47894 +int
47895 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
47896 + const struct dentry *dir, const int flag, const int acc_mode)
47897 +{
47898 +#ifdef CONFIG_GRKERNSEC_FIFO
47899 + const struct cred *cred = current_cred();
47900 +
47901 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
47902 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
47903 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
47904 + (cred->fsuid != dentry->d_inode->i_uid)) {
47905 + if (!inode_permission(dentry->d_inode, acc_mode))
47906 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
47907 + return -EACCES;
47908 + }
47909 +#endif
47910 + return 0;
47911 +}
47912 diff -urNp linux-3.0.3/grsecurity/grsec_fork.c linux-3.0.3/grsecurity/grsec_fork.c
47913 --- linux-3.0.3/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
47914 +++ linux-3.0.3/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
47915 @@ -0,0 +1,23 @@
47916 +#include <linux/kernel.h>
47917 +#include <linux/sched.h>
47918 +#include <linux/grsecurity.h>
47919 +#include <linux/grinternal.h>
47920 +#include <linux/errno.h>
47921 +
47922 +void
47923 +gr_log_forkfail(const int retval)
47924 +{
47925 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
47926 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
47927 + switch (retval) {
47928 + case -EAGAIN:
47929 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
47930 + break;
47931 + case -ENOMEM:
47932 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
47933 + break;
47934 + }
47935 + }
47936 +#endif
47937 + return;
47938 +}
47939 diff -urNp linux-3.0.3/grsecurity/grsec_init.c linux-3.0.3/grsecurity/grsec_init.c
47940 --- linux-3.0.3/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
47941 +++ linux-3.0.3/grsecurity/grsec_init.c 2011-08-23 21:48:14.000000000 -0400
47942 @@ -0,0 +1,273 @@
47943 +#include <linux/kernel.h>
47944 +#include <linux/sched.h>
47945 +#include <linux/mm.h>
47946 +#include <linux/gracl.h>
47947 +#include <linux/slab.h>
47948 +#include <linux/vmalloc.h>
47949 +#include <linux/percpu.h>
47950 +#include <linux/module.h>
47951 +
47952 +int grsec_enable_brute;
47953 +int grsec_enable_link;
47954 +int grsec_enable_dmesg;
47955 +int grsec_enable_harden_ptrace;
47956 +int grsec_enable_fifo;
47957 +int grsec_enable_execve;
47958 +int grsec_enable_execlog;
47959 +int grsec_enable_signal;
47960 +int grsec_enable_forkfail;
47961 +int grsec_enable_audit_ptrace;
47962 +int grsec_enable_time;
47963 +int grsec_enable_audit_textrel;
47964 +int grsec_enable_group;
47965 +int grsec_audit_gid;
47966 +int grsec_enable_chdir;
47967 +int grsec_enable_mount;
47968 +int grsec_enable_rofs;
47969 +int grsec_enable_chroot_findtask;
47970 +int grsec_enable_chroot_mount;
47971 +int grsec_enable_chroot_shmat;
47972 +int grsec_enable_chroot_fchdir;
47973 +int grsec_enable_chroot_double;
47974 +int grsec_enable_chroot_pivot;
47975 +int grsec_enable_chroot_chdir;
47976 +int grsec_enable_chroot_chmod;
47977 +int grsec_enable_chroot_mknod;
47978 +int grsec_enable_chroot_nice;
47979 +int grsec_enable_chroot_execlog;
47980 +int grsec_enable_chroot_caps;
47981 +int grsec_enable_chroot_sysctl;
47982 +int grsec_enable_chroot_unix;
47983 +int grsec_enable_tpe;
47984 +int grsec_tpe_gid;
47985 +int grsec_enable_blackhole;
47986 +#ifdef CONFIG_IPV6_MODULE
47987 +EXPORT_SYMBOL(grsec_enable_blackhole);
47988 +#endif
47989 +int grsec_lastack_retries;
47990 +int grsec_enable_tpe_all;
47991 +int grsec_enable_tpe_invert;
47992 +int grsec_enable_socket_all;
47993 +int grsec_socket_all_gid;
47994 +int grsec_enable_socket_client;
47995 +int grsec_socket_client_gid;
47996 +int grsec_enable_socket_server;
47997 +int grsec_socket_server_gid;
47998 +int grsec_resource_logging;
47999 +int grsec_disable_privio;
48000 +int grsec_enable_log_rwxmaps;
48001 +int grsec_lock;
48002 +
48003 +DEFINE_SPINLOCK(grsec_alert_lock);
48004 +unsigned long grsec_alert_wtime = 0;
48005 +unsigned long grsec_alert_fyet = 0;
48006 +
48007 +DEFINE_SPINLOCK(grsec_audit_lock);
48008 +
48009 +DEFINE_RWLOCK(grsec_exec_file_lock);
48010 +
48011 +char *gr_shared_page[4];
48012 +
48013 +char *gr_alert_log_fmt;
48014 +char *gr_audit_log_fmt;
48015 +char *gr_alert_log_buf;
48016 +char *gr_audit_log_buf;
48017 +
48018 +extern struct gr_arg *gr_usermode;
48019 +extern unsigned char *gr_system_salt;
48020 +extern unsigned char *gr_system_sum;
48021 +
48022 +void __init
48023 +grsecurity_init(void)
48024 +{
48025 + int j;
48026 + /* create the per-cpu shared pages */
48027 +
48028 +#ifdef CONFIG_X86
48029 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48030 +#endif
48031 +
48032 + for (j = 0; j < 4; j++) {
48033 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48034 + if (gr_shared_page[j] == NULL) {
48035 + panic("Unable to allocate grsecurity shared page");
48036 + return;
48037 + }
48038 + }
48039 +
48040 + /* allocate log buffers */
48041 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48042 + if (!gr_alert_log_fmt) {
48043 + panic("Unable to allocate grsecurity alert log format buffer");
48044 + return;
48045 + }
48046 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48047 + if (!gr_audit_log_fmt) {
48048 + panic("Unable to allocate grsecurity audit log format buffer");
48049 + return;
48050 + }
48051 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48052 + if (!gr_alert_log_buf) {
48053 + panic("Unable to allocate grsecurity alert log buffer");
48054 + return;
48055 + }
48056 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48057 + if (!gr_audit_log_buf) {
48058 + panic("Unable to allocate grsecurity audit log buffer");
48059 + return;
48060 + }
48061 +
48062 + /* allocate memory for authentication structure */
48063 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48064 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48065 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48066 +
48067 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48068 + panic("Unable to allocate grsecurity authentication structure");
48069 + return;
48070 + }
48071 +
48072 +
48073 +#ifdef CONFIG_GRKERNSEC_IO
48074 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48075 + grsec_disable_privio = 1;
48076 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48077 + grsec_disable_privio = 1;
48078 +#else
48079 + grsec_disable_privio = 0;
48080 +#endif
48081 +#endif
48082 +
48083 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48084 + /* for backward compatibility, tpe_invert always defaults to on if
48085 + enabled in the kernel
48086 + */
48087 + grsec_enable_tpe_invert = 1;
48088 +#endif
48089 +
48090 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48091 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48092 + grsec_lock = 1;
48093 +#endif
48094 +
48095 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48096 + grsec_enable_audit_textrel = 1;
48097 +#endif
48098 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48099 + grsec_enable_log_rwxmaps = 1;
48100 +#endif
48101 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48102 + grsec_enable_group = 1;
48103 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48104 +#endif
48105 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48106 + grsec_enable_chdir = 1;
48107 +#endif
48108 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48109 + grsec_enable_harden_ptrace = 1;
48110 +#endif
48111 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48112 + grsec_enable_mount = 1;
48113 +#endif
48114 +#ifdef CONFIG_GRKERNSEC_LINK
48115 + grsec_enable_link = 1;
48116 +#endif
48117 +#ifdef CONFIG_GRKERNSEC_BRUTE
48118 + grsec_enable_brute = 1;
48119 +#endif
48120 +#ifdef CONFIG_GRKERNSEC_DMESG
48121 + grsec_enable_dmesg = 1;
48122 +#endif
48123 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48124 + grsec_enable_blackhole = 1;
48125 + grsec_lastack_retries = 4;
48126 +#endif
48127 +#ifdef CONFIG_GRKERNSEC_FIFO
48128 + grsec_enable_fifo = 1;
48129 +#endif
48130 +#ifdef CONFIG_GRKERNSEC_EXECVE
48131 + grsec_enable_execve = 1;
48132 +#endif
48133 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48134 + grsec_enable_execlog = 1;
48135 +#endif
48136 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48137 + grsec_enable_signal = 1;
48138 +#endif
48139 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48140 + grsec_enable_forkfail = 1;
48141 +#endif
48142 +#ifdef CONFIG_GRKERNSEC_TIME
48143 + grsec_enable_time = 1;
48144 +#endif
48145 +#ifdef CONFIG_GRKERNSEC_RESLOG
48146 + grsec_resource_logging = 1;
48147 +#endif
48148 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48149 + grsec_enable_chroot_findtask = 1;
48150 +#endif
48151 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48152 + grsec_enable_chroot_unix = 1;
48153 +#endif
48154 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48155 + grsec_enable_chroot_mount = 1;
48156 +#endif
48157 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48158 + grsec_enable_chroot_fchdir = 1;
48159 +#endif
48160 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48161 + grsec_enable_chroot_shmat = 1;
48162 +#endif
48163 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48164 + grsec_enable_audit_ptrace = 1;
48165 +#endif
48166 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48167 + grsec_enable_chroot_double = 1;
48168 +#endif
48169 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48170 + grsec_enable_chroot_pivot = 1;
48171 +#endif
48172 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48173 + grsec_enable_chroot_chdir = 1;
48174 +#endif
48175 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48176 + grsec_enable_chroot_chmod = 1;
48177 +#endif
48178 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48179 + grsec_enable_chroot_mknod = 1;
48180 +#endif
48181 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48182 + grsec_enable_chroot_nice = 1;
48183 +#endif
48184 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48185 + grsec_enable_chroot_execlog = 1;
48186 +#endif
48187 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48188 + grsec_enable_chroot_caps = 1;
48189 +#endif
48190 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48191 + grsec_enable_chroot_sysctl = 1;
48192 +#endif
48193 +#ifdef CONFIG_GRKERNSEC_TPE
48194 + grsec_enable_tpe = 1;
48195 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48196 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48197 + grsec_enable_tpe_all = 1;
48198 +#endif
48199 +#endif
48200 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48201 + grsec_enable_socket_all = 1;
48202 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48203 +#endif
48204 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48205 + grsec_enable_socket_client = 1;
48206 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48207 +#endif
48208 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48209 + grsec_enable_socket_server = 1;
48210 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48211 +#endif
48212 +#endif
48213 +
48214 + return;
48215 +}
48216 diff -urNp linux-3.0.3/grsecurity/grsec_link.c linux-3.0.3/grsecurity/grsec_link.c
48217 --- linux-3.0.3/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48218 +++ linux-3.0.3/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
48219 @@ -0,0 +1,43 @@
48220 +#include <linux/kernel.h>
48221 +#include <linux/sched.h>
48222 +#include <linux/fs.h>
48223 +#include <linux/file.h>
48224 +#include <linux/grinternal.h>
48225 +
48226 +int
48227 +gr_handle_follow_link(const struct inode *parent,
48228 + const struct inode *inode,
48229 + const struct dentry *dentry, const struct vfsmount *mnt)
48230 +{
48231 +#ifdef CONFIG_GRKERNSEC_LINK
48232 + const struct cred *cred = current_cred();
48233 +
48234 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48235 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48236 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48237 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48238 + return -EACCES;
48239 + }
48240 +#endif
48241 + return 0;
48242 +}
48243 +
48244 +int
48245 +gr_handle_hardlink(const struct dentry *dentry,
48246 + const struct vfsmount *mnt,
48247 + struct inode *inode, const int mode, const char *to)
48248 +{
48249 +#ifdef CONFIG_GRKERNSEC_LINK
48250 + const struct cred *cred = current_cred();
48251 +
48252 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48253 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48254 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48255 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48256 + !capable(CAP_FOWNER) && cred->uid) {
48257 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48258 + return -EPERM;
48259 + }
48260 +#endif
48261 + return 0;
48262 +}
48263 diff -urNp linux-3.0.3/grsecurity/grsec_log.c linux-3.0.3/grsecurity/grsec_log.c
48264 --- linux-3.0.3/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48265 +++ linux-3.0.3/grsecurity/grsec_log.c 2011-08-23 21:48:14.000000000 -0400
48266 @@ -0,0 +1,310 @@
48267 +#include <linux/kernel.h>
48268 +#include <linux/sched.h>
48269 +#include <linux/file.h>
48270 +#include <linux/tty.h>
48271 +#include <linux/fs.h>
48272 +#include <linux/grinternal.h>
48273 +
48274 +#ifdef CONFIG_TREE_PREEMPT_RCU
48275 +#define DISABLE_PREEMPT() preempt_disable()
48276 +#define ENABLE_PREEMPT() preempt_enable()
48277 +#else
48278 +#define DISABLE_PREEMPT()
48279 +#define ENABLE_PREEMPT()
48280 +#endif
48281 +
48282 +#define BEGIN_LOCKS(x) \
48283 + DISABLE_PREEMPT(); \
48284 + rcu_read_lock(); \
48285 + read_lock(&tasklist_lock); \
48286 + read_lock(&grsec_exec_file_lock); \
48287 + if (x != GR_DO_AUDIT) \
48288 + spin_lock(&grsec_alert_lock); \
48289 + else \
48290 + spin_lock(&grsec_audit_lock)
48291 +
48292 +#define END_LOCKS(x) \
48293 + if (x != GR_DO_AUDIT) \
48294 + spin_unlock(&grsec_alert_lock); \
48295 + else \
48296 + spin_unlock(&grsec_audit_lock); \
48297 + read_unlock(&grsec_exec_file_lock); \
48298 + read_unlock(&tasklist_lock); \
48299 + rcu_read_unlock(); \
48300 + ENABLE_PREEMPT(); \
48301 + if (x == GR_DONT_AUDIT) \
48302 + gr_handle_alertkill(current)
48303 +
48304 +enum {
48305 + FLOODING,
48306 + NO_FLOODING
48307 +};
48308 +
48309 +extern char *gr_alert_log_fmt;
48310 +extern char *gr_audit_log_fmt;
48311 +extern char *gr_alert_log_buf;
48312 +extern char *gr_audit_log_buf;
48313 +
48314 +static int gr_log_start(int audit)
48315 +{
48316 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48317 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48318 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48319 +
48320 + if (audit == GR_DO_AUDIT)
48321 + goto set_fmt;
48322 +
48323 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48324 + grsec_alert_wtime = jiffies;
48325 + grsec_alert_fyet = 0;
48326 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48327 + grsec_alert_fyet++;
48328 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48329 + grsec_alert_wtime = jiffies;
48330 + grsec_alert_fyet++;
48331 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48332 + return FLOODING;
48333 + } else return FLOODING;
48334 +
48335 +set_fmt:
48336 + memset(buf, 0, PAGE_SIZE);
48337 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48338 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48339 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48340 + } else if (current->signal->curr_ip) {
48341 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48342 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48343 + } else if (gr_acl_is_enabled()) {
48344 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48345 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48346 + } else {
48347 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48348 + strcpy(buf, fmt);
48349 + }
48350 +
48351 + return NO_FLOODING;
48352 +}
48353 +
48354 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48355 + __attribute__ ((format (printf, 2, 0)));
48356 +
48357 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48358 +{
48359 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48360 + unsigned int len = strlen(buf);
48361 +
48362 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48363 +
48364 + return;
48365 +}
48366 +
48367 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48368 + __attribute__ ((format (printf, 2, 3)));
48369 +
48370 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48371 +{
48372 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48373 + unsigned int len = strlen(buf);
48374 + va_list ap;
48375 +
48376 + va_start(ap, msg);
48377 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48378 + va_end(ap);
48379 +
48380 + return;
48381 +}
48382 +
48383 +static void gr_log_end(int audit)
48384 +{
48385 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48386 + unsigned int len = strlen(buf);
48387 +
48388 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48389 + printk("%s\n", buf);
48390 +
48391 + return;
48392 +}
48393 +
48394 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48395 +{
48396 + int logtype;
48397 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48398 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48399 + void *voidptr = NULL;
48400 + int num1 = 0, num2 = 0;
48401 + unsigned long ulong1 = 0, ulong2 = 0;
48402 + struct dentry *dentry = NULL;
48403 + struct vfsmount *mnt = NULL;
48404 + struct file *file = NULL;
48405 + struct task_struct *task = NULL;
48406 + const struct cred *cred, *pcred;
48407 + va_list ap;
48408 +
48409 + BEGIN_LOCKS(audit);
48410 + logtype = gr_log_start(audit);
48411 + if (logtype == FLOODING) {
48412 + END_LOCKS(audit);
48413 + return;
48414 + }
48415 + va_start(ap, argtypes);
48416 + switch (argtypes) {
48417 + case GR_TTYSNIFF:
48418 + task = va_arg(ap, struct task_struct *);
48419 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48420 + break;
48421 + case GR_SYSCTL_HIDDEN:
48422 + str1 = va_arg(ap, char *);
48423 + gr_log_middle_varargs(audit, msg, result, str1);
48424 + break;
48425 + case GR_RBAC:
48426 + dentry = va_arg(ap, struct dentry *);
48427 + mnt = va_arg(ap, struct vfsmount *);
48428 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48429 + break;
48430 + case GR_RBAC_STR:
48431 + dentry = va_arg(ap, struct dentry *);
48432 + mnt = va_arg(ap, struct vfsmount *);
48433 + str1 = va_arg(ap, char *);
48434 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48435 + break;
48436 + case GR_STR_RBAC:
48437 + str1 = va_arg(ap, char *);
48438 + dentry = va_arg(ap, struct dentry *);
48439 + mnt = va_arg(ap, struct vfsmount *);
48440 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48441 + break;
48442 + case GR_RBAC_MODE2:
48443 + dentry = va_arg(ap, struct dentry *);
48444 + mnt = va_arg(ap, struct vfsmount *);
48445 + str1 = va_arg(ap, char *);
48446 + str2 = va_arg(ap, char *);
48447 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48448 + break;
48449 + case GR_RBAC_MODE3:
48450 + dentry = va_arg(ap, struct dentry *);
48451 + mnt = va_arg(ap, struct vfsmount *);
48452 + str1 = va_arg(ap, char *);
48453 + str2 = va_arg(ap, char *);
48454 + str3 = va_arg(ap, char *);
48455 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48456 + break;
48457 + case GR_FILENAME:
48458 + dentry = va_arg(ap, struct dentry *);
48459 + mnt = va_arg(ap, struct vfsmount *);
48460 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48461 + break;
48462 + case GR_STR_FILENAME:
48463 + str1 = va_arg(ap, char *);
48464 + dentry = va_arg(ap, struct dentry *);
48465 + mnt = va_arg(ap, struct vfsmount *);
48466 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48467 + break;
48468 + case GR_FILENAME_STR:
48469 + dentry = va_arg(ap, struct dentry *);
48470 + mnt = va_arg(ap, struct vfsmount *);
48471 + str1 = va_arg(ap, char *);
48472 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48473 + break;
48474 + case GR_FILENAME_TWO_INT:
48475 + dentry = va_arg(ap, struct dentry *);
48476 + mnt = va_arg(ap, struct vfsmount *);
48477 + num1 = va_arg(ap, int);
48478 + num2 = va_arg(ap, int);
48479 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48480 + break;
48481 + case GR_FILENAME_TWO_INT_STR:
48482 + dentry = va_arg(ap, struct dentry *);
48483 + mnt = va_arg(ap, struct vfsmount *);
48484 + num1 = va_arg(ap, int);
48485 + num2 = va_arg(ap, int);
48486 + str1 = va_arg(ap, char *);
48487 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48488 + break;
48489 + case GR_TEXTREL:
48490 + file = va_arg(ap, struct file *);
48491 + ulong1 = va_arg(ap, unsigned long);
48492 + ulong2 = va_arg(ap, unsigned long);
48493 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48494 + break;
48495 + case GR_PTRACE:
48496 + task = va_arg(ap, struct task_struct *);
48497 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48498 + break;
48499 + case GR_RESOURCE:
48500 + task = va_arg(ap, struct task_struct *);
48501 + cred = __task_cred(task);
48502 + pcred = __task_cred(task->real_parent);
48503 + ulong1 = va_arg(ap, unsigned long);
48504 + str1 = va_arg(ap, char *);
48505 + ulong2 = va_arg(ap, unsigned long);
48506 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48507 + break;
48508 + case GR_CAP:
48509 + task = va_arg(ap, struct task_struct *);
48510 + cred = __task_cred(task);
48511 + pcred = __task_cred(task->real_parent);
48512 + str1 = va_arg(ap, char *);
48513 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48514 + break;
48515 + case GR_SIG:
48516 + str1 = va_arg(ap, char *);
48517 + voidptr = va_arg(ap, void *);
48518 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48519 + break;
48520 + case GR_SIG2:
48521 + task = va_arg(ap, struct task_struct *);
48522 + cred = __task_cred(task);
48523 + pcred = __task_cred(task->real_parent);
48524 + num1 = va_arg(ap, int);
48525 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48526 + break;
48527 + case GR_CRASH1:
48528 + task = va_arg(ap, struct task_struct *);
48529 + cred = __task_cred(task);
48530 + pcred = __task_cred(task->real_parent);
48531 + ulong1 = va_arg(ap, unsigned long);
48532 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48533 + break;
48534 + case GR_CRASH2:
48535 + task = va_arg(ap, struct task_struct *);
48536 + cred = __task_cred(task);
48537 + pcred = __task_cred(task->real_parent);
48538 + ulong1 = va_arg(ap, unsigned long);
48539 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48540 + break;
48541 + case GR_RWXMAP:
48542 + file = va_arg(ap, struct file *);
48543 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48544 + break;
48545 + case GR_PSACCT:
48546 + {
48547 + unsigned int wday, cday;
48548 + __u8 whr, chr;
48549 + __u8 wmin, cmin;
48550 + __u8 wsec, csec;
48551 + char cur_tty[64] = { 0 };
48552 + char parent_tty[64] = { 0 };
48553 +
48554 + task = va_arg(ap, struct task_struct *);
48555 + wday = va_arg(ap, unsigned int);
48556 + cday = va_arg(ap, unsigned int);
48557 + whr = va_arg(ap, int);
48558 + chr = va_arg(ap, int);
48559 + wmin = va_arg(ap, int);
48560 + cmin = va_arg(ap, int);
48561 + wsec = va_arg(ap, int);
48562 + csec = va_arg(ap, int);
48563 + ulong1 = va_arg(ap, unsigned long);
48564 + cred = __task_cred(task);
48565 + pcred = __task_cred(task->real_parent);
48566 +
48567 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48568 + }
48569 + break;
48570 + default:
48571 + gr_log_middle(audit, msg, ap);
48572 + }
48573 + va_end(ap);
48574 + gr_log_end(audit);
48575 + END_LOCKS(audit);
48576 +}
48577 diff -urNp linux-3.0.3/grsecurity/grsec_mem.c linux-3.0.3/grsecurity/grsec_mem.c
48578 --- linux-3.0.3/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48579 +++ linux-3.0.3/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
48580 @@ -0,0 +1,33 @@
48581 +#include <linux/kernel.h>
48582 +#include <linux/sched.h>
48583 +#include <linux/mm.h>
48584 +#include <linux/mman.h>
48585 +#include <linux/grinternal.h>
48586 +
48587 +void
48588 +gr_handle_ioperm(void)
48589 +{
48590 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48591 + return;
48592 +}
48593 +
48594 +void
48595 +gr_handle_iopl(void)
48596 +{
48597 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48598 + return;
48599 +}
48600 +
48601 +void
48602 +gr_handle_mem_readwrite(u64 from, u64 to)
48603 +{
48604 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48605 + return;
48606 +}
48607 +
48608 +void
48609 +gr_handle_vm86(void)
48610 +{
48611 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48612 + return;
48613 +}
48614 diff -urNp linux-3.0.3/grsecurity/grsec_mount.c linux-3.0.3/grsecurity/grsec_mount.c
48615 --- linux-3.0.3/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48616 +++ linux-3.0.3/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
48617 @@ -0,0 +1,62 @@
48618 +#include <linux/kernel.h>
48619 +#include <linux/sched.h>
48620 +#include <linux/mount.h>
48621 +#include <linux/grsecurity.h>
48622 +#include <linux/grinternal.h>
48623 +
48624 +void
48625 +gr_log_remount(const char *devname, const int retval)
48626 +{
48627 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48628 + if (grsec_enable_mount && (retval >= 0))
48629 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48630 +#endif
48631 + return;
48632 +}
48633 +
48634 +void
48635 +gr_log_unmount(const char *devname, const int retval)
48636 +{
48637 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48638 + if (grsec_enable_mount && (retval >= 0))
48639 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48640 +#endif
48641 + return;
48642 +}
48643 +
48644 +void
48645 +gr_log_mount(const char *from, const char *to, const int retval)
48646 +{
48647 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48648 + if (grsec_enable_mount && (retval >= 0))
48649 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48650 +#endif
48651 + return;
48652 +}
48653 +
48654 +int
48655 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48656 +{
48657 +#ifdef CONFIG_GRKERNSEC_ROFS
48658 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48659 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48660 + return -EPERM;
48661 + } else
48662 + return 0;
48663 +#endif
48664 + return 0;
48665 +}
48666 +
48667 +int
48668 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48669 +{
48670 +#ifdef CONFIG_GRKERNSEC_ROFS
48671 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48672 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48673 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48674 + return -EPERM;
48675 + } else
48676 + return 0;
48677 +#endif
48678 + return 0;
48679 +}
48680 diff -urNp linux-3.0.3/grsecurity/grsec_pax.c linux-3.0.3/grsecurity/grsec_pax.c
48681 --- linux-3.0.3/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48682 +++ linux-3.0.3/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
48683 @@ -0,0 +1,36 @@
48684 +#include <linux/kernel.h>
48685 +#include <linux/sched.h>
48686 +#include <linux/mm.h>
48687 +#include <linux/file.h>
48688 +#include <linux/grinternal.h>
48689 +#include <linux/grsecurity.h>
48690 +
48691 +void
48692 +gr_log_textrel(struct vm_area_struct * vma)
48693 +{
48694 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48695 + if (grsec_enable_audit_textrel)
48696 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48697 +#endif
48698 + return;
48699 +}
48700 +
48701 +void
48702 +gr_log_rwxmmap(struct file *file)
48703 +{
48704 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48705 + if (grsec_enable_log_rwxmaps)
48706 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48707 +#endif
48708 + return;
48709 +}
48710 +
48711 +void
48712 +gr_log_rwxmprotect(struct file *file)
48713 +{
48714 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48715 + if (grsec_enable_log_rwxmaps)
48716 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48717 +#endif
48718 + return;
48719 +}
48720 diff -urNp linux-3.0.3/grsecurity/grsec_ptrace.c linux-3.0.3/grsecurity/grsec_ptrace.c
48721 --- linux-3.0.3/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48722 +++ linux-3.0.3/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
48723 @@ -0,0 +1,14 @@
48724 +#include <linux/kernel.h>
48725 +#include <linux/sched.h>
48726 +#include <linux/grinternal.h>
48727 +#include <linux/grsecurity.h>
48728 +
48729 +void
48730 +gr_audit_ptrace(struct task_struct *task)
48731 +{
48732 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48733 + if (grsec_enable_audit_ptrace)
48734 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48735 +#endif
48736 + return;
48737 +}
48738 diff -urNp linux-3.0.3/grsecurity/grsec_sig.c linux-3.0.3/grsecurity/grsec_sig.c
48739 --- linux-3.0.3/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48740 +++ linux-3.0.3/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
48741 @@ -0,0 +1,206 @@
48742 +#include <linux/kernel.h>
48743 +#include <linux/sched.h>
48744 +#include <linux/delay.h>
48745 +#include <linux/grsecurity.h>
48746 +#include <linux/grinternal.h>
48747 +#include <linux/hardirq.h>
48748 +
48749 +char *signames[] = {
48750 + [SIGSEGV] = "Segmentation fault",
48751 + [SIGILL] = "Illegal instruction",
48752 + [SIGABRT] = "Abort",
48753 + [SIGBUS] = "Invalid alignment/Bus error"
48754 +};
48755 +
48756 +void
48757 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48758 +{
48759 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48760 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48761 + (sig == SIGABRT) || (sig == SIGBUS))) {
48762 + if (t->pid == current->pid) {
48763 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48764 + } else {
48765 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48766 + }
48767 + }
48768 +#endif
48769 + return;
48770 +}
48771 +
48772 +int
48773 +gr_handle_signal(const struct task_struct *p, const int sig)
48774 +{
48775 +#ifdef CONFIG_GRKERNSEC
48776 + if (current->pid > 1 && gr_check_protected_task(p)) {
48777 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48778 + return -EPERM;
48779 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48780 + return -EPERM;
48781 + }
48782 +#endif
48783 + return 0;
48784 +}
48785 +
48786 +#ifdef CONFIG_GRKERNSEC
48787 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48788 +
48789 +int gr_fake_force_sig(int sig, struct task_struct *t)
48790 +{
48791 + unsigned long int flags;
48792 + int ret, blocked, ignored;
48793 + struct k_sigaction *action;
48794 +
48795 + spin_lock_irqsave(&t->sighand->siglock, flags);
48796 + action = &t->sighand->action[sig-1];
48797 + ignored = action->sa.sa_handler == SIG_IGN;
48798 + blocked = sigismember(&t->blocked, sig);
48799 + if (blocked || ignored) {
48800 + action->sa.sa_handler = SIG_DFL;
48801 + if (blocked) {
48802 + sigdelset(&t->blocked, sig);
48803 + recalc_sigpending_and_wake(t);
48804 + }
48805 + }
48806 + if (action->sa.sa_handler == SIG_DFL)
48807 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
48808 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
48809 +
48810 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
48811 +
48812 + return ret;
48813 +}
48814 +#endif
48815 +
48816 +#ifdef CONFIG_GRKERNSEC_BRUTE
48817 +#define GR_USER_BAN_TIME (15 * 60)
48818 +
48819 +static int __get_dumpable(unsigned long mm_flags)
48820 +{
48821 + int ret;
48822 +
48823 + ret = mm_flags & MMF_DUMPABLE_MASK;
48824 + return (ret >= 2) ? 2 : ret;
48825 +}
48826 +#endif
48827 +
48828 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
48829 +{
48830 +#ifdef CONFIG_GRKERNSEC_BRUTE
48831 + uid_t uid = 0;
48832 +
48833 + if (!grsec_enable_brute)
48834 + return;
48835 +
48836 + rcu_read_lock();
48837 + read_lock(&tasklist_lock);
48838 + read_lock(&grsec_exec_file_lock);
48839 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
48840 + p->real_parent->brute = 1;
48841 + else {
48842 + const struct cred *cred = __task_cred(p), *cred2;
48843 + struct task_struct *tsk, *tsk2;
48844 +
48845 + if (!__get_dumpable(mm_flags) && cred->uid) {
48846 + struct user_struct *user;
48847 +
48848 + uid = cred->uid;
48849 +
48850 + /* this is put upon execution past expiration */
48851 + user = find_user(uid);
48852 + if (user == NULL)
48853 + goto unlock;
48854 + user->banned = 1;
48855 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
48856 + if (user->ban_expires == ~0UL)
48857 + user->ban_expires--;
48858 +
48859 + do_each_thread(tsk2, tsk) {
48860 + cred2 = __task_cred(tsk);
48861 + if (tsk != p && cred2->uid == uid)
48862 + gr_fake_force_sig(SIGKILL, tsk);
48863 + } while_each_thread(tsk2, tsk);
48864 + }
48865 + }
48866 +unlock:
48867 + read_unlock(&grsec_exec_file_lock);
48868 + read_unlock(&tasklist_lock);
48869 + rcu_read_unlock();
48870 +
48871 + if (uid)
48872 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
48873 +
48874 +#endif
48875 + return;
48876 +}
48877 +
48878 +void gr_handle_brute_check(void)
48879 +{
48880 +#ifdef CONFIG_GRKERNSEC_BRUTE
48881 + if (current->brute)
48882 + msleep(30 * 1000);
48883 +#endif
48884 + return;
48885 +}
48886 +
48887 +void gr_handle_kernel_exploit(void)
48888 +{
48889 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
48890 + const struct cred *cred;
48891 + struct task_struct *tsk, *tsk2;
48892 + struct user_struct *user;
48893 + uid_t uid;
48894 +
48895 + if (in_irq() || in_serving_softirq() || in_nmi())
48896 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
48897 +
48898 + uid = current_uid();
48899 +
48900 + if (uid == 0)
48901 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
48902 + else {
48903 + /* kill all the processes of this user, hold a reference
48904 + to their creds struct, and prevent them from creating
48905 + another process until system reset
48906 + */
48907 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
48908 + /* we intentionally leak this ref */
48909 + user = get_uid(current->cred->user);
48910 + if (user) {
48911 + user->banned = 1;
48912 + user->ban_expires = ~0UL;
48913 + }
48914 +
48915 + read_lock(&tasklist_lock);
48916 + do_each_thread(tsk2, tsk) {
48917 + cred = __task_cred(tsk);
48918 + if (cred->uid == uid)
48919 + gr_fake_force_sig(SIGKILL, tsk);
48920 + } while_each_thread(tsk2, tsk);
48921 + read_unlock(&tasklist_lock);
48922 + }
48923 +#endif
48924 +}
48925 +
48926 +int __gr_process_user_ban(struct user_struct *user)
48927 +{
48928 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48929 + if (unlikely(user->banned)) {
48930 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
48931 + user->banned = 0;
48932 + user->ban_expires = 0;
48933 + free_uid(user);
48934 + } else
48935 + return -EPERM;
48936 + }
48937 +#endif
48938 + return 0;
48939 +}
48940 +
48941 +int gr_process_user_ban(void)
48942 +{
48943 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48944 + return __gr_process_user_ban(current->cred->user);
48945 +#endif
48946 + return 0;
48947 +}
48948 diff -urNp linux-3.0.3/grsecurity/grsec_sock.c linux-3.0.3/grsecurity/grsec_sock.c
48949 --- linux-3.0.3/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
48950 +++ linux-3.0.3/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
48951 @@ -0,0 +1,244 @@
48952 +#include <linux/kernel.h>
48953 +#include <linux/module.h>
48954 +#include <linux/sched.h>
48955 +#include <linux/file.h>
48956 +#include <linux/net.h>
48957 +#include <linux/in.h>
48958 +#include <linux/ip.h>
48959 +#include <net/sock.h>
48960 +#include <net/inet_sock.h>
48961 +#include <linux/grsecurity.h>
48962 +#include <linux/grinternal.h>
48963 +#include <linux/gracl.h>
48964 +
48965 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
48966 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
48967 +
48968 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
48969 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
48970 +
48971 +#ifdef CONFIG_UNIX_MODULE
48972 +EXPORT_SYMBOL(gr_acl_handle_unix);
48973 +EXPORT_SYMBOL(gr_acl_handle_mknod);
48974 +EXPORT_SYMBOL(gr_handle_chroot_unix);
48975 +EXPORT_SYMBOL(gr_handle_create);
48976 +#endif
48977 +
48978 +#ifdef CONFIG_GRKERNSEC
48979 +#define gr_conn_table_size 32749
48980 +struct conn_table_entry {
48981 + struct conn_table_entry *next;
48982 + struct signal_struct *sig;
48983 +};
48984 +
48985 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
48986 +DEFINE_SPINLOCK(gr_conn_table_lock);
48987 +
48988 +extern const char * gr_socktype_to_name(unsigned char type);
48989 +extern const char * gr_proto_to_name(unsigned char proto);
48990 +extern const char * gr_sockfamily_to_name(unsigned char family);
48991 +
48992 +static __inline__ int
48993 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
48994 +{
48995 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
48996 +}
48997 +
48998 +static __inline__ int
48999 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49000 + __u16 sport, __u16 dport)
49001 +{
49002 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49003 + sig->gr_sport == sport && sig->gr_dport == dport))
49004 + return 1;
49005 + else
49006 + return 0;
49007 +}
49008 +
49009 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49010 +{
49011 + struct conn_table_entry **match;
49012 + unsigned int index;
49013 +
49014 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49015 + sig->gr_sport, sig->gr_dport,
49016 + gr_conn_table_size);
49017 +
49018 + newent->sig = sig;
49019 +
49020 + match = &gr_conn_table[index];
49021 + newent->next = *match;
49022 + *match = newent;
49023 +
49024 + return;
49025 +}
49026 +
49027 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49028 +{
49029 + struct conn_table_entry *match, *last = NULL;
49030 + unsigned int index;
49031 +
49032 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49033 + sig->gr_sport, sig->gr_dport,
49034 + gr_conn_table_size);
49035 +
49036 + match = gr_conn_table[index];
49037 + while (match && !conn_match(match->sig,
49038 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49039 + sig->gr_dport)) {
49040 + last = match;
49041 + match = match->next;
49042 + }
49043 +
49044 + if (match) {
49045 + if (last)
49046 + last->next = match->next;
49047 + else
49048 + gr_conn_table[index] = NULL;
49049 + kfree(match);
49050 + }
49051 +
49052 + return;
49053 +}
49054 +
49055 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49056 + __u16 sport, __u16 dport)
49057 +{
49058 + struct conn_table_entry *match;
49059 + unsigned int index;
49060 +
49061 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49062 +
49063 + match = gr_conn_table[index];
49064 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49065 + match = match->next;
49066 +
49067 + if (match)
49068 + return match->sig;
49069 + else
49070 + return NULL;
49071 +}
49072 +
49073 +#endif
49074 +
49075 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49076 +{
49077 +#ifdef CONFIG_GRKERNSEC
49078 + struct signal_struct *sig = task->signal;
49079 + struct conn_table_entry *newent;
49080 +
49081 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49082 + if (newent == NULL)
49083 + return;
49084 + /* no bh lock needed since we are called with bh disabled */
49085 + spin_lock(&gr_conn_table_lock);
49086 + gr_del_task_from_ip_table_nolock(sig);
49087 + sig->gr_saddr = inet->inet_rcv_saddr;
49088 + sig->gr_daddr = inet->inet_daddr;
49089 + sig->gr_sport = inet->inet_sport;
49090 + sig->gr_dport = inet->inet_dport;
49091 + gr_add_to_task_ip_table_nolock(sig, newent);
49092 + spin_unlock(&gr_conn_table_lock);
49093 +#endif
49094 + return;
49095 +}
49096 +
49097 +void gr_del_task_from_ip_table(struct task_struct *task)
49098 +{
49099 +#ifdef CONFIG_GRKERNSEC
49100 + spin_lock_bh(&gr_conn_table_lock);
49101 + gr_del_task_from_ip_table_nolock(task->signal);
49102 + spin_unlock_bh(&gr_conn_table_lock);
49103 +#endif
49104 + return;
49105 +}
49106 +
49107 +void
49108 +gr_attach_curr_ip(const struct sock *sk)
49109 +{
49110 +#ifdef CONFIG_GRKERNSEC
49111 + struct signal_struct *p, *set;
49112 + const struct inet_sock *inet = inet_sk(sk);
49113 +
49114 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49115 + return;
49116 +
49117 + set = current->signal;
49118 +
49119 + spin_lock_bh(&gr_conn_table_lock);
49120 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49121 + inet->inet_dport, inet->inet_sport);
49122 + if (unlikely(p != NULL)) {
49123 + set->curr_ip = p->curr_ip;
49124 + set->used_accept = 1;
49125 + gr_del_task_from_ip_table_nolock(p);
49126 + spin_unlock_bh(&gr_conn_table_lock);
49127 + return;
49128 + }
49129 + spin_unlock_bh(&gr_conn_table_lock);
49130 +
49131 + set->curr_ip = inet->inet_daddr;
49132 + set->used_accept = 1;
49133 +#endif
49134 + return;
49135 +}
49136 +
49137 +int
49138 +gr_handle_sock_all(const int family, const int type, const int protocol)
49139 +{
49140 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49141 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49142 + (family != AF_UNIX)) {
49143 + if (family == AF_INET)
49144 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49145 + else
49146 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49147 + return -EACCES;
49148 + }
49149 +#endif
49150 + return 0;
49151 +}
49152 +
49153 +int
49154 +gr_handle_sock_server(const struct sockaddr *sck)
49155 +{
49156 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49157 + if (grsec_enable_socket_server &&
49158 + in_group_p(grsec_socket_server_gid) &&
49159 + sck && (sck->sa_family != AF_UNIX) &&
49160 + (sck->sa_family != AF_LOCAL)) {
49161 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49162 + return -EACCES;
49163 + }
49164 +#endif
49165 + return 0;
49166 +}
49167 +
49168 +int
49169 +gr_handle_sock_server_other(const struct sock *sck)
49170 +{
49171 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49172 + if (grsec_enable_socket_server &&
49173 + in_group_p(grsec_socket_server_gid) &&
49174 + sck && (sck->sk_family != AF_UNIX) &&
49175 + (sck->sk_family != AF_LOCAL)) {
49176 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49177 + return -EACCES;
49178 + }
49179 +#endif
49180 + return 0;
49181 +}
49182 +
49183 +int
49184 +gr_handle_sock_client(const struct sockaddr *sck)
49185 +{
49186 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49187 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49188 + sck && (sck->sa_family != AF_UNIX) &&
49189 + (sck->sa_family != AF_LOCAL)) {
49190 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49191 + return -EACCES;
49192 + }
49193 +#endif
49194 + return 0;
49195 +}
49196 diff -urNp linux-3.0.3/grsecurity/grsec_sysctl.c linux-3.0.3/grsecurity/grsec_sysctl.c
49197 --- linux-3.0.3/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49198 +++ linux-3.0.3/grsecurity/grsec_sysctl.c 2011-08-23 21:48:14.000000000 -0400
49199 @@ -0,0 +1,442 @@
49200 +#include <linux/kernel.h>
49201 +#include <linux/sched.h>
49202 +#include <linux/sysctl.h>
49203 +#include <linux/grsecurity.h>
49204 +#include <linux/grinternal.h>
49205 +
49206 +int
49207 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49208 +{
49209 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49210 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49211 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49212 + return -EACCES;
49213 + }
49214 +#endif
49215 + return 0;
49216 +}
49217 +
49218 +#ifdef CONFIG_GRKERNSEC_ROFS
49219 +static int __maybe_unused one = 1;
49220 +#endif
49221 +
49222 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49223 +struct ctl_table grsecurity_table[] = {
49224 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49225 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49226 +#ifdef CONFIG_GRKERNSEC_IO
49227 + {
49228 + .procname = "disable_priv_io",
49229 + .data = &grsec_disable_privio,
49230 + .maxlen = sizeof(int),
49231 + .mode = 0600,
49232 + .proc_handler = &proc_dointvec,
49233 + },
49234 +#endif
49235 +#endif
49236 +#ifdef CONFIG_GRKERNSEC_LINK
49237 + {
49238 + .procname = "linking_restrictions",
49239 + .data = &grsec_enable_link,
49240 + .maxlen = sizeof(int),
49241 + .mode = 0600,
49242 + .proc_handler = &proc_dointvec,
49243 + },
49244 +#endif
49245 +#ifdef CONFIG_GRKERNSEC_BRUTE
49246 + {
49247 + .procname = "deter_bruteforce",
49248 + .data = &grsec_enable_brute,
49249 + .maxlen = sizeof(int),
49250 + .mode = 0600,
49251 + .proc_handler = &proc_dointvec,
49252 + },
49253 +#endif
49254 +#ifdef CONFIG_GRKERNSEC_FIFO
49255 + {
49256 + .procname = "fifo_restrictions",
49257 + .data = &grsec_enable_fifo,
49258 + .maxlen = sizeof(int),
49259 + .mode = 0600,
49260 + .proc_handler = &proc_dointvec,
49261 + },
49262 +#endif
49263 +#ifdef CONFIG_GRKERNSEC_EXECVE
49264 + {
49265 + .procname = "execve_limiting",
49266 + .data = &grsec_enable_execve,
49267 + .maxlen = sizeof(int),
49268 + .mode = 0600,
49269 + .proc_handler = &proc_dointvec,
49270 + },
49271 +#endif
49272 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49273 + {
49274 + .procname = "ip_blackhole",
49275 + .data = &grsec_enable_blackhole,
49276 + .maxlen = sizeof(int),
49277 + .mode = 0600,
49278 + .proc_handler = &proc_dointvec,
49279 + },
49280 + {
49281 + .procname = "lastack_retries",
49282 + .data = &grsec_lastack_retries,
49283 + .maxlen = sizeof(int),
49284 + .mode = 0600,
49285 + .proc_handler = &proc_dointvec,
49286 + },
49287 +#endif
49288 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49289 + {
49290 + .procname = "exec_logging",
49291 + .data = &grsec_enable_execlog,
49292 + .maxlen = sizeof(int),
49293 + .mode = 0600,
49294 + .proc_handler = &proc_dointvec,
49295 + },
49296 +#endif
49297 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49298 + {
49299 + .procname = "rwxmap_logging",
49300 + .data = &grsec_enable_log_rwxmaps,
49301 + .maxlen = sizeof(int),
49302 + .mode = 0600,
49303 + .proc_handler = &proc_dointvec,
49304 + },
49305 +#endif
49306 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49307 + {
49308 + .procname = "signal_logging",
49309 + .data = &grsec_enable_signal,
49310 + .maxlen = sizeof(int),
49311 + .mode = 0600,
49312 + .proc_handler = &proc_dointvec,
49313 + },
49314 +#endif
49315 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49316 + {
49317 + .procname = "forkfail_logging",
49318 + .data = &grsec_enable_forkfail,
49319 + .maxlen = sizeof(int),
49320 + .mode = 0600,
49321 + .proc_handler = &proc_dointvec,
49322 + },
49323 +#endif
49324 +#ifdef CONFIG_GRKERNSEC_TIME
49325 + {
49326 + .procname = "timechange_logging",
49327 + .data = &grsec_enable_time,
49328 + .maxlen = sizeof(int),
49329 + .mode = 0600,
49330 + .proc_handler = &proc_dointvec,
49331 + },
49332 +#endif
49333 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49334 + {
49335 + .procname = "chroot_deny_shmat",
49336 + .data = &grsec_enable_chroot_shmat,
49337 + .maxlen = sizeof(int),
49338 + .mode = 0600,
49339 + .proc_handler = &proc_dointvec,
49340 + },
49341 +#endif
49342 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49343 + {
49344 + .procname = "chroot_deny_unix",
49345 + .data = &grsec_enable_chroot_unix,
49346 + .maxlen = sizeof(int),
49347 + .mode = 0600,
49348 + .proc_handler = &proc_dointvec,
49349 + },
49350 +#endif
49351 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49352 + {
49353 + .procname = "chroot_deny_mount",
49354 + .data = &grsec_enable_chroot_mount,
49355 + .maxlen = sizeof(int),
49356 + .mode = 0600,
49357 + .proc_handler = &proc_dointvec,
49358 + },
49359 +#endif
49360 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49361 + {
49362 + .procname = "chroot_deny_fchdir",
49363 + .data = &grsec_enable_chroot_fchdir,
49364 + .maxlen = sizeof(int),
49365 + .mode = 0600,
49366 + .proc_handler = &proc_dointvec,
49367 + },
49368 +#endif
49369 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49370 + {
49371 + .procname = "chroot_deny_chroot",
49372 + .data = &grsec_enable_chroot_double,
49373 + .maxlen = sizeof(int),
49374 + .mode = 0600,
49375 + .proc_handler = &proc_dointvec,
49376 + },
49377 +#endif
49378 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49379 + {
49380 + .procname = "chroot_deny_pivot",
49381 + .data = &grsec_enable_chroot_pivot,
49382 + .maxlen = sizeof(int),
49383 + .mode = 0600,
49384 + .proc_handler = &proc_dointvec,
49385 + },
49386 +#endif
49387 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49388 + {
49389 + .procname = "chroot_enforce_chdir",
49390 + .data = &grsec_enable_chroot_chdir,
49391 + .maxlen = sizeof(int),
49392 + .mode = 0600,
49393 + .proc_handler = &proc_dointvec,
49394 + },
49395 +#endif
49396 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49397 + {
49398 + .procname = "chroot_deny_chmod",
49399 + .data = &grsec_enable_chroot_chmod,
49400 + .maxlen = sizeof(int),
49401 + .mode = 0600,
49402 + .proc_handler = &proc_dointvec,
49403 + },
49404 +#endif
49405 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49406 + {
49407 + .procname = "chroot_deny_mknod",
49408 + .data = &grsec_enable_chroot_mknod,
49409 + .maxlen = sizeof(int),
49410 + .mode = 0600,
49411 + .proc_handler = &proc_dointvec,
49412 + },
49413 +#endif
49414 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49415 + {
49416 + .procname = "chroot_restrict_nice",
49417 + .data = &grsec_enable_chroot_nice,
49418 + .maxlen = sizeof(int),
49419 + .mode = 0600,
49420 + .proc_handler = &proc_dointvec,
49421 + },
49422 +#endif
49423 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49424 + {
49425 + .procname = "chroot_execlog",
49426 + .data = &grsec_enable_chroot_execlog,
49427 + .maxlen = sizeof(int),
49428 + .mode = 0600,
49429 + .proc_handler = &proc_dointvec,
49430 + },
49431 +#endif
49432 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49433 + {
49434 + .procname = "chroot_caps",
49435 + .data = &grsec_enable_chroot_caps,
49436 + .maxlen = sizeof(int),
49437 + .mode = 0600,
49438 + .proc_handler = &proc_dointvec,
49439 + },
49440 +#endif
49441 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49442 + {
49443 + .procname = "chroot_deny_sysctl",
49444 + .data = &grsec_enable_chroot_sysctl,
49445 + .maxlen = sizeof(int),
49446 + .mode = 0600,
49447 + .proc_handler = &proc_dointvec,
49448 + },
49449 +#endif
49450 +#ifdef CONFIG_GRKERNSEC_TPE
49451 + {
49452 + .procname = "tpe",
49453 + .data = &grsec_enable_tpe,
49454 + .maxlen = sizeof(int),
49455 + .mode = 0600,
49456 + .proc_handler = &proc_dointvec,
49457 + },
49458 + {
49459 + .procname = "tpe_gid",
49460 + .data = &grsec_tpe_gid,
49461 + .maxlen = sizeof(int),
49462 + .mode = 0600,
49463 + .proc_handler = &proc_dointvec,
49464 + },
49465 +#endif
49466 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49467 + {
49468 + .procname = "tpe_invert",
49469 + .data = &grsec_enable_tpe_invert,
49470 + .maxlen = sizeof(int),
49471 + .mode = 0600,
49472 + .proc_handler = &proc_dointvec,
49473 + },
49474 +#endif
49475 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49476 + {
49477 + .procname = "tpe_restrict_all",
49478 + .data = &grsec_enable_tpe_all,
49479 + .maxlen = sizeof(int),
49480 + .mode = 0600,
49481 + .proc_handler = &proc_dointvec,
49482 + },
49483 +#endif
49484 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49485 + {
49486 + .procname = "socket_all",
49487 + .data = &grsec_enable_socket_all,
49488 + .maxlen = sizeof(int),
49489 + .mode = 0600,
49490 + .proc_handler = &proc_dointvec,
49491 + },
49492 + {
49493 + .procname = "socket_all_gid",
49494 + .data = &grsec_socket_all_gid,
49495 + .maxlen = sizeof(int),
49496 + .mode = 0600,
49497 + .proc_handler = &proc_dointvec,
49498 + },
49499 +#endif
49500 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49501 + {
49502 + .procname = "socket_client",
49503 + .data = &grsec_enable_socket_client,
49504 + .maxlen = sizeof(int),
49505 + .mode = 0600,
49506 + .proc_handler = &proc_dointvec,
49507 + },
49508 + {
49509 + .procname = "socket_client_gid",
49510 + .data = &grsec_socket_client_gid,
49511 + .maxlen = sizeof(int),
49512 + .mode = 0600,
49513 + .proc_handler = &proc_dointvec,
49514 + },
49515 +#endif
49516 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49517 + {
49518 + .procname = "socket_server",
49519 + .data = &grsec_enable_socket_server,
49520 + .maxlen = sizeof(int),
49521 + .mode = 0600,
49522 + .proc_handler = &proc_dointvec,
49523 + },
49524 + {
49525 + .procname = "socket_server_gid",
49526 + .data = &grsec_socket_server_gid,
49527 + .maxlen = sizeof(int),
49528 + .mode = 0600,
49529 + .proc_handler = &proc_dointvec,
49530 + },
49531 +#endif
49532 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49533 + {
49534 + .procname = "audit_group",
49535 + .data = &grsec_enable_group,
49536 + .maxlen = sizeof(int),
49537 + .mode = 0600,
49538 + .proc_handler = &proc_dointvec,
49539 + },
49540 + {
49541 + .procname = "audit_gid",
49542 + .data = &grsec_audit_gid,
49543 + .maxlen = sizeof(int),
49544 + .mode = 0600,
49545 + .proc_handler = &proc_dointvec,
49546 + },
49547 +#endif
49548 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49549 + {
49550 + .procname = "audit_chdir",
49551 + .data = &grsec_enable_chdir,
49552 + .maxlen = sizeof(int),
49553 + .mode = 0600,
49554 + .proc_handler = &proc_dointvec,
49555 + },
49556 +#endif
49557 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49558 + {
49559 + .procname = "audit_mount",
49560 + .data = &grsec_enable_mount,
49561 + .maxlen = sizeof(int),
49562 + .mode = 0600,
49563 + .proc_handler = &proc_dointvec,
49564 + },
49565 +#endif
49566 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49567 + {
49568 + .procname = "audit_textrel",
49569 + .data = &grsec_enable_audit_textrel,
49570 + .maxlen = sizeof(int),
49571 + .mode = 0600,
49572 + .proc_handler = &proc_dointvec,
49573 + },
49574 +#endif
49575 +#ifdef CONFIG_GRKERNSEC_DMESG
49576 + {
49577 + .procname = "dmesg",
49578 + .data = &grsec_enable_dmesg,
49579 + .maxlen = sizeof(int),
49580 + .mode = 0600,
49581 + .proc_handler = &proc_dointvec,
49582 + },
49583 +#endif
49584 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49585 + {
49586 + .procname = "chroot_findtask",
49587 + .data = &grsec_enable_chroot_findtask,
49588 + .maxlen = sizeof(int),
49589 + .mode = 0600,
49590 + .proc_handler = &proc_dointvec,
49591 + },
49592 +#endif
49593 +#ifdef CONFIG_GRKERNSEC_RESLOG
49594 + {
49595 + .procname = "resource_logging",
49596 + .data = &grsec_resource_logging,
49597 + .maxlen = sizeof(int),
49598 + .mode = 0600,
49599 + .proc_handler = &proc_dointvec,
49600 + },
49601 +#endif
49602 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49603 + {
49604 + .procname = "audit_ptrace",
49605 + .data = &grsec_enable_audit_ptrace,
49606 + .maxlen = sizeof(int),
49607 + .mode = 0600,
49608 + .proc_handler = &proc_dointvec,
49609 + },
49610 +#endif
49611 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49612 + {
49613 + .procname = "harden_ptrace",
49614 + .data = &grsec_enable_harden_ptrace,
49615 + .maxlen = sizeof(int),
49616 + .mode = 0600,
49617 + .proc_handler = &proc_dointvec,
49618 + },
49619 +#endif
49620 + {
49621 + .procname = "grsec_lock",
49622 + .data = &grsec_lock,
49623 + .maxlen = sizeof(int),
49624 + .mode = 0600,
49625 + .proc_handler = &proc_dointvec,
49626 + },
49627 +#endif
49628 +#ifdef CONFIG_GRKERNSEC_ROFS
49629 + {
49630 + .procname = "romount_protect",
49631 + .data = &grsec_enable_rofs,
49632 + .maxlen = sizeof(int),
49633 + .mode = 0600,
49634 + .proc_handler = &proc_dointvec_minmax,
49635 + .extra1 = &one,
49636 + .extra2 = &one,
49637 + },
49638 +#endif
49639 + { }
49640 +};
49641 +#endif
49642 diff -urNp linux-3.0.3/grsecurity/grsec_time.c linux-3.0.3/grsecurity/grsec_time.c
49643 --- linux-3.0.3/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49644 +++ linux-3.0.3/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
49645 @@ -0,0 +1,16 @@
49646 +#include <linux/kernel.h>
49647 +#include <linux/sched.h>
49648 +#include <linux/grinternal.h>
49649 +#include <linux/module.h>
49650 +
49651 +void
49652 +gr_log_timechange(void)
49653 +{
49654 +#ifdef CONFIG_GRKERNSEC_TIME
49655 + if (grsec_enable_time)
49656 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49657 +#endif
49658 + return;
49659 +}
49660 +
49661 +EXPORT_SYMBOL(gr_log_timechange);
49662 diff -urNp linux-3.0.3/grsecurity/grsec_tpe.c linux-3.0.3/grsecurity/grsec_tpe.c
49663 --- linux-3.0.3/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49664 +++ linux-3.0.3/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
49665 @@ -0,0 +1,39 @@
49666 +#include <linux/kernel.h>
49667 +#include <linux/sched.h>
49668 +#include <linux/file.h>
49669 +#include <linux/fs.h>
49670 +#include <linux/grinternal.h>
49671 +
49672 +extern int gr_acl_tpe_check(void);
49673 +
49674 +int
49675 +gr_tpe_allow(const struct file *file)
49676 +{
49677 +#ifdef CONFIG_GRKERNSEC
49678 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49679 + const struct cred *cred = current_cred();
49680 +
49681 + if (cred->uid && ((grsec_enable_tpe &&
49682 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49683 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49684 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49685 +#else
49686 + in_group_p(grsec_tpe_gid)
49687 +#endif
49688 + ) || gr_acl_tpe_check()) &&
49689 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49690 + (inode->i_mode & S_IWOTH))))) {
49691 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49692 + return 0;
49693 + }
49694 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49695 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49696 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49697 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49698 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49699 + return 0;
49700 + }
49701 +#endif
49702 +#endif
49703 + return 1;
49704 +}
49705 diff -urNp linux-3.0.3/grsecurity/grsum.c linux-3.0.3/grsecurity/grsum.c
49706 --- linux-3.0.3/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49707 +++ linux-3.0.3/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
49708 @@ -0,0 +1,61 @@
49709 +#include <linux/err.h>
49710 +#include <linux/kernel.h>
49711 +#include <linux/sched.h>
49712 +#include <linux/mm.h>
49713 +#include <linux/scatterlist.h>
49714 +#include <linux/crypto.h>
49715 +#include <linux/gracl.h>
49716 +
49717 +
49718 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49719 +#error "crypto and sha256 must be built into the kernel"
49720 +#endif
49721 +
49722 +int
49723 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49724 +{
49725 + char *p;
49726 + struct crypto_hash *tfm;
49727 + struct hash_desc desc;
49728 + struct scatterlist sg;
49729 + unsigned char temp_sum[GR_SHA_LEN];
49730 + volatile int retval = 0;
49731 + volatile int dummy = 0;
49732 + unsigned int i;
49733 +
49734 + sg_init_table(&sg, 1);
49735 +
49736 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49737 + if (IS_ERR(tfm)) {
49738 + /* should never happen, since sha256 should be built in */
49739 + return 1;
49740 + }
49741 +
49742 + desc.tfm = tfm;
49743 + desc.flags = 0;
49744 +
49745 + crypto_hash_init(&desc);
49746 +
49747 + p = salt;
49748 + sg_set_buf(&sg, p, GR_SALT_LEN);
49749 + crypto_hash_update(&desc, &sg, sg.length);
49750 +
49751 + p = entry->pw;
49752 + sg_set_buf(&sg, p, strlen(p));
49753 +
49754 + crypto_hash_update(&desc, &sg, sg.length);
49755 +
49756 + crypto_hash_final(&desc, temp_sum);
49757 +
49758 + memset(entry->pw, 0, GR_PW_LEN);
49759 +
49760 + for (i = 0; i < GR_SHA_LEN; i++)
49761 + if (sum[i] != temp_sum[i])
49762 + retval = 1;
49763 + else
49764 + dummy = 1; // waste a cycle
49765 +
49766 + crypto_free_hash(tfm);
49767 +
49768 + return retval;
49769 +}
49770 diff -urNp linux-3.0.3/grsecurity/Kconfig linux-3.0.3/grsecurity/Kconfig
49771 --- linux-3.0.3/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49772 +++ linux-3.0.3/grsecurity/Kconfig 2011-08-23 21:48:14.000000000 -0400
49773 @@ -0,0 +1,1050 @@
49774 +#
49775 +# grsecurity configuration
49776 +#
49777 +
49778 +menu "Grsecurity"
49779 +
49780 +config GRKERNSEC
49781 + bool "Grsecurity"
49782 + select CRYPTO
49783 + select CRYPTO_SHA256
49784 + help
49785 + If you say Y here, you will be able to configure many features
49786 + that will enhance the security of your system. It is highly
49787 + recommended that you say Y here and read through the help
49788 + for each option so that you fully understand the features and
49789 + can evaluate their usefulness for your machine.
49790 +
49791 +choice
49792 + prompt "Security Level"
49793 + depends on GRKERNSEC
49794 + default GRKERNSEC_CUSTOM
49795 +
49796 +config GRKERNSEC_LOW
49797 + bool "Low"
49798 + select GRKERNSEC_LINK
49799 + select GRKERNSEC_FIFO
49800 + select GRKERNSEC_EXECVE
49801 + select GRKERNSEC_RANDNET
49802 + select GRKERNSEC_DMESG
49803 + select GRKERNSEC_CHROOT
49804 + select GRKERNSEC_CHROOT_CHDIR
49805 +
49806 + help
49807 + If you choose this option, several of the grsecurity options will
49808 + be enabled that will give you greater protection against a number
49809 + of attacks, while assuring that none of your software will have any
49810 + conflicts with the additional security measures. If you run a lot
49811 + of unusual software, or you are having problems with the higher
49812 + security levels, you should say Y here. With this option, the
49813 + following features are enabled:
49814 +
49815 + - Linking restrictions
49816 + - FIFO restrictions
49817 + - Enforcing RLIMIT_NPROC on execve
49818 + - Restricted dmesg
49819 + - Enforced chdir("/") on chroot
49820 + - Runtime module disabling
49821 +
49822 +config GRKERNSEC_MEDIUM
49823 + bool "Medium"
49824 + select PAX
49825 + select PAX_EI_PAX
49826 + select PAX_PT_PAX_FLAGS
49827 + select PAX_HAVE_ACL_FLAGS
49828 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49829 + select GRKERNSEC_CHROOT
49830 + select GRKERNSEC_CHROOT_SYSCTL
49831 + select GRKERNSEC_LINK
49832 + select GRKERNSEC_FIFO
49833 + select GRKERNSEC_EXECVE
49834 + select GRKERNSEC_DMESG
49835 + select GRKERNSEC_RANDNET
49836 + select GRKERNSEC_FORKFAIL
49837 + select GRKERNSEC_TIME
49838 + select GRKERNSEC_SIGNAL
49839 + select GRKERNSEC_CHROOT
49840 + select GRKERNSEC_CHROOT_UNIX
49841 + select GRKERNSEC_CHROOT_MOUNT
49842 + select GRKERNSEC_CHROOT_PIVOT
49843 + select GRKERNSEC_CHROOT_DOUBLE
49844 + select GRKERNSEC_CHROOT_CHDIR
49845 + select GRKERNSEC_CHROOT_MKNOD
49846 + select GRKERNSEC_PROC
49847 + select GRKERNSEC_PROC_USERGROUP
49848 + select PAX_RANDUSTACK
49849 + select PAX_ASLR
49850 + select PAX_RANDMMAP
49851 + select PAX_REFCOUNT if (X86 || SPARC64)
49852 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49853 +
49854 + help
49855 + If you say Y here, several features in addition to those included
49856 + in the low additional security level will be enabled. These
49857 + features provide even more security to your system, though in rare
49858 + cases they may be incompatible with very old or poorly written
49859 + software. If you enable this option, make sure that your auth
49860 + service (identd) is running as gid 1001. With this option,
49861 + the following features (in addition to those provided in the
49862 + low additional security level) will be enabled:
49863 +
49864 + - Failed fork logging
49865 + - Time change logging
49866 + - Signal logging
49867 + - Deny mounts in chroot
49868 + - Deny double chrooting
49869 + - Deny sysctl writes in chroot
49870 + - Deny mknod in chroot
49871 + - Deny access to abstract AF_UNIX sockets out of chroot
49872 + - Deny pivot_root in chroot
49873 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
49874 + - /proc restrictions with special GID set to 10 (usually wheel)
49875 + - Address Space Layout Randomization (ASLR)
49876 + - Prevent exploitation of most refcount overflows
49877 + - Bounds checking of copying between the kernel and userland
49878 +
49879 +config GRKERNSEC_HIGH
49880 + bool "High"
49881 + select GRKERNSEC_LINK
49882 + select GRKERNSEC_FIFO
49883 + select GRKERNSEC_EXECVE
49884 + select GRKERNSEC_DMESG
49885 + select GRKERNSEC_FORKFAIL
49886 + select GRKERNSEC_TIME
49887 + select GRKERNSEC_SIGNAL
49888 + select GRKERNSEC_CHROOT
49889 + select GRKERNSEC_CHROOT_SHMAT
49890 + select GRKERNSEC_CHROOT_UNIX
49891 + select GRKERNSEC_CHROOT_MOUNT
49892 + select GRKERNSEC_CHROOT_FCHDIR
49893 + select GRKERNSEC_CHROOT_PIVOT
49894 + select GRKERNSEC_CHROOT_DOUBLE
49895 + select GRKERNSEC_CHROOT_CHDIR
49896 + select GRKERNSEC_CHROOT_MKNOD
49897 + select GRKERNSEC_CHROOT_CAPS
49898 + select GRKERNSEC_CHROOT_SYSCTL
49899 + select GRKERNSEC_CHROOT_FINDTASK
49900 + select GRKERNSEC_SYSFS_RESTRICT
49901 + select GRKERNSEC_PROC
49902 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49903 + select GRKERNSEC_HIDESYM
49904 + select GRKERNSEC_BRUTE
49905 + select GRKERNSEC_PROC_USERGROUP
49906 + select GRKERNSEC_KMEM
49907 + select GRKERNSEC_RESLOG
49908 + select GRKERNSEC_RANDNET
49909 + select GRKERNSEC_PROC_ADD
49910 + select GRKERNSEC_CHROOT_CHMOD
49911 + select GRKERNSEC_CHROOT_NICE
49912 + select GRKERNSEC_AUDIT_MOUNT
49913 + select GRKERNSEC_MODHARDEN if (MODULES)
49914 + select GRKERNSEC_HARDEN_PTRACE
49915 + select GRKERNSEC_VM86 if (X86_32)
49916 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49917 + select PAX
49918 + select PAX_RANDUSTACK
49919 + select PAX_ASLR
49920 + select PAX_RANDMMAP
49921 + select PAX_NOEXEC
49922 + select PAX_MPROTECT
49923 + select PAX_EI_PAX
49924 + select PAX_PT_PAX_FLAGS
49925 + select PAX_HAVE_ACL_FLAGS
49926 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49927 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49928 + select PAX_RANDKSTACK if (X86_TSC && X86)
49929 + select PAX_SEGMEXEC if (X86_32)
49930 + select PAX_PAGEEXEC
49931 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49932 + select PAX_EMUTRAMP if (PARISC)
49933 + select PAX_EMUSIGRT if (PARISC)
49934 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49935 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49936 + select PAX_REFCOUNT if (X86 || SPARC64)
49937 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49938 + help
49939 + If you say Y here, many of the features of grsecurity will be
49940 + enabled, which will protect you against many kinds of attacks
49941 + against your system. The heightened security comes at a cost
49942 + of an increased chance of incompatibilities with rare software
49943 + on your machine. Since this security level enables PaX, you should
49944 + view <http://pax.grsecurity.net> and read about the PaX
49945 + project. While you are there, download chpax and run it on
49946 + binaries that cause problems with PaX. Also remember that
49947 + since the /proc restrictions are enabled, you must run your
49948 + identd as gid 1001. This security level enables the following
49949 + features in addition to those listed in the low and medium
49950 + security levels:
49951 +
49952 + - Additional /proc restrictions
49953 + - Chmod restrictions in chroot
49954 + - No signals, ptrace, or viewing of processes outside of chroot
49955 + - Capability restrictions in chroot
49956 + - Deny fchdir out of chroot
49957 + - Priority restrictions in chroot
49958 + - Segmentation-based implementation of PaX
49959 + - Mprotect restrictions
49960 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49961 + - Kernel stack randomization
49962 + - Mount/unmount/remount logging
49963 + - Kernel symbol hiding
49964 + - Prevention of memory exhaustion-based exploits
49965 + - Hardening of module auto-loading
49966 + - Ptrace restrictions
49967 + - Restricted vm86 mode
49968 + - Restricted sysfs/debugfs
49969 + - Active kernel exploit response
49970 +
49971 +config GRKERNSEC_CUSTOM
49972 + bool "Custom"
49973 + help
49974 + If you say Y here, you will be able to configure every grsecurity
49975 + option, which allows you to enable many more features that aren't
49976 + covered in the basic security levels. These additional features
49977 + include TPE, socket restrictions, and the sysctl system for
49978 + grsecurity. It is advised that you read through the help for
49979 + each option to determine its usefulness in your situation.
49980 +
49981 +endchoice
49982 +
49983 +menu "Address Space Protection"
49984 +depends on GRKERNSEC
49985 +
49986 +config GRKERNSEC_KMEM
49987 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
49988 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49989 + help
49990 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49991 + be written to via mmap or otherwise to modify the running kernel.
49992 + /dev/port will also not be allowed to be opened. If you have module
49993 + support disabled, enabling this will close up four ways that are
49994 + currently used to insert malicious code into the running kernel.
49995 + Even with all these features enabled, we still highly recommend that
49996 + you use the RBAC system, as it is still possible for an attacker to
49997 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49998 + If you are not using XFree86, you may be able to stop this additional
49999 + case by enabling the 'Disable privileged I/O' option. Though nothing
50000 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50001 + but only to video memory, which is the only writing we allow in this
50002 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50003 + not be allowed to mprotect it with PROT_WRITE later.
50004 + It is highly recommended that you say Y here if you meet all the
50005 + conditions above.
50006 +
50007 +config GRKERNSEC_VM86
50008 + bool "Restrict VM86 mode"
50009 + depends on X86_32
50010 +
50011 + help
50012 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50013 + make use of a special execution mode on 32bit x86 processors called
50014 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50015 + video cards and will still work with this option enabled. The purpose
50016 + of the option is to prevent exploitation of emulation errors in
50017 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50018 + Nearly all users should be able to enable this option.
50019 +
50020 +config GRKERNSEC_IO
50021 + bool "Disable privileged I/O"
50022 + depends on X86
50023 + select RTC_CLASS
50024 + select RTC_INTF_DEV
50025 + select RTC_DRV_CMOS
50026 +
50027 + help
50028 + If you say Y here, all ioperm and iopl calls will return an error.
50029 + Ioperm and iopl can be used to modify the running kernel.
50030 + Unfortunately, some programs need this access to operate properly,
50031 + the most notable of which are XFree86 and hwclock. hwclock can be
50032 + remedied by having RTC support in the kernel, so real-time
50033 + clock support is enabled if this option is enabled, to ensure
50034 + that hwclock operates correctly. XFree86 still will not
50035 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50036 + IF YOU USE XFree86. If you use XFree86 and you still want to
50037 + protect your kernel against modification, use the RBAC system.
50038 +
50039 +config GRKERNSEC_PROC_MEMMAP
50040 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50041 + default y if (PAX_NOEXEC || PAX_ASLR)
50042 + depends on PAX_NOEXEC || PAX_ASLR
50043 + help
50044 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50045 + give no information about the addresses of its mappings if
50046 + PaX features that rely on random addresses are enabled on the task.
50047 + If you use PaX it is greatly recommended that you say Y here as it
50048 + closes up a hole that makes the full ASLR useless for suid
50049 + binaries.
50050 +
50051 +config GRKERNSEC_BRUTE
50052 + bool "Deter exploit bruteforcing"
50053 + help
50054 + If you say Y here, attempts to bruteforce exploits against forking
50055 + daemons such as apache or sshd, as well as against suid/sgid binaries
50056 + will be deterred. When a child of a forking daemon is killed by PaX
50057 + or crashes due to an illegal instruction or other suspicious signal,
50058 + the parent process will be delayed 30 seconds upon every subsequent
50059 + fork until the administrator is able to assess the situation and
50060 + restart the daemon.
50061 + In the suid/sgid case, the attempt is logged, the user has all their
50062 + processes terminated, and they are prevented from executing any further
50063 + processes for 15 minutes.
50064 + It is recommended that you also enable signal logging in the auditing
50065 + section so that logs are generated when a process triggers a suspicious
50066 + signal.
50067 + If the sysctl option is enabled, a sysctl option with name
50068 + "deter_bruteforce" is created.
50069 +
50070 +
50071 +config GRKERNSEC_MODHARDEN
50072 + bool "Harden module auto-loading"
50073 + depends on MODULES
50074 + help
50075 + If you say Y here, module auto-loading in response to use of some
50076 + feature implemented by an unloaded module will be restricted to
50077 + root users. Enabling this option helps defend against attacks
50078 + by unprivileged users who abuse the auto-loading behavior to
50079 + cause a vulnerable module to load that is then exploited.
50080 +
50081 + If this option prevents a legitimate use of auto-loading for a
50082 + non-root user, the administrator can execute modprobe manually
50083 + with the exact name of the module mentioned in the alert log.
50084 + Alternatively, the administrator can add the module to the list
50085 + of modules loaded at boot by modifying init scripts.
50086 +
50087 + Modification of init scripts will most likely be needed on
50088 + Ubuntu servers with encrypted home directory support enabled,
50089 + as the first non-root user logging in will cause the ecb(aes),
50090 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50091 +
50092 +config GRKERNSEC_HIDESYM
50093 + bool "Hide kernel symbols"
50094 + help
50095 + If you say Y here, getting information on loaded modules, and
50096 + displaying all kernel symbols through a syscall will be restricted
50097 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50098 + /proc/kallsyms will be restricted to the root user. The RBAC
50099 + system can hide that entry even from root.
50100 +
50101 + This option also prevents leaking of kernel addresses through
50102 + several /proc entries.
50103 +
50104 + Note that this option is only effective provided the following
50105 + conditions are met:
50106 + 1) The kernel using grsecurity is not precompiled by some distribution
50107 + 2) You have also enabled GRKERNSEC_DMESG
50108 + 3) You are using the RBAC system and hiding other files such as your
50109 + kernel image and System.map. Alternatively, enabling this option
50110 + causes the permissions on /boot, /lib/modules, and the kernel
50111 + source directory to change at compile time to prevent
50112 + reading by non-root users.
50113 + If the above conditions are met, this option will aid in providing a
50114 + useful protection against local kernel exploitation of overflows
50115 + and arbitrary read/write vulnerabilities.
50116 +
50117 +config GRKERNSEC_KERN_LOCKOUT
50118 + bool "Active kernel exploit response"
50119 + depends on X86 || ARM || PPC || SPARC
50120 + help
50121 + If you say Y here, when a PaX alert is triggered due to suspicious
50122 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50123 + or an OOPs occurs due to bad memory accesses, instead of just
50124 + terminating the offending process (and potentially allowing
50125 + a subsequent exploit from the same user), we will take one of two
50126 + actions:
50127 + If the user was root, we will panic the system
50128 + If the user was non-root, we will log the attempt, terminate
50129 + all processes owned by the user, then prevent them from creating
50130 + any new processes until the system is restarted
50131 + This deters repeated kernel exploitation/bruteforcing attempts
50132 + and is useful for later forensics.
50133 +
50134 +endmenu
50135 +menu "Role Based Access Control Options"
50136 +depends on GRKERNSEC
50137 +
50138 +config GRKERNSEC_RBAC_DEBUG
50139 + bool
50140 +
50141 +config GRKERNSEC_NO_RBAC
50142 + bool "Disable RBAC system"
50143 + help
50144 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50145 + preventing the RBAC system from being enabled. You should only say Y
50146 + here if you have no intention of using the RBAC system, so as to prevent
50147 + an attacker with root access from misusing the RBAC system to hide files
50148 + and processes when loadable module support and /dev/[k]mem have been
50149 + locked down.
50150 +
50151 +config GRKERNSEC_ACL_HIDEKERN
50152 + bool "Hide kernel processes"
50153 + help
50154 + If you say Y here, all kernel threads will be hidden to all
50155 + processes but those whose subject has the "view hidden processes"
50156 + flag.
50157 +
50158 +config GRKERNSEC_ACL_MAXTRIES
50159 + int "Maximum tries before password lockout"
50160 + default 3
50161 + help
50162 + This option enforces the maximum number of times a user can attempt
50163 + to authorize themselves with the grsecurity RBAC system before being
50164 + denied the ability to attempt authorization again for a specified time.
50165 + The lower the number, the harder it will be to brute-force a password.
50166 +
50167 +config GRKERNSEC_ACL_TIMEOUT
50168 + int "Time to wait after max password tries, in seconds"
50169 + default 30
50170 + help
50171 + This option specifies the time the user must wait after attempting to
50172 + authorize to the RBAC system with the maximum number of invalid
50173 + passwords. The higher the number, the harder it will be to brute-force
50174 + a password.
50175 +
50176 +endmenu
50177 +menu "Filesystem Protections"
50178 +depends on GRKERNSEC
50179 +
50180 +config GRKERNSEC_PROC
50181 + bool "Proc restrictions"
50182 + help
50183 + If you say Y here, the permissions of the /proc filesystem
50184 + will be altered to enhance system security and privacy. You MUST
50185 + choose either a user only restriction or a user and group restriction.
50186 + Depending upon the option you choose, you can either restrict users to
50187 + see only the processes they themselves run, or choose a group that can
50188 + view all processes and files normally restricted to root if you choose
50189 + the "restrict to user only" option. NOTE: If you're running identd as
50190 + a non-root user, you will have to run it as the group you specify here.
50191 +
50192 +config GRKERNSEC_PROC_USER
50193 + bool "Restrict /proc to user only"
50194 + depends on GRKERNSEC_PROC
50195 + help
50196 + If you say Y here, non-root users will only be able to view their own
50197 + processes, and restricts them from viewing network-related information,
50198 + and viewing kernel symbol and module information.
50199 +
50200 +config GRKERNSEC_PROC_USERGROUP
50201 + bool "Allow special group"
50202 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50203 + help
50204 + If you say Y here, you will be able to select a group that will be
50205 + able to view all processes and network-related information. If you've
50206 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50207 + remain hidden. This option is useful if you want to run identd as
50208 + a non-root user.
50209 +
50210 +config GRKERNSEC_PROC_GID
50211 + int "GID for special group"
50212 + depends on GRKERNSEC_PROC_USERGROUP
50213 + default 1001
50214 +
50215 +config GRKERNSEC_PROC_ADD
50216 + bool "Additional restrictions"
50217 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50218 + help
50219 + If you say Y here, additional restrictions will be placed on
50220 + /proc that keep normal users from viewing device information and
50221 + slabinfo information that could be useful for exploits.
50222 +
50223 +config GRKERNSEC_LINK
50224 + bool "Linking restrictions"
50225 + help
50226 + If you say Y here, /tmp race exploits will be prevented, since users
50227 + will no longer be able to follow symlinks owned by other users in
50228 + world-writable +t directories (e.g. /tmp), unless the owner of the
50229 + symlink is the owner of the directory. Users will also not be
50230 + able to hardlink to files they do not own. If the sysctl option is
50231 + enabled, a sysctl option with name "linking_restrictions" is created.
50232 +
50233 +config GRKERNSEC_FIFO
50234 + bool "FIFO restrictions"
50235 + help
50236 + If you say Y here, users will not be able to write to FIFOs they don't
50237 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50238 + the FIFO is the same owner of the directory it's held in. If the sysctl
50239 + option is enabled, a sysctl option with name "fifo_restrictions" is
50240 + created.
50241 +
50242 +config GRKERNSEC_SYSFS_RESTRICT
50243 + bool "Sysfs/debugfs restriction"
50244 + depends on SYSFS
50245 + help
50246 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50247 + any filesystem normally mounted under it (e.g. debugfs) will only
50248 + be accessible by root. These filesystems generally provide access
50249 + to hardware and debug information that isn't appropriate for unprivileged
50250 + users of the system. Sysfs and debugfs have also become a large source
50251 + of new vulnerabilities, ranging from infoleaks to local compromise.
50252 + There has been very little oversight with an eye toward security involved
50253 + in adding new exporters of information to these filesystems, so their
50254 + use is discouraged.
50255 + This option is equivalent to a chmod 0700 of the mount paths.
50256 +
50257 +config GRKERNSEC_ROFS
50258 + bool "Runtime read-only mount protection"
50259 + help
50260 + If you say Y here, a sysctl option with name "romount_protect" will
50261 + be created. By setting this option to 1 at runtime, filesystems
50262 + will be protected in the following ways:
50263 + * No new writable mounts will be allowed
50264 + * Existing read-only mounts won't be able to be remounted read/write
50265 + * Write operations will be denied on all block devices
50266 + This option acts independently of grsec_lock: once it is set to 1,
50267 + it cannot be turned off. Therefore, please be mindful of the resulting
50268 + behavior if this option is enabled in an init script on a read-only
50269 + filesystem. This feature is mainly intended for secure embedded systems.
50270 +
50271 +config GRKERNSEC_CHROOT
50272 + bool "Chroot jail restrictions"
50273 + help
50274 + If you say Y here, you will be able to choose several options that will
50275 + make breaking out of a chrooted jail much more difficult. If you
50276 + encounter no software incompatibilities with the following options, it
50277 + is recommended that you enable each one.
50278 +
50279 +config GRKERNSEC_CHROOT_MOUNT
50280 + bool "Deny mounts"
50281 + depends on GRKERNSEC_CHROOT
50282 + help
50283 + If you say Y here, processes inside a chroot will not be able to
50284 + mount or remount filesystems. If the sysctl option is enabled, a
50285 + sysctl option with name "chroot_deny_mount" is created.
50286 +
50287 +config GRKERNSEC_CHROOT_DOUBLE
50288 + bool "Deny double-chroots"
50289 + depends on GRKERNSEC_CHROOT
50290 + help
50291 + If you say Y here, processes inside a chroot will not be able to chroot
50292 + again outside the chroot. This is a widely used method of breaking
50293 + out of a chroot jail and should not be allowed. If the sysctl
50294 + option is enabled, a sysctl option with name
50295 + "chroot_deny_chroot" is created.
50296 +
50297 +config GRKERNSEC_CHROOT_PIVOT
50298 + bool "Deny pivot_root in chroot"
50299 + depends on GRKERNSEC_CHROOT
50300 + help
50301 + If you say Y here, processes inside a chroot will not be able to use
50302 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50303 + works similar to chroot in that it changes the root filesystem. This
50304 + function could be misused in a chrooted process to attempt to break out
50305 + of the chroot, and therefore should not be allowed. If the sysctl
50306 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50307 + created.
50308 +
50309 +config GRKERNSEC_CHROOT_CHDIR
50310 + bool "Enforce chdir(\"/\") on all chroots"
50311 + depends on GRKERNSEC_CHROOT
50312 + help
50313 + If you say Y here, the current working directory of all newly-chrooted
50314 + applications will be set to the root directory of the chroot.
50315 + The man page on chroot(2) states:
50316 + Note that this call does not change the current working
50317 + directory, so that `.' can be outside the tree rooted at
50318 + `/'. In particular, the super-user can escape from a
50319 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50320 +
50321 + It is recommended that you say Y here, since it's not known to break
50322 + any software. If the sysctl option is enabled, a sysctl option with
50323 + name "chroot_enforce_chdir" is created.
50324 +
50325 +config GRKERNSEC_CHROOT_CHMOD
50326 + bool "Deny (f)chmod +s"
50327 + depends on GRKERNSEC_CHROOT
50328 + help
50329 + If you say Y here, processes inside a chroot will not be able to chmod
50330 + or fchmod files to make them have suid or sgid bits. This protects
50331 + against another published method of breaking a chroot. If the sysctl
50332 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50333 + created.
50334 +
50335 +config GRKERNSEC_CHROOT_FCHDIR
50336 + bool "Deny fchdir out of chroot"
50337 + depends on GRKERNSEC_CHROOT
50338 + help
50339 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50340 + to a file descriptor of the chrooting process that points to a directory
50341 + outside the filesystem will be stopped. If the sysctl option
50342 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50343 +
50344 +config GRKERNSEC_CHROOT_MKNOD
50345 + bool "Deny mknod"
50346 + depends on GRKERNSEC_CHROOT
50347 + help
50348 + If you say Y here, processes inside a chroot will not be allowed to
50349 + mknod. The problem with using mknod inside a chroot is that it
50350 + would allow an attacker to create a device entry that is the same
50351 + as one on the physical root of your system, which could range from
50352 + anything from the console device to a device for your harddrive (which
50353 + they could then use to wipe the drive or steal data). It is recommended
50354 + that you say Y here, unless you run into software incompatibilities.
50355 + If the sysctl option is enabled, a sysctl option with name
50356 + "chroot_deny_mknod" is created.
50357 +
50358 +config GRKERNSEC_CHROOT_SHMAT
50359 + bool "Deny shmat() out of chroot"
50360 + depends on GRKERNSEC_CHROOT
50361 + help
50362 + If you say Y here, processes inside a chroot will not be able to attach
50363 + to shared memory segments that were created outside of the chroot jail.
50364 + It is recommended that you say Y here. If the sysctl option is enabled,
50365 + a sysctl option with name "chroot_deny_shmat" is created.
50366 +
50367 +config GRKERNSEC_CHROOT_UNIX
50368 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50369 + depends on GRKERNSEC_CHROOT
50370 + help
50371 + If you say Y here, processes inside a chroot will not be able to
50372 + connect to abstract (meaning not belonging to a filesystem) Unix
50373 + domain sockets that were bound outside of a chroot. It is recommended
50374 + that you say Y here. If the sysctl option is enabled, a sysctl option
50375 + with name "chroot_deny_unix" is created.
50376 +
50377 +config GRKERNSEC_CHROOT_FINDTASK
50378 + bool "Protect outside processes"
50379 + depends on GRKERNSEC_CHROOT
50380 + help
50381 + If you say Y here, processes inside a chroot will not be able to
50382 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50383 + getsid, or view any process outside of the chroot. If the sysctl
50384 + option is enabled, a sysctl option with name "chroot_findtask" is
50385 + created.
50386 +
50387 +config GRKERNSEC_CHROOT_NICE
50388 + bool "Restrict priority changes"
50389 + depends on GRKERNSEC_CHROOT
50390 + help
50391 + If you say Y here, processes inside a chroot will not be able to raise
50392 + the priority of processes in the chroot, or alter the priority of
50393 + processes outside the chroot. This provides more security than simply
50394 + removing CAP_SYS_NICE from the process' capability set. If the
50395 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50396 + is created.
50397 +
50398 +config GRKERNSEC_CHROOT_SYSCTL
50399 + bool "Deny sysctl writes"
50400 + depends on GRKERNSEC_CHROOT
50401 + help
50402 + If you say Y here, an attacker in a chroot will not be able to
50403 + write to sysctl entries, either by sysctl(2) or through a /proc
50404 + interface. It is strongly recommended that you say Y here. If the
50405 + sysctl option is enabled, a sysctl option with name
50406 + "chroot_deny_sysctl" is created.
50407 +
50408 +config GRKERNSEC_CHROOT_CAPS
50409 + bool "Capability restrictions"
50410 + depends on GRKERNSEC_CHROOT
50411 + help
50412 + If you say Y here, the capabilities on all root processes within a
50413 + chroot jail will be lowered to stop module insertion, raw i/o,
50414 + system and net admin tasks, rebooting the system, modifying immutable
50415 + files, modifying IPC owned by another, and changing the system time.
50416 + This is left an option because it can break some apps. Disable this
50417 + if your chrooted apps are having problems performing those kinds of
50418 + tasks. If the sysctl option is enabled, a sysctl option with
50419 + name "chroot_caps" is created.
50420 +
50421 +endmenu
50422 +menu "Kernel Auditing"
50423 +depends on GRKERNSEC
50424 +
50425 +config GRKERNSEC_AUDIT_GROUP
50426 + bool "Single group for auditing"
50427 + help
50428 + If you say Y here, the exec, chdir, and (un)mount logging features
50429 + will only operate on a group you specify. This option is recommended
50430 + if you only want to watch certain users instead of having a large
50431 + amount of logs from the entire system. If the sysctl option is enabled,
50432 + a sysctl option with name "audit_group" is created.
50433 +
50434 +config GRKERNSEC_AUDIT_GID
50435 + int "GID for auditing"
50436 + depends on GRKERNSEC_AUDIT_GROUP
50437 + default 1007
50438 +
50439 +config GRKERNSEC_EXECLOG
50440 + bool "Exec logging"
50441 + help
50442 + If you say Y here, all execve() calls will be logged (since the
50443 + other exec*() calls are frontends to execve(), all execution
50444 + will be logged). Useful for shell-servers that like to keep track
50445 + of their users. If the sysctl option is enabled, a sysctl option with
50446 + name "exec_logging" is created.
50447 + WARNING: This option when enabled will produce a LOT of logs, especially
50448 + on an active system.
50449 +
50450 +config GRKERNSEC_RESLOG
50451 + bool "Resource logging"
50452 + help
50453 + If you say Y here, all attempts to overstep resource limits will
50454 + be logged with the resource name, the requested size, and the current
50455 + limit. It is highly recommended that you say Y here. If the sysctl
50456 + option is enabled, a sysctl option with name "resource_logging" is
50457 + created. If the RBAC system is enabled, the sysctl value is ignored.
50458 +
50459 +config GRKERNSEC_CHROOT_EXECLOG
50460 + bool "Log execs within chroot"
50461 + help
50462 + If you say Y here, all executions inside a chroot jail will be logged
50463 + to syslog. This can cause a large amount of logs if certain
50464 + applications (eg. djb's daemontools) are installed on the system, and
50465 + is therefore left as an option. If the sysctl option is enabled, a
50466 + sysctl option with name "chroot_execlog" is created.
50467 +
50468 +config GRKERNSEC_AUDIT_PTRACE
50469 + bool "Ptrace logging"
50470 + help
50471 + If you say Y here, all attempts to attach to a process via ptrace
50472 + will be logged. If the sysctl option is enabled, a sysctl option
50473 + with name "audit_ptrace" is created.
50474 +
50475 +config GRKERNSEC_AUDIT_CHDIR
50476 + bool "Chdir logging"
50477 + help
50478 + If you say Y here, all chdir() calls will be logged. If the sysctl
50479 + option is enabled, a sysctl option with name "audit_chdir" is created.
50480 +
50481 +config GRKERNSEC_AUDIT_MOUNT
50482 + bool "(Un)Mount logging"
50483 + help
50484 + If you say Y here, all mounts and unmounts will be logged. If the
50485 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50486 + created.
50487 +
50488 +config GRKERNSEC_SIGNAL
50489 + bool "Signal logging"
50490 + help
50491 + If you say Y here, certain important signals will be logged, such as
50492 + SIGSEGV, which will as a result inform you of when an error in a program
50493 + occurred, which in some cases could mean a possible exploit attempt.
50494 + If the sysctl option is enabled, a sysctl option with name
50495 + "signal_logging" is created.
50496 +
50497 +config GRKERNSEC_FORKFAIL
50498 + bool "Fork failure logging"
50499 + help
50500 + If you say Y here, all failed fork() attempts will be logged.
50501 + This could suggest a fork bomb, or someone attempting to overstep
50502 + their process limit. If the sysctl option is enabled, a sysctl option
50503 + with name "forkfail_logging" is created.
50504 +
50505 +config GRKERNSEC_TIME
50506 + bool "Time change logging"
50507 + help
50508 + If you say Y here, any changes of the system clock will be logged.
50509 + If the sysctl option is enabled, a sysctl option with name
50510 + "timechange_logging" is created.
50511 +
50512 +config GRKERNSEC_PROC_IPADDR
50513 + bool "/proc/<pid>/ipaddr support"
50514 + help
50515 + If you say Y here, a new entry will be added to each /proc/<pid>
50516 + directory that contains the IP address of the person using the task.
50517 + The IP is carried across local TCP and AF_UNIX stream sockets.
50518 + This information can be useful for IDS/IPSes to perform remote response
50519 + to a local attack. The entry is readable by only the owner of the
50520 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50521 + the RBAC system), and thus does not create privacy concerns.
50522 +
50523 +config GRKERNSEC_RWXMAP_LOG
50524 + bool 'Denied RWX mmap/mprotect logging'
50525 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50526 + help
50527 + If you say Y here, calls to mmap() and mprotect() with explicit
50528 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50529 + denied by the PAX_MPROTECT feature. If the sysctl option is
50530 + enabled, a sysctl option with name "rwxmap_logging" is created.
50531 +
50532 +config GRKERNSEC_AUDIT_TEXTREL
50533 + bool 'ELF text relocations logging (READ HELP)'
50534 + depends on PAX_MPROTECT
50535 + help
50536 + If you say Y here, text relocations will be logged with the filename
50537 + of the offending library or binary. The purpose of the feature is
50538 + to help Linux distribution developers get rid of libraries and
50539 + binaries that need text relocations which hinder the future progress
50540 + of PaX. Only Linux distribution developers should say Y here, and
50541 + never on a production machine, as this option creates an information
50542 + leak that could aid an attacker in defeating the randomization of
50543 + a single memory region. If the sysctl option is enabled, a sysctl
50544 + option with name "audit_textrel" is created.
50545 +
50546 +endmenu
50547 +
50548 +menu "Executable Protections"
50549 +depends on GRKERNSEC
50550 +
50551 +config GRKERNSEC_EXECVE
50552 + bool "Enforce RLIMIT_NPROC on execs"
50553 + help
50554 + If you say Y here, users with a resource limit on processes will
50555 + have the value checked during execve() calls. The current system
50556 + only checks the system limit during fork() calls. If the sysctl option
50557 + is enabled, a sysctl option with name "execve_limiting" is created.
50558 +
50559 +config GRKERNSEC_DMESG
50560 + bool "Dmesg(8) restriction"
50561 + help
50562 + If you say Y here, non-root users will not be able to use dmesg(8)
50563 + to view up to the last 4kb of messages in the kernel's log buffer.
50564 + The kernel's log buffer often contains kernel addresses and other
50565 + identifying information useful to an attacker in fingerprinting a
50566 + system for a targeted exploit.
50567 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50568 + created.
50569 +
50570 +config GRKERNSEC_HARDEN_PTRACE
50571 + bool "Deter ptrace-based process snooping"
50572 + help
50573 + If you say Y here, TTY sniffers and other malicious monitoring
50574 + programs implemented through ptrace will be defeated. If you
50575 + have been using the RBAC system, this option has already been
50576 + enabled for several years for all users, with the ability to make
50577 + fine-grained exceptions.
50578 +
50579 + This option only affects the ability of non-root users to ptrace
50580 + processes that are not a descendent of the ptracing process.
50581 + This means that strace ./binary and gdb ./binary will still work,
50582 + but attaching to arbitrary processes will not. If the sysctl
50583 + option is enabled, a sysctl option with name "harden_ptrace" is
50584 + created.
50585 +
50586 +config GRKERNSEC_TPE
50587 + bool "Trusted Path Execution (TPE)"
50588 + help
50589 + If you say Y here, you will be able to choose a gid to add to the
50590 + supplementary groups of users you want to mark as "untrusted."
50591 + These users will not be able to execute any files that are not in
50592 + root-owned directories writable only by root. If the sysctl option
50593 + is enabled, a sysctl option with name "tpe" is created.
50594 +
50595 +config GRKERNSEC_TPE_ALL
50596 + bool "Partially restrict all non-root users"
50597 + depends on GRKERNSEC_TPE
50598 + help
50599 + If you say Y here, all non-root users will be covered under
50600 + a weaker TPE restriction. This is separate from, and in addition to,
50601 + the main TPE options that you have selected elsewhere. Thus, if a
50602 + "trusted" GID is chosen, this restriction applies to even that GID.
50603 + Under this restriction, all non-root users will only be allowed to
50604 + execute files in directories they own that are not group or
50605 + world-writable, or in directories owned by root and writable only by
50606 + root. If the sysctl option is enabled, a sysctl option with name
50607 + "tpe_restrict_all" is created.
50608 +
50609 +config GRKERNSEC_TPE_INVERT
50610 + bool "Invert GID option"
50611 + depends on GRKERNSEC_TPE
50612 + help
50613 + If you say Y here, the group you specify in the TPE configuration will
50614 + decide what group TPE restrictions will be *disabled* for. This
50615 + option is useful if you want TPE restrictions to be applied to most
50616 + users on the system. If the sysctl option is enabled, a sysctl option
50617 + with name "tpe_invert" is created. Unlike other sysctl options, this
50618 + entry will default to on for backward-compatibility.
50619 +
50620 +config GRKERNSEC_TPE_GID
50621 + int "GID for untrusted users"
50622 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50623 + default 1005
50624 + help
50625 + Setting this GID determines what group TPE restrictions will be
50626 + *enabled* for. If the sysctl option is enabled, a sysctl option
50627 + with name "tpe_gid" is created.
50628 +
50629 +config GRKERNSEC_TPE_GID
50630 + int "GID for trusted users"
50631 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50632 + default 1005
50633 + help
50634 + Setting this GID determines what group TPE restrictions will be
50635 + *disabled* for. If the sysctl option is enabled, a sysctl option
50636 + with name "tpe_gid" is created.
50637 +
50638 +endmenu
50639 +menu "Network Protections"
50640 +depends on GRKERNSEC
50641 +
50642 +config GRKERNSEC_RANDNET
50643 + bool "Larger entropy pools"
50644 + help
50645 + If you say Y here, the entropy pools used for many features of Linux
50646 + and grsecurity will be doubled in size. Since several grsecurity
50647 + features use additional randomness, it is recommended that you say Y
50648 + here. Saying Y here has a similar effect as modifying
50649 + /proc/sys/kernel/random/poolsize.
50650 +
50651 +config GRKERNSEC_BLACKHOLE
50652 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50653 + depends on NET
50654 + help
50655 + If you say Y here, neither TCP resets nor ICMP
50656 + destination-unreachable packets will be sent in response to packets
50657 + sent to ports for which no associated listening process exists.
50658 + This feature supports both IPV4 and IPV6 and exempts the
50659 + loopback interface from blackholing. Enabling this feature
50660 + makes a host more resilient to DoS attacks and reduces network
50661 + visibility against scanners.
50662 +
50663 + The blackhole feature as-implemented is equivalent to the FreeBSD
50664 + blackhole feature, as it prevents RST responses to all packets, not
50665 + just SYNs. Under most application behavior this causes no
50666 + problems, but applications (like haproxy) may not close certain
50667 + connections in a way that cleanly terminates them on the remote
50668 + end, leaving the remote host in LAST_ACK state. Because of this
50669 + side-effect and to prevent intentional LAST_ACK DoSes, this
50670 + feature also adds automatic mitigation against such attacks.
50671 + The mitigation drastically reduces the amount of time a socket
50672 + can spend in LAST_ACK state. If you're using haproxy and not
50673 + all servers it connects to have this option enabled, consider
50674 + disabling this feature on the haproxy host.
50675 +
50676 + If the sysctl option is enabled, two sysctl options with names
50677 + "ip_blackhole" and "lastack_retries" will be created.
50678 + While "ip_blackhole" takes the standard zero/non-zero on/off
50679 + toggle, "lastack_retries" uses the same kinds of values as
50680 + "tcp_retries1" and "tcp_retries2". The default value of 4
50681 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50682 + state.
50683 +
50684 +config GRKERNSEC_SOCKET
50685 + bool "Socket restrictions"
50686 + depends on NET
50687 + help
50688 + If you say Y here, you will be able to choose from several options.
50689 + If you assign a GID on your system and add it to the supplementary
50690 + groups of users you want to restrict socket access to, this patch
50691 + will perform up to three things, based on the option(s) you choose.
50692 +
50693 +config GRKERNSEC_SOCKET_ALL
50694 + bool "Deny any sockets to group"
50695 + depends on GRKERNSEC_SOCKET
50696 + help
50697 + If you say Y here, you will be able to choose a GID whose users will
50698 + be unable to connect to other hosts from your machine or run server
50699 + applications from your machine. If the sysctl option is enabled, a
50700 + sysctl option with name "socket_all" is created.
50701 +
50702 +config GRKERNSEC_SOCKET_ALL_GID
50703 + int "GID to deny all sockets for"
50704 + depends on GRKERNSEC_SOCKET_ALL
50705 + default 1004
50706 + help
50707 + Here you can choose the GID to disable socket access for. Remember to
50708 + add the users you want socket access disabled for to the GID
50709 + specified here. If the sysctl option is enabled, a sysctl option
50710 + with name "socket_all_gid" is created.
50711 +
50712 +config GRKERNSEC_SOCKET_CLIENT
50713 + bool "Deny client sockets to group"
50714 + depends on GRKERNSEC_SOCKET
50715 + help
50716 + If you say Y here, you will be able to choose a GID whose users will
50717 + be unable to connect to other hosts from your machine, but will be
50718 + able to run servers. If this option is enabled, all users in the group
50719 + you specify will have to use passive mode when initiating ftp transfers
50720 + from the shell on your machine. If the sysctl option is enabled, a
50721 + sysctl option with name "socket_client" is created.
50722 +
50723 +config GRKERNSEC_SOCKET_CLIENT_GID
50724 + int "GID to deny client sockets for"
50725 + depends on GRKERNSEC_SOCKET_CLIENT
50726 + default 1003
50727 + help
50728 + Here you can choose the GID to disable client socket access for.
50729 + Remember to add the users you want client socket access disabled for to
50730 + the GID specified here. If the sysctl option is enabled, a sysctl
50731 + option with name "socket_client_gid" is created.
50732 +
50733 +config GRKERNSEC_SOCKET_SERVER
50734 + bool "Deny server sockets to group"
50735 + depends on GRKERNSEC_SOCKET
50736 + help
50737 + If you say Y here, you will be able to choose a GID of whose users will
50738 + be unable to run server applications from your machine. If the sysctl
50739 + option is enabled, a sysctl option with name "socket_server" is created.
50740 +
50741 +config GRKERNSEC_SOCKET_SERVER_GID
50742 + int "GID to deny server sockets for"
50743 + depends on GRKERNSEC_SOCKET_SERVER
50744 + default 1002
50745 + help
50746 + Here you can choose the GID to disable server socket access for.
50747 + Remember to add the users you want server socket access disabled for to
50748 + the GID specified here. If the sysctl option is enabled, a sysctl
50749 + option with name "socket_server_gid" is created.
50750 +
50751 +endmenu
50752 +menu "Sysctl support"
50753 +depends on GRKERNSEC && SYSCTL
50754 +
50755 +config GRKERNSEC_SYSCTL
50756 + bool "Sysctl support"
50757 + help
50758 + If you say Y here, you will be able to change the options that
50759 + grsecurity runs with at bootup, without having to recompile your
50760 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50761 + to enable (1) or disable (0) various features. All the sysctl entries
50762 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50763 + All features enabled in the kernel configuration are disabled at boot
50764 + if you do not say Y to the "Turn on features by default" option.
50765 + All options should be set at startup, and the grsec_lock entry should
50766 + be set to a non-zero value after all the options are set.
50767 + *THIS IS EXTREMELY IMPORTANT*
50768 +
50769 +config GRKERNSEC_SYSCTL_DISTRO
50770 + bool "Extra sysctl support for distro makers (READ HELP)"
50771 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50772 + help
50773 + If you say Y here, additional sysctl options will be created
50774 + for features that affect processes running as root. Therefore,
50775 + it is critical when using this option that the grsec_lock entry be
50776 + enabled after boot. Only distros with prebuilt kernel packages
50777 + with this option enabled that can ensure grsec_lock is enabled
50778 + after boot should use this option.
50779 + *Failure to set grsec_lock after boot makes all grsec features
50780 + this option covers useless*
50781 +
50782 + Currently this option creates the following sysctl entries:
50783 + "Disable Privileged I/O": "disable_priv_io"
50784 +
50785 +config GRKERNSEC_SYSCTL_ON
50786 + bool "Turn on features by default"
50787 + depends on GRKERNSEC_SYSCTL
50788 + help
50789 + If you say Y here, instead of having all features enabled in the
50790 + kernel configuration disabled at boot time, the features will be
50791 + enabled at boot time. It is recommended you say Y here unless
50792 + there is some reason you would want all sysctl-tunable features to
50793 + be disabled by default. As mentioned elsewhere, it is important
50794 + to enable the grsec_lock entry once you have finished modifying
50795 + the sysctl entries.
50796 +
50797 +endmenu
50798 +menu "Logging Options"
50799 +depends on GRKERNSEC
50800 +
50801 +config GRKERNSEC_FLOODTIME
50802 + int "Seconds in between log messages (minimum)"
50803 + default 10
50804 + help
50805 + This option allows you to enforce the number of seconds between
50806 + grsecurity log messages. The default should be suitable for most
50807 + people, however, if you choose to change it, choose a value small enough
50808 + to allow informative logs to be produced, but large enough to
50809 + prevent flooding.
50810 +
50811 +config GRKERNSEC_FLOODBURST
50812 + int "Number of messages in a burst (maximum)"
50813 + default 4
50814 + help
50815 + This option allows you to choose the maximum number of messages allowed
50816 + within the flood time interval you chose in a separate option. The
50817 + default should be suitable for most people, however if you find that
50818 + many of your logs are being interpreted as flooding, you may want to
50819 + raise this value.
50820 +
50821 +endmenu
50822 +
50823 +endmenu
50824 diff -urNp linux-3.0.3/grsecurity/Makefile linux-3.0.3/grsecurity/Makefile
50825 --- linux-3.0.3/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
50826 +++ linux-3.0.3/grsecurity/Makefile 2011-08-23 21:48:14.000000000 -0400
50827 @@ -0,0 +1,34 @@
50828 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50829 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50830 +# into an RBAC system
50831 +#
50832 +# All code in this directory and various hooks inserted throughout the kernel
50833 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50834 +# under the GPL v2 or higher
50835 +
50836 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50837 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50838 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50839 +
50840 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50841 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50842 + gracl_learn.o grsec_log.o
50843 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50844 +
50845 +ifdef CONFIG_NET
50846 +obj-y += grsec_sock.o
50847 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50848 +endif
50849 +
50850 +ifndef CONFIG_GRKERNSEC
50851 +obj-y += grsec_disabled.o
50852 +endif
50853 +
50854 +ifdef CONFIG_GRKERNSEC_HIDESYM
50855 +extra-y := grsec_hidesym.o
50856 +$(obj)/grsec_hidesym.o:
50857 + @-chmod -f 500 /boot
50858 + @-chmod -f 500 /lib/modules
50859 + @-chmod -f 700 .
50860 + @echo ' grsec: protected kernel image paths'
50861 +endif
50862 diff -urNp linux-3.0.3/include/acpi/acpi_bus.h linux-3.0.3/include/acpi/acpi_bus.h
50863 --- linux-3.0.3/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
50864 +++ linux-3.0.3/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
50865 @@ -107,7 +107,7 @@ struct acpi_device_ops {
50866 acpi_op_bind bind;
50867 acpi_op_unbind unbind;
50868 acpi_op_notify notify;
50869 -};
50870 +} __no_const;
50871
50872 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
50873
50874 diff -urNp linux-3.0.3/include/asm-generic/atomic-long.h linux-3.0.3/include/asm-generic/atomic-long.h
50875 --- linux-3.0.3/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
50876 +++ linux-3.0.3/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
50877 @@ -22,6 +22,12 @@
50878
50879 typedef atomic64_t atomic_long_t;
50880
50881 +#ifdef CONFIG_PAX_REFCOUNT
50882 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
50883 +#else
50884 +typedef atomic64_t atomic_long_unchecked_t;
50885 +#endif
50886 +
50887 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
50888
50889 static inline long atomic_long_read(atomic_long_t *l)
50890 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
50891 return (long)atomic64_read(v);
50892 }
50893
50894 +#ifdef CONFIG_PAX_REFCOUNT
50895 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
50896 +{
50897 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50898 +
50899 + return (long)atomic64_read_unchecked(v);
50900 +}
50901 +#endif
50902 +
50903 static inline void atomic_long_set(atomic_long_t *l, long i)
50904 {
50905 atomic64_t *v = (atomic64_t *)l;
50906 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
50907 atomic64_set(v, i);
50908 }
50909
50910 +#ifdef CONFIG_PAX_REFCOUNT
50911 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
50912 +{
50913 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50914 +
50915 + atomic64_set_unchecked(v, i);
50916 +}
50917 +#endif
50918 +
50919 static inline void atomic_long_inc(atomic_long_t *l)
50920 {
50921 atomic64_t *v = (atomic64_t *)l;
50922 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
50923 atomic64_inc(v);
50924 }
50925
50926 +#ifdef CONFIG_PAX_REFCOUNT
50927 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
50928 +{
50929 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50930 +
50931 + atomic64_inc_unchecked(v);
50932 +}
50933 +#endif
50934 +
50935 static inline void atomic_long_dec(atomic_long_t *l)
50936 {
50937 atomic64_t *v = (atomic64_t *)l;
50938 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
50939 atomic64_dec(v);
50940 }
50941
50942 +#ifdef CONFIG_PAX_REFCOUNT
50943 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
50944 +{
50945 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50946 +
50947 + atomic64_dec_unchecked(v);
50948 +}
50949 +#endif
50950 +
50951 static inline void atomic_long_add(long i, atomic_long_t *l)
50952 {
50953 atomic64_t *v = (atomic64_t *)l;
50954 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
50955 atomic64_add(i, v);
50956 }
50957
50958 +#ifdef CONFIG_PAX_REFCOUNT
50959 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
50960 +{
50961 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50962 +
50963 + atomic64_add_unchecked(i, v);
50964 +}
50965 +#endif
50966 +
50967 static inline void atomic_long_sub(long i, atomic_long_t *l)
50968 {
50969 atomic64_t *v = (atomic64_t *)l;
50970 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
50971 atomic64_sub(i, v);
50972 }
50973
50974 +#ifdef CONFIG_PAX_REFCOUNT
50975 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
50976 +{
50977 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50978 +
50979 + atomic64_sub_unchecked(i, v);
50980 +}
50981 +#endif
50982 +
50983 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
50984 {
50985 atomic64_t *v = (atomic64_t *)l;
50986 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
50987 return (long)atomic64_inc_return(v);
50988 }
50989
50990 +#ifdef CONFIG_PAX_REFCOUNT
50991 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
50992 +{
50993 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
50994 +
50995 + return (long)atomic64_inc_return_unchecked(v);
50996 +}
50997 +#endif
50998 +
50999 static inline long atomic_long_dec_return(atomic_long_t *l)
51000 {
51001 atomic64_t *v = (atomic64_t *)l;
51002 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51003
51004 typedef atomic_t atomic_long_t;
51005
51006 +#ifdef CONFIG_PAX_REFCOUNT
51007 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51008 +#else
51009 +typedef atomic_t atomic_long_unchecked_t;
51010 +#endif
51011 +
51012 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51013 static inline long atomic_long_read(atomic_long_t *l)
51014 {
51015 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51016 return (long)atomic_read(v);
51017 }
51018
51019 +#ifdef CONFIG_PAX_REFCOUNT
51020 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51021 +{
51022 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51023 +
51024 + return (long)atomic_read_unchecked(v);
51025 +}
51026 +#endif
51027 +
51028 static inline void atomic_long_set(atomic_long_t *l, long i)
51029 {
51030 atomic_t *v = (atomic_t *)l;
51031 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51032 atomic_set(v, i);
51033 }
51034
51035 +#ifdef CONFIG_PAX_REFCOUNT
51036 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51037 +{
51038 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51039 +
51040 + atomic_set_unchecked(v, i);
51041 +}
51042 +#endif
51043 +
51044 static inline void atomic_long_inc(atomic_long_t *l)
51045 {
51046 atomic_t *v = (atomic_t *)l;
51047 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51048 atomic_inc(v);
51049 }
51050
51051 +#ifdef CONFIG_PAX_REFCOUNT
51052 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51053 +{
51054 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51055 +
51056 + atomic_inc_unchecked(v);
51057 +}
51058 +#endif
51059 +
51060 static inline void atomic_long_dec(atomic_long_t *l)
51061 {
51062 atomic_t *v = (atomic_t *)l;
51063 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51064 atomic_dec(v);
51065 }
51066
51067 +#ifdef CONFIG_PAX_REFCOUNT
51068 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51069 +{
51070 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51071 +
51072 + atomic_dec_unchecked(v);
51073 +}
51074 +#endif
51075 +
51076 static inline void atomic_long_add(long i, atomic_long_t *l)
51077 {
51078 atomic_t *v = (atomic_t *)l;
51079 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51080 atomic_add(i, v);
51081 }
51082
51083 +#ifdef CONFIG_PAX_REFCOUNT
51084 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51085 +{
51086 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51087 +
51088 + atomic_add_unchecked(i, v);
51089 +}
51090 +#endif
51091 +
51092 static inline void atomic_long_sub(long i, atomic_long_t *l)
51093 {
51094 atomic_t *v = (atomic_t *)l;
51095 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51096 atomic_sub(i, v);
51097 }
51098
51099 +#ifdef CONFIG_PAX_REFCOUNT
51100 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51101 +{
51102 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51103 +
51104 + atomic_sub_unchecked(i, v);
51105 +}
51106 +#endif
51107 +
51108 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51109 {
51110 atomic_t *v = (atomic_t *)l;
51111 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51112 return (long)atomic_inc_return(v);
51113 }
51114
51115 +#ifdef CONFIG_PAX_REFCOUNT
51116 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51117 +{
51118 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51119 +
51120 + return (long)atomic_inc_return_unchecked(v);
51121 +}
51122 +#endif
51123 +
51124 static inline long atomic_long_dec_return(atomic_long_t *l)
51125 {
51126 atomic_t *v = (atomic_t *)l;
51127 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51128
51129 #endif /* BITS_PER_LONG == 64 */
51130
51131 +#ifdef CONFIG_PAX_REFCOUNT
51132 +static inline void pax_refcount_needs_these_functions(void)
51133 +{
51134 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51135 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51136 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51137 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51138 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51139 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51140 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51141 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51142 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51143 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51144 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51145 +
51146 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51147 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51148 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51149 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51150 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51151 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51152 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51153 +}
51154 +#else
51155 +#define atomic_read_unchecked(v) atomic_read(v)
51156 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51157 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51158 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51159 +#define atomic_inc_unchecked(v) atomic_inc(v)
51160 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51161 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51162 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51163 +#define atomic_dec_unchecked(v) atomic_dec(v)
51164 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51165 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51166 +
51167 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51168 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51169 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51170 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51171 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51172 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51173 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51174 +#endif
51175 +
51176 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51177 diff -urNp linux-3.0.3/include/asm-generic/cache.h linux-3.0.3/include/asm-generic/cache.h
51178 --- linux-3.0.3/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
51179 +++ linux-3.0.3/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
51180 @@ -6,7 +6,7 @@
51181 * cache lines need to provide their own cache.h.
51182 */
51183
51184 -#define L1_CACHE_SHIFT 5
51185 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51186 +#define L1_CACHE_SHIFT 5UL
51187 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51188
51189 #endif /* __ASM_GENERIC_CACHE_H */
51190 diff -urNp linux-3.0.3/include/asm-generic/int-l64.h linux-3.0.3/include/asm-generic/int-l64.h
51191 --- linux-3.0.3/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
51192 +++ linux-3.0.3/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
51193 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51194 typedef signed long s64;
51195 typedef unsigned long u64;
51196
51197 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51198 +
51199 #define S8_C(x) x
51200 #define U8_C(x) x ## U
51201 #define S16_C(x) x
51202 diff -urNp linux-3.0.3/include/asm-generic/int-ll64.h linux-3.0.3/include/asm-generic/int-ll64.h
51203 --- linux-3.0.3/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
51204 +++ linux-3.0.3/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
51205 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51206 typedef signed long long s64;
51207 typedef unsigned long long u64;
51208
51209 +typedef unsigned long long intoverflow_t;
51210 +
51211 #define S8_C(x) x
51212 #define U8_C(x) x ## U
51213 #define S16_C(x) x
51214 diff -urNp linux-3.0.3/include/asm-generic/kmap_types.h linux-3.0.3/include/asm-generic/kmap_types.h
51215 --- linux-3.0.3/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
51216 +++ linux-3.0.3/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
51217 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51218 KMAP_D(17) KM_NMI,
51219 KMAP_D(18) KM_NMI_PTE,
51220 KMAP_D(19) KM_KDB,
51221 +KMAP_D(20) KM_CLEARPAGE,
51222 /*
51223 * Remember to update debug_kmap_atomic() when adding new kmap types!
51224 */
51225 -KMAP_D(20) KM_TYPE_NR
51226 +KMAP_D(21) KM_TYPE_NR
51227 };
51228
51229 #undef KMAP_D
51230 diff -urNp linux-3.0.3/include/asm-generic/pgtable.h linux-3.0.3/include/asm-generic/pgtable.h
51231 --- linux-3.0.3/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
51232 +++ linux-3.0.3/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
51233 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
51234 #endif /* __HAVE_ARCH_PMD_WRITE */
51235 #endif
51236
51237 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51238 +static inline unsigned long pax_open_kernel(void) { return 0; }
51239 +#endif
51240 +
51241 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51242 +static inline unsigned long pax_close_kernel(void) { return 0; }
51243 +#endif
51244 +
51245 #endif /* !__ASSEMBLY__ */
51246
51247 #endif /* _ASM_GENERIC_PGTABLE_H */
51248 diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopmd.h linux-3.0.3/include/asm-generic/pgtable-nopmd.h
51249 --- linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
51250 +++ linux-3.0.3/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
51251 @@ -1,14 +1,19 @@
51252 #ifndef _PGTABLE_NOPMD_H
51253 #define _PGTABLE_NOPMD_H
51254
51255 -#ifndef __ASSEMBLY__
51256 -
51257 #include <asm-generic/pgtable-nopud.h>
51258
51259 -struct mm_struct;
51260 -
51261 #define __PAGETABLE_PMD_FOLDED
51262
51263 +#define PMD_SHIFT PUD_SHIFT
51264 +#define PTRS_PER_PMD 1
51265 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51266 +#define PMD_MASK (~(PMD_SIZE-1))
51267 +
51268 +#ifndef __ASSEMBLY__
51269 +
51270 +struct mm_struct;
51271 +
51272 /*
51273 * Having the pmd type consist of a pud gets the size right, and allows
51274 * us to conceptually access the pud entry that this pmd is folded into
51275 @@ -16,11 +21,6 @@ struct mm_struct;
51276 */
51277 typedef struct { pud_t pud; } pmd_t;
51278
51279 -#define PMD_SHIFT PUD_SHIFT
51280 -#define PTRS_PER_PMD 1
51281 -#define PMD_SIZE (1UL << PMD_SHIFT)
51282 -#define PMD_MASK (~(PMD_SIZE-1))
51283 -
51284 /*
51285 * The "pud_xxx()" functions here are trivial for a folded two-level
51286 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51287 diff -urNp linux-3.0.3/include/asm-generic/pgtable-nopud.h linux-3.0.3/include/asm-generic/pgtable-nopud.h
51288 --- linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
51289 +++ linux-3.0.3/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
51290 @@ -1,10 +1,15 @@
51291 #ifndef _PGTABLE_NOPUD_H
51292 #define _PGTABLE_NOPUD_H
51293
51294 -#ifndef __ASSEMBLY__
51295 -
51296 #define __PAGETABLE_PUD_FOLDED
51297
51298 +#define PUD_SHIFT PGDIR_SHIFT
51299 +#define PTRS_PER_PUD 1
51300 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51301 +#define PUD_MASK (~(PUD_SIZE-1))
51302 +
51303 +#ifndef __ASSEMBLY__
51304 +
51305 /*
51306 * Having the pud type consist of a pgd gets the size right, and allows
51307 * us to conceptually access the pgd entry that this pud is folded into
51308 @@ -12,11 +17,6 @@
51309 */
51310 typedef struct { pgd_t pgd; } pud_t;
51311
51312 -#define PUD_SHIFT PGDIR_SHIFT
51313 -#define PTRS_PER_PUD 1
51314 -#define PUD_SIZE (1UL << PUD_SHIFT)
51315 -#define PUD_MASK (~(PUD_SIZE-1))
51316 -
51317 /*
51318 * The "pgd_xxx()" functions here are trivial for a folded two-level
51319 * setup: the pud is never bad, and a pud always exists (as it's folded
51320 diff -urNp linux-3.0.3/include/asm-generic/vmlinux.lds.h linux-3.0.3/include/asm-generic/vmlinux.lds.h
51321 --- linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
51322 +++ linux-3.0.3/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
51323 @@ -217,6 +217,7 @@
51324 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51325 VMLINUX_SYMBOL(__start_rodata) = .; \
51326 *(.rodata) *(.rodata.*) \
51327 + *(.data..read_only) \
51328 *(__vermagic) /* Kernel version magic */ \
51329 . = ALIGN(8); \
51330 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51331 @@ -723,17 +724,18 @@
51332 * section in the linker script will go there too. @phdr should have
51333 * a leading colon.
51334 *
51335 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51336 + * Note that this macros defines per_cpu_load as an absolute symbol.
51337 * If there is no need to put the percpu section at a predetermined
51338 * address, use PERCPU_SECTION.
51339 */
51340 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51341 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51342 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51343 + per_cpu_load = .; \
51344 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51345 - LOAD_OFFSET) { \
51346 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51347 PERCPU_INPUT(cacheline) \
51348 } phdr \
51349 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51350 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51351
51352 /**
51353 * PERCPU_SECTION - define output section for percpu area, simple version
51354 diff -urNp linux-3.0.3/include/drm/drm_crtc_helper.h linux-3.0.3/include/drm/drm_crtc_helper.h
51355 --- linux-3.0.3/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
51356 +++ linux-3.0.3/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
51357 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51358
51359 /* disable crtc when not in use - more explicit than dpms off */
51360 void (*disable)(struct drm_crtc *crtc);
51361 -};
51362 +} __no_const;
51363
51364 struct drm_encoder_helper_funcs {
51365 void (*dpms)(struct drm_encoder *encoder, int mode);
51366 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51367 struct drm_connector *connector);
51368 /* disable encoder when not in use - more explicit than dpms off */
51369 void (*disable)(struct drm_encoder *encoder);
51370 -};
51371 +} __no_const;
51372
51373 struct drm_connector_helper_funcs {
51374 int (*get_modes)(struct drm_connector *connector);
51375 diff -urNp linux-3.0.3/include/drm/drmP.h linux-3.0.3/include/drm/drmP.h
51376 --- linux-3.0.3/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
51377 +++ linux-3.0.3/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
51378 @@ -73,6 +73,7 @@
51379 #include <linux/workqueue.h>
51380 #include <linux/poll.h>
51381 #include <asm/pgalloc.h>
51382 +#include <asm/local.h>
51383 #include "drm.h"
51384
51385 #include <linux/idr.h>
51386 @@ -1033,7 +1034,7 @@ struct drm_device {
51387
51388 /** \name Usage Counters */
51389 /*@{ */
51390 - int open_count; /**< Outstanding files open */
51391 + local_t open_count; /**< Outstanding files open */
51392 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51393 atomic_t vma_count; /**< Outstanding vma areas open */
51394 int buf_use; /**< Buffers in use -- cannot alloc */
51395 @@ -1044,7 +1045,7 @@ struct drm_device {
51396 /*@{ */
51397 unsigned long counters;
51398 enum drm_stat_type types[15];
51399 - atomic_t counts[15];
51400 + atomic_unchecked_t counts[15];
51401 /*@} */
51402
51403 struct list_head filelist;
51404 diff -urNp linux-3.0.3/include/drm/ttm/ttm_memory.h linux-3.0.3/include/drm/ttm/ttm_memory.h
51405 --- linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
51406 +++ linux-3.0.3/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
51407 @@ -47,7 +47,7 @@
51408
51409 struct ttm_mem_shrink {
51410 int (*do_shrink) (struct ttm_mem_shrink *);
51411 -};
51412 +} __no_const;
51413
51414 /**
51415 * struct ttm_mem_global - Global memory accounting structure.
51416 diff -urNp linux-3.0.3/include/linux/a.out.h linux-3.0.3/include/linux/a.out.h
51417 --- linux-3.0.3/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
51418 +++ linux-3.0.3/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
51419 @@ -39,6 +39,14 @@ enum machine_type {
51420 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51421 };
51422
51423 +/* Constants for the N_FLAGS field */
51424 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51425 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51426 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51427 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51428 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51429 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51430 +
51431 #if !defined (N_MAGIC)
51432 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51433 #endif
51434 diff -urNp linux-3.0.3/include/linux/atmdev.h linux-3.0.3/include/linux/atmdev.h
51435 --- linux-3.0.3/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
51436 +++ linux-3.0.3/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
51437 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51438 #endif
51439
51440 struct k_atm_aal_stats {
51441 -#define __HANDLE_ITEM(i) atomic_t i
51442 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51443 __AAL_STAT_ITEMS
51444 #undef __HANDLE_ITEM
51445 };
51446 diff -urNp linux-3.0.3/include/linux/binfmts.h linux-3.0.3/include/linux/binfmts.h
51447 --- linux-3.0.3/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
51448 +++ linux-3.0.3/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
51449 @@ -88,6 +88,7 @@ struct linux_binfmt {
51450 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51451 int (*load_shlib)(struct file *);
51452 int (*core_dump)(struct coredump_params *cprm);
51453 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51454 unsigned long min_coredump; /* minimal dump size */
51455 };
51456
51457 diff -urNp linux-3.0.3/include/linux/blkdev.h linux-3.0.3/include/linux/blkdev.h
51458 --- linux-3.0.3/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
51459 +++ linux-3.0.3/include/linux/blkdev.h 2011-08-23 21:47:56.000000000 -0400
51460 @@ -1307,7 +1307,7 @@ struct block_device_operations {
51461 int (*getgeo)(struct block_device *, struct hd_geometry *);
51462 /* this callback is with swap_lock and sometimes page table lock held */
51463 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51464 - struct module *owner;
51465 + struct module * const owner;
51466 };
51467
51468 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51469 diff -urNp linux-3.0.3/include/linux/blktrace_api.h linux-3.0.3/include/linux/blktrace_api.h
51470 --- linux-3.0.3/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
51471 +++ linux-3.0.3/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
51472 @@ -161,7 +161,7 @@ struct blk_trace {
51473 struct dentry *dir;
51474 struct dentry *dropped_file;
51475 struct dentry *msg_file;
51476 - atomic_t dropped;
51477 + atomic_unchecked_t dropped;
51478 };
51479
51480 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51481 diff -urNp linux-3.0.3/include/linux/byteorder/little_endian.h linux-3.0.3/include/linux/byteorder/little_endian.h
51482 --- linux-3.0.3/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
51483 +++ linux-3.0.3/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
51484 @@ -42,51 +42,51 @@
51485
51486 static inline __le64 __cpu_to_le64p(const __u64 *p)
51487 {
51488 - return (__force __le64)*p;
51489 + return (__force const __le64)*p;
51490 }
51491 static inline __u64 __le64_to_cpup(const __le64 *p)
51492 {
51493 - return (__force __u64)*p;
51494 + return (__force const __u64)*p;
51495 }
51496 static inline __le32 __cpu_to_le32p(const __u32 *p)
51497 {
51498 - return (__force __le32)*p;
51499 + return (__force const __le32)*p;
51500 }
51501 static inline __u32 __le32_to_cpup(const __le32 *p)
51502 {
51503 - return (__force __u32)*p;
51504 + return (__force const __u32)*p;
51505 }
51506 static inline __le16 __cpu_to_le16p(const __u16 *p)
51507 {
51508 - return (__force __le16)*p;
51509 + return (__force const __le16)*p;
51510 }
51511 static inline __u16 __le16_to_cpup(const __le16 *p)
51512 {
51513 - return (__force __u16)*p;
51514 + return (__force const __u16)*p;
51515 }
51516 static inline __be64 __cpu_to_be64p(const __u64 *p)
51517 {
51518 - return (__force __be64)__swab64p(p);
51519 + return (__force const __be64)__swab64p(p);
51520 }
51521 static inline __u64 __be64_to_cpup(const __be64 *p)
51522 {
51523 - return __swab64p((__u64 *)p);
51524 + return __swab64p((const __u64 *)p);
51525 }
51526 static inline __be32 __cpu_to_be32p(const __u32 *p)
51527 {
51528 - return (__force __be32)__swab32p(p);
51529 + return (__force const __be32)__swab32p(p);
51530 }
51531 static inline __u32 __be32_to_cpup(const __be32 *p)
51532 {
51533 - return __swab32p((__u32 *)p);
51534 + return __swab32p((const __u32 *)p);
51535 }
51536 static inline __be16 __cpu_to_be16p(const __u16 *p)
51537 {
51538 - return (__force __be16)__swab16p(p);
51539 + return (__force const __be16)__swab16p(p);
51540 }
51541 static inline __u16 __be16_to_cpup(const __be16 *p)
51542 {
51543 - return __swab16p((__u16 *)p);
51544 + return __swab16p((const __u16 *)p);
51545 }
51546 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51547 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51548 diff -urNp linux-3.0.3/include/linux/cache.h linux-3.0.3/include/linux/cache.h
51549 --- linux-3.0.3/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
51550 +++ linux-3.0.3/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
51551 @@ -16,6 +16,10 @@
51552 #define __read_mostly
51553 #endif
51554
51555 +#ifndef __read_only
51556 +#define __read_only __read_mostly
51557 +#endif
51558 +
51559 #ifndef ____cacheline_aligned
51560 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51561 #endif
51562 diff -urNp linux-3.0.3/include/linux/capability.h linux-3.0.3/include/linux/capability.h
51563 --- linux-3.0.3/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
51564 +++ linux-3.0.3/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
51565 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51566 extern bool ns_capable(struct user_namespace *ns, int cap);
51567 extern bool task_ns_capable(struct task_struct *t, int cap);
51568 extern bool nsown_capable(int cap);
51569 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51570 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51571 +extern bool capable_nolog(int cap);
51572
51573 /* audit system wants to get cap info from files as well */
51574 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51575 diff -urNp linux-3.0.3/include/linux/cleancache.h linux-3.0.3/include/linux/cleancache.h
51576 --- linux-3.0.3/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
51577 +++ linux-3.0.3/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
51578 @@ -31,7 +31,7 @@ struct cleancache_ops {
51579 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
51580 void (*flush_inode)(int, struct cleancache_filekey);
51581 void (*flush_fs)(int);
51582 -};
51583 +} __no_const;
51584
51585 extern struct cleancache_ops
51586 cleancache_register_ops(struct cleancache_ops *ops);
51587 diff -urNp linux-3.0.3/include/linux/compiler-gcc4.h linux-3.0.3/include/linux/compiler-gcc4.h
51588 --- linux-3.0.3/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
51589 +++ linux-3.0.3/include/linux/compiler-gcc4.h 2011-08-23 21:47:56.000000000 -0400
51590 @@ -31,6 +31,9 @@
51591
51592
51593 #if __GNUC_MINOR__ >= 5
51594 +
51595 +#define __no_const __attribute__((no_const))
51596 +
51597 /*
51598 * Mark a position in code as unreachable. This can be used to
51599 * suppress control flow warnings after asm blocks that transfer
51600 @@ -46,6 +49,11 @@
51601 #define __noclone __attribute__((__noclone__))
51602
51603 #endif
51604 +
51605 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51606 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51607 +#define __bos0(ptr) __bos((ptr), 0)
51608 +#define __bos1(ptr) __bos((ptr), 1)
51609 #endif
51610
51611 #if __GNUC_MINOR__ > 0
51612 diff -urNp linux-3.0.3/include/linux/compiler.h linux-3.0.3/include/linux/compiler.h
51613 --- linux-3.0.3/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
51614 +++ linux-3.0.3/include/linux/compiler.h 2011-08-23 21:47:56.000000000 -0400
51615 @@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51616 # define __attribute_const__ /* unimplemented */
51617 #endif
51618
51619 +#ifndef __no_const
51620 +# define __no_const
51621 +#endif
51622 +
51623 /*
51624 * Tell gcc if a function is cold. The compiler will assume any path
51625 * directly leading to the call is unlikely.
51626 @@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51627 #define __cold
51628 #endif
51629
51630 +#ifndef __alloc_size
51631 +#define __alloc_size(...)
51632 +#endif
51633 +
51634 +#ifndef __bos
51635 +#define __bos(ptr, arg)
51636 +#endif
51637 +
51638 +#ifndef __bos0
51639 +#define __bos0(ptr)
51640 +#endif
51641 +
51642 +#ifndef __bos1
51643 +#define __bos1(ptr)
51644 +#endif
51645 +
51646 /* Simple shorthand for a section definition */
51647 #ifndef __section
51648 # define __section(S) __attribute__ ((__section__(#S)))
51649 @@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51650 * use is to mediate communication between process-level code and irq/NMI
51651 * handlers, all running on the same CPU.
51652 */
51653 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51654 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51655 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51656
51657 #endif /* __LINUX_COMPILER_H */
51658 diff -urNp linux-3.0.3/include/linux/cpuset.h linux-3.0.3/include/linux/cpuset.h
51659 --- linux-3.0.3/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
51660 +++ linux-3.0.3/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
51661 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51662 * nodemask.
51663 */
51664 smp_mb();
51665 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51666 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51667 }
51668
51669 static inline void set_mems_allowed(nodemask_t nodemask)
51670 diff -urNp linux-3.0.3/include/linux/crypto.h linux-3.0.3/include/linux/crypto.h
51671 --- linux-3.0.3/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
51672 +++ linux-3.0.3/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
51673 @@ -361,7 +361,7 @@ struct cipher_tfm {
51674 const u8 *key, unsigned int keylen);
51675 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51676 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51677 -};
51678 +} __no_const;
51679
51680 struct hash_tfm {
51681 int (*init)(struct hash_desc *desc);
51682 @@ -382,13 +382,13 @@ struct compress_tfm {
51683 int (*cot_decompress)(struct crypto_tfm *tfm,
51684 const u8 *src, unsigned int slen,
51685 u8 *dst, unsigned int *dlen);
51686 -};
51687 +} __no_const;
51688
51689 struct rng_tfm {
51690 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51691 unsigned int dlen);
51692 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51693 -};
51694 +} __no_const;
51695
51696 #define crt_ablkcipher crt_u.ablkcipher
51697 #define crt_aead crt_u.aead
51698 diff -urNp linux-3.0.3/include/linux/decompress/mm.h linux-3.0.3/include/linux/decompress/mm.h
51699 --- linux-3.0.3/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
51700 +++ linux-3.0.3/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
51701 @@ -77,7 +77,7 @@ static void free(void *where)
51702 * warnings when not needed (indeed large_malloc / large_free are not
51703 * needed by inflate */
51704
51705 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51706 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51707 #define free(a) kfree(a)
51708
51709 #define large_malloc(a) vmalloc(a)
51710 diff -urNp linux-3.0.3/include/linux/dma-mapping.h linux-3.0.3/include/linux/dma-mapping.h
51711 --- linux-3.0.3/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
51712 +++ linux-3.0.3/include/linux/dma-mapping.h 2011-08-23 21:47:56.000000000 -0400
51713 @@ -49,7 +49,7 @@ struct dma_map_ops {
51714 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
51715 int (*dma_supported)(struct device *dev, u64 mask);
51716 int (*set_dma_mask)(struct device *dev, u64 mask);
51717 - int is_phys;
51718 + const int is_phys;
51719 };
51720
51721 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51722 diff -urNp linux-3.0.3/include/linux/efi.h linux-3.0.3/include/linux/efi.h
51723 --- linux-3.0.3/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
51724 +++ linux-3.0.3/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
51725 @@ -410,7 +410,7 @@ struct efivar_operations {
51726 efi_get_variable_t *get_variable;
51727 efi_get_next_variable_t *get_next_variable;
51728 efi_set_variable_t *set_variable;
51729 -};
51730 +} __no_const;
51731
51732 struct efivars {
51733 /*
51734 diff -urNp linux-3.0.3/include/linux/elf.h linux-3.0.3/include/linux/elf.h
51735 --- linux-3.0.3/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
51736 +++ linux-3.0.3/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
51737 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51738 #define PT_GNU_EH_FRAME 0x6474e550
51739
51740 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51741 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51742 +
51743 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51744 +
51745 +/* Constants for the e_flags field */
51746 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51747 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51748 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51749 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51750 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51751 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51752
51753 /*
51754 * Extended Numbering
51755 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51756 #define DT_DEBUG 21
51757 #define DT_TEXTREL 22
51758 #define DT_JMPREL 23
51759 +#define DT_FLAGS 30
51760 + #define DF_TEXTREL 0x00000004
51761 #define DT_ENCODING 32
51762 #define OLD_DT_LOOS 0x60000000
51763 #define DT_LOOS 0x6000000d
51764 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51765 #define PF_W 0x2
51766 #define PF_X 0x1
51767
51768 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51769 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51770 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51771 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51772 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51773 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51774 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51775 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51776 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51777 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51778 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51779 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51780 +
51781 typedef struct elf32_phdr{
51782 Elf32_Word p_type;
51783 Elf32_Off p_offset;
51784 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51785 #define EI_OSABI 7
51786 #define EI_PAD 8
51787
51788 +#define EI_PAX 14
51789 +
51790 #define ELFMAG0 0x7f /* EI_MAG */
51791 #define ELFMAG1 'E'
51792 #define ELFMAG2 'L'
51793 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
51794 #define elf_note elf32_note
51795 #define elf_addr_t Elf32_Off
51796 #define Elf_Half Elf32_Half
51797 +#define elf_dyn Elf32_Dyn
51798
51799 #else
51800
51801 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
51802 #define elf_note elf64_note
51803 #define elf_addr_t Elf64_Off
51804 #define Elf_Half Elf64_Half
51805 +#define elf_dyn Elf64_Dyn
51806
51807 #endif
51808
51809 diff -urNp linux-3.0.3/include/linux/firewire.h linux-3.0.3/include/linux/firewire.h
51810 --- linux-3.0.3/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
51811 +++ linux-3.0.3/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
51812 @@ -428,7 +428,7 @@ struct fw_iso_context {
51813 union {
51814 fw_iso_callback_t sc;
51815 fw_iso_mc_callback_t mc;
51816 - } callback;
51817 + } __no_const callback;
51818 void *callback_data;
51819 };
51820
51821 diff -urNp linux-3.0.3/include/linux/fscache-cache.h linux-3.0.3/include/linux/fscache-cache.h
51822 --- linux-3.0.3/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
51823 +++ linux-3.0.3/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
51824 @@ -102,7 +102,7 @@ struct fscache_operation {
51825 fscache_operation_release_t release;
51826 };
51827
51828 -extern atomic_t fscache_op_debug_id;
51829 +extern atomic_unchecked_t fscache_op_debug_id;
51830 extern void fscache_op_work_func(struct work_struct *work);
51831
51832 extern void fscache_enqueue_operation(struct fscache_operation *);
51833 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
51834 {
51835 INIT_WORK(&op->work, fscache_op_work_func);
51836 atomic_set(&op->usage, 1);
51837 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
51838 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51839 op->processor = processor;
51840 op->release = release;
51841 INIT_LIST_HEAD(&op->pend_link);
51842 diff -urNp linux-3.0.3/include/linux/fs.h linux-3.0.3/include/linux/fs.h
51843 --- linux-3.0.3/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
51844 +++ linux-3.0.3/include/linux/fs.h 2011-08-23 21:48:14.000000000 -0400
51845 @@ -109,6 +109,11 @@ struct inodes_stat_t {
51846 /* File was opened by fanotify and shouldn't generate fanotify events */
51847 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
51848
51849 +/* Hack for grsec so as not to require read permission simply to execute
51850 + * a binary
51851 + */
51852 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
51853 +
51854 /*
51855 * The below are the various read and write types that we support. Some of
51856 * them include behavioral modifiers that send information down to the
51857 @@ -1544,7 +1549,7 @@ struct block_device_operations;
51858 * the big kernel lock held in all filesystems.
51859 */
51860 struct file_operations {
51861 - struct module *owner;
51862 + struct module * const owner;
51863 loff_t (*llseek) (struct file *, loff_t, int);
51864 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
51865 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
51866 @@ -1572,6 +1577,7 @@ struct file_operations {
51867 long (*fallocate)(struct file *file, int mode, loff_t offset,
51868 loff_t len);
51869 };
51870 +typedef struct file_operations __no_const file_operations_no_const;
51871
51872 #define IPERM_FLAG_RCU 0x0001
51873
51874 diff -urNp linux-3.0.3/include/linux/fsnotify.h linux-3.0.3/include/linux/fsnotify.h
51875 --- linux-3.0.3/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
51876 +++ linux-3.0.3/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
51877 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
51878 */
51879 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
51880 {
51881 - return kstrdup(name, GFP_KERNEL);
51882 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
51883 }
51884
51885 /*
51886 diff -urNp linux-3.0.3/include/linux/fs_struct.h linux-3.0.3/include/linux/fs_struct.h
51887 --- linux-3.0.3/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
51888 +++ linux-3.0.3/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
51889 @@ -6,7 +6,7 @@
51890 #include <linux/seqlock.h>
51891
51892 struct fs_struct {
51893 - int users;
51894 + atomic_t users;
51895 spinlock_t lock;
51896 seqcount_t seq;
51897 int umask;
51898 diff -urNp linux-3.0.3/include/linux/ftrace_event.h linux-3.0.3/include/linux/ftrace_event.h
51899 --- linux-3.0.3/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
51900 +++ linux-3.0.3/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
51901 @@ -96,7 +96,7 @@ struct trace_event_functions {
51902 trace_print_func raw;
51903 trace_print_func hex;
51904 trace_print_func binary;
51905 -};
51906 +} __no_const;
51907
51908 struct trace_event {
51909 struct hlist_node node;
51910 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
51911 extern int trace_add_event_call(struct ftrace_event_call *call);
51912 extern void trace_remove_event_call(struct ftrace_event_call *call);
51913
51914 -#define is_signed_type(type) (((type)(-1)) < 0)
51915 +#define is_signed_type(type) (((type)(-1)) < (type)1)
51916
51917 int trace_set_clr_event(const char *system, const char *event, int set);
51918
51919 diff -urNp linux-3.0.3/include/linux/genhd.h linux-3.0.3/include/linux/genhd.h
51920 --- linux-3.0.3/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
51921 +++ linux-3.0.3/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
51922 @@ -184,7 +184,7 @@ struct gendisk {
51923 struct kobject *slave_dir;
51924
51925 struct timer_rand_state *random;
51926 - atomic_t sync_io; /* RAID */
51927 + atomic_unchecked_t sync_io; /* RAID */
51928 struct disk_events *ev;
51929 #ifdef CONFIG_BLK_DEV_INTEGRITY
51930 struct blk_integrity *integrity;
51931 diff -urNp linux-3.0.3/include/linux/gracl.h linux-3.0.3/include/linux/gracl.h
51932 --- linux-3.0.3/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
51933 +++ linux-3.0.3/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
51934 @@ -0,0 +1,317 @@
51935 +#ifndef GR_ACL_H
51936 +#define GR_ACL_H
51937 +
51938 +#include <linux/grdefs.h>
51939 +#include <linux/resource.h>
51940 +#include <linux/capability.h>
51941 +#include <linux/dcache.h>
51942 +#include <asm/resource.h>
51943 +
51944 +/* Major status information */
51945 +
51946 +#define GR_VERSION "grsecurity 2.2.2"
51947 +#define GRSECURITY_VERSION 0x2202
51948 +
51949 +enum {
51950 + GR_SHUTDOWN = 0,
51951 + GR_ENABLE = 1,
51952 + GR_SPROLE = 2,
51953 + GR_RELOAD = 3,
51954 + GR_SEGVMOD = 4,
51955 + GR_STATUS = 5,
51956 + GR_UNSPROLE = 6,
51957 + GR_PASSSET = 7,
51958 + GR_SPROLEPAM = 8,
51959 +};
51960 +
51961 +/* Password setup definitions
51962 + * kernel/grhash.c */
51963 +enum {
51964 + GR_PW_LEN = 128,
51965 + GR_SALT_LEN = 16,
51966 + GR_SHA_LEN = 32,
51967 +};
51968 +
51969 +enum {
51970 + GR_SPROLE_LEN = 64,
51971 +};
51972 +
51973 +enum {
51974 + GR_NO_GLOB = 0,
51975 + GR_REG_GLOB,
51976 + GR_CREATE_GLOB
51977 +};
51978 +
51979 +#define GR_NLIMITS 32
51980 +
51981 +/* Begin Data Structures */
51982 +
51983 +struct sprole_pw {
51984 + unsigned char *rolename;
51985 + unsigned char salt[GR_SALT_LEN];
51986 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
51987 +};
51988 +
51989 +struct name_entry {
51990 + __u32 key;
51991 + ino_t inode;
51992 + dev_t device;
51993 + char *name;
51994 + __u16 len;
51995 + __u8 deleted;
51996 + struct name_entry *prev;
51997 + struct name_entry *next;
51998 +};
51999 +
52000 +struct inodev_entry {
52001 + struct name_entry *nentry;
52002 + struct inodev_entry *prev;
52003 + struct inodev_entry *next;
52004 +};
52005 +
52006 +struct acl_role_db {
52007 + struct acl_role_label **r_hash;
52008 + __u32 r_size;
52009 +};
52010 +
52011 +struct inodev_db {
52012 + struct inodev_entry **i_hash;
52013 + __u32 i_size;
52014 +};
52015 +
52016 +struct name_db {
52017 + struct name_entry **n_hash;
52018 + __u32 n_size;
52019 +};
52020 +
52021 +struct crash_uid {
52022 + uid_t uid;
52023 + unsigned long expires;
52024 +};
52025 +
52026 +struct gr_hash_struct {
52027 + void **table;
52028 + void **nametable;
52029 + void *first;
52030 + __u32 table_size;
52031 + __u32 used_size;
52032 + int type;
52033 +};
52034 +
52035 +/* Userspace Grsecurity ACL data structures */
52036 +
52037 +struct acl_subject_label {
52038 + char *filename;
52039 + ino_t inode;
52040 + dev_t device;
52041 + __u32 mode;
52042 + kernel_cap_t cap_mask;
52043 + kernel_cap_t cap_lower;
52044 + kernel_cap_t cap_invert_audit;
52045 +
52046 + struct rlimit res[GR_NLIMITS];
52047 + __u32 resmask;
52048 +
52049 + __u8 user_trans_type;
52050 + __u8 group_trans_type;
52051 + uid_t *user_transitions;
52052 + gid_t *group_transitions;
52053 + __u16 user_trans_num;
52054 + __u16 group_trans_num;
52055 +
52056 + __u32 sock_families[2];
52057 + __u32 ip_proto[8];
52058 + __u32 ip_type;
52059 + struct acl_ip_label **ips;
52060 + __u32 ip_num;
52061 + __u32 inaddr_any_override;
52062 +
52063 + __u32 crashes;
52064 + unsigned long expires;
52065 +
52066 + struct acl_subject_label *parent_subject;
52067 + struct gr_hash_struct *hash;
52068 + struct acl_subject_label *prev;
52069 + struct acl_subject_label *next;
52070 +
52071 + struct acl_object_label **obj_hash;
52072 + __u32 obj_hash_size;
52073 + __u16 pax_flags;
52074 +};
52075 +
52076 +struct role_allowed_ip {
52077 + __u32 addr;
52078 + __u32 netmask;
52079 +
52080 + struct role_allowed_ip *prev;
52081 + struct role_allowed_ip *next;
52082 +};
52083 +
52084 +struct role_transition {
52085 + char *rolename;
52086 +
52087 + struct role_transition *prev;
52088 + struct role_transition *next;
52089 +};
52090 +
52091 +struct acl_role_label {
52092 + char *rolename;
52093 + uid_t uidgid;
52094 + __u16 roletype;
52095 +
52096 + __u16 auth_attempts;
52097 + unsigned long expires;
52098 +
52099 + struct acl_subject_label *root_label;
52100 + struct gr_hash_struct *hash;
52101 +
52102 + struct acl_role_label *prev;
52103 + struct acl_role_label *next;
52104 +
52105 + struct role_transition *transitions;
52106 + struct role_allowed_ip *allowed_ips;
52107 + uid_t *domain_children;
52108 + __u16 domain_child_num;
52109 +
52110 + struct acl_subject_label **subj_hash;
52111 + __u32 subj_hash_size;
52112 +};
52113 +
52114 +struct user_acl_role_db {
52115 + struct acl_role_label **r_table;
52116 + __u32 num_pointers; /* Number of allocations to track */
52117 + __u32 num_roles; /* Number of roles */
52118 + __u32 num_domain_children; /* Number of domain children */
52119 + __u32 num_subjects; /* Number of subjects */
52120 + __u32 num_objects; /* Number of objects */
52121 +};
52122 +
52123 +struct acl_object_label {
52124 + char *filename;
52125 + ino_t inode;
52126 + dev_t device;
52127 + __u32 mode;
52128 +
52129 + struct acl_subject_label *nested;
52130 + struct acl_object_label *globbed;
52131 +
52132 + /* next two structures not used */
52133 +
52134 + struct acl_object_label *prev;
52135 + struct acl_object_label *next;
52136 +};
52137 +
52138 +struct acl_ip_label {
52139 + char *iface;
52140 + __u32 addr;
52141 + __u32 netmask;
52142 + __u16 low, high;
52143 + __u8 mode;
52144 + __u32 type;
52145 + __u32 proto[8];
52146 +
52147 + /* next two structures not used */
52148 +
52149 + struct acl_ip_label *prev;
52150 + struct acl_ip_label *next;
52151 +};
52152 +
52153 +struct gr_arg {
52154 + struct user_acl_role_db role_db;
52155 + unsigned char pw[GR_PW_LEN];
52156 + unsigned char salt[GR_SALT_LEN];
52157 + unsigned char sum[GR_SHA_LEN];
52158 + unsigned char sp_role[GR_SPROLE_LEN];
52159 + struct sprole_pw *sprole_pws;
52160 + dev_t segv_device;
52161 + ino_t segv_inode;
52162 + uid_t segv_uid;
52163 + __u16 num_sprole_pws;
52164 + __u16 mode;
52165 +};
52166 +
52167 +struct gr_arg_wrapper {
52168 + struct gr_arg *arg;
52169 + __u32 version;
52170 + __u32 size;
52171 +};
52172 +
52173 +struct subject_map {
52174 + struct acl_subject_label *user;
52175 + struct acl_subject_label *kernel;
52176 + struct subject_map *prev;
52177 + struct subject_map *next;
52178 +};
52179 +
52180 +struct acl_subj_map_db {
52181 + struct subject_map **s_hash;
52182 + __u32 s_size;
52183 +};
52184 +
52185 +/* End Data Structures Section */
52186 +
52187 +/* Hash functions generated by empirical testing by Brad Spengler
52188 + Makes good use of the low bits of the inode. Generally 0-1 times
52189 + in loop for successful match. 0-3 for unsuccessful match.
52190 + Shift/add algorithm with modulus of table size and an XOR*/
52191 +
52192 +static __inline__ unsigned int
52193 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52194 +{
52195 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52196 +}
52197 +
52198 + static __inline__ unsigned int
52199 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52200 +{
52201 + return ((const unsigned long)userp % sz);
52202 +}
52203 +
52204 +static __inline__ unsigned int
52205 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52206 +{
52207 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52208 +}
52209 +
52210 +static __inline__ unsigned int
52211 +nhash(const char *name, const __u16 len, const unsigned int sz)
52212 +{
52213 + return full_name_hash((const unsigned char *)name, len) % sz;
52214 +}
52215 +
52216 +#define FOR_EACH_ROLE_START(role) \
52217 + role = role_list; \
52218 + while (role) {
52219 +
52220 +#define FOR_EACH_ROLE_END(role) \
52221 + role = role->prev; \
52222 + }
52223 +
52224 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52225 + subj = NULL; \
52226 + iter = 0; \
52227 + while (iter < role->subj_hash_size) { \
52228 + if (subj == NULL) \
52229 + subj = role->subj_hash[iter]; \
52230 + if (subj == NULL) { \
52231 + iter++; \
52232 + continue; \
52233 + }
52234 +
52235 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52236 + subj = subj->next; \
52237 + if (subj == NULL) \
52238 + iter++; \
52239 + }
52240 +
52241 +
52242 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52243 + subj = role->hash->first; \
52244 + while (subj != NULL) {
52245 +
52246 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52247 + subj = subj->next; \
52248 + }
52249 +
52250 +#endif
52251 +
52252 diff -urNp linux-3.0.3/include/linux/gralloc.h linux-3.0.3/include/linux/gralloc.h
52253 --- linux-3.0.3/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52254 +++ linux-3.0.3/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
52255 @@ -0,0 +1,9 @@
52256 +#ifndef __GRALLOC_H
52257 +#define __GRALLOC_H
52258 +
52259 +void acl_free_all(void);
52260 +int acl_alloc_stack_init(unsigned long size);
52261 +void *acl_alloc(unsigned long len);
52262 +void *acl_alloc_num(unsigned long num, unsigned long len);
52263 +
52264 +#endif
52265 diff -urNp linux-3.0.3/include/linux/grdefs.h linux-3.0.3/include/linux/grdefs.h
52266 --- linux-3.0.3/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52267 +++ linux-3.0.3/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
52268 @@ -0,0 +1,140 @@
52269 +#ifndef GRDEFS_H
52270 +#define GRDEFS_H
52271 +
52272 +/* Begin grsecurity status declarations */
52273 +
52274 +enum {
52275 + GR_READY = 0x01,
52276 + GR_STATUS_INIT = 0x00 // disabled state
52277 +};
52278 +
52279 +/* Begin ACL declarations */
52280 +
52281 +/* Role flags */
52282 +
52283 +enum {
52284 + GR_ROLE_USER = 0x0001,
52285 + GR_ROLE_GROUP = 0x0002,
52286 + GR_ROLE_DEFAULT = 0x0004,
52287 + GR_ROLE_SPECIAL = 0x0008,
52288 + GR_ROLE_AUTH = 0x0010,
52289 + GR_ROLE_NOPW = 0x0020,
52290 + GR_ROLE_GOD = 0x0040,
52291 + GR_ROLE_LEARN = 0x0080,
52292 + GR_ROLE_TPE = 0x0100,
52293 + GR_ROLE_DOMAIN = 0x0200,
52294 + GR_ROLE_PAM = 0x0400,
52295 + GR_ROLE_PERSIST = 0x0800
52296 +};
52297 +
52298 +/* ACL Subject and Object mode flags */
52299 +enum {
52300 + GR_DELETED = 0x80000000
52301 +};
52302 +
52303 +/* ACL Object-only mode flags */
52304 +enum {
52305 + GR_READ = 0x00000001,
52306 + GR_APPEND = 0x00000002,
52307 + GR_WRITE = 0x00000004,
52308 + GR_EXEC = 0x00000008,
52309 + GR_FIND = 0x00000010,
52310 + GR_INHERIT = 0x00000020,
52311 + GR_SETID = 0x00000040,
52312 + GR_CREATE = 0x00000080,
52313 + GR_DELETE = 0x00000100,
52314 + GR_LINK = 0x00000200,
52315 + GR_AUDIT_READ = 0x00000400,
52316 + GR_AUDIT_APPEND = 0x00000800,
52317 + GR_AUDIT_WRITE = 0x00001000,
52318 + GR_AUDIT_EXEC = 0x00002000,
52319 + GR_AUDIT_FIND = 0x00004000,
52320 + GR_AUDIT_INHERIT= 0x00008000,
52321 + GR_AUDIT_SETID = 0x00010000,
52322 + GR_AUDIT_CREATE = 0x00020000,
52323 + GR_AUDIT_DELETE = 0x00040000,
52324 + GR_AUDIT_LINK = 0x00080000,
52325 + GR_PTRACERD = 0x00100000,
52326 + GR_NOPTRACE = 0x00200000,
52327 + GR_SUPPRESS = 0x00400000,
52328 + GR_NOLEARN = 0x00800000,
52329 + GR_INIT_TRANSFER= 0x01000000
52330 +};
52331 +
52332 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52333 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52334 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52335 +
52336 +/* ACL subject-only mode flags */
52337 +enum {
52338 + GR_KILL = 0x00000001,
52339 + GR_VIEW = 0x00000002,
52340 + GR_PROTECTED = 0x00000004,
52341 + GR_LEARN = 0x00000008,
52342 + GR_OVERRIDE = 0x00000010,
52343 + /* just a placeholder, this mode is only used in userspace */
52344 + GR_DUMMY = 0x00000020,
52345 + GR_PROTSHM = 0x00000040,
52346 + GR_KILLPROC = 0x00000080,
52347 + GR_KILLIPPROC = 0x00000100,
52348 + /* just a placeholder, this mode is only used in userspace */
52349 + GR_NOTROJAN = 0x00000200,
52350 + GR_PROTPROCFD = 0x00000400,
52351 + GR_PROCACCT = 0x00000800,
52352 + GR_RELAXPTRACE = 0x00001000,
52353 + GR_NESTED = 0x00002000,
52354 + GR_INHERITLEARN = 0x00004000,
52355 + GR_PROCFIND = 0x00008000,
52356 + GR_POVERRIDE = 0x00010000,
52357 + GR_KERNELAUTH = 0x00020000,
52358 + GR_ATSECURE = 0x00040000,
52359 + GR_SHMEXEC = 0x00080000
52360 +};
52361 +
52362 +enum {
52363 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52364 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52365 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52366 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52367 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52368 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52369 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52370 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52371 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52372 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52373 +};
52374 +
52375 +enum {
52376 + GR_ID_USER = 0x01,
52377 + GR_ID_GROUP = 0x02,
52378 +};
52379 +
52380 +enum {
52381 + GR_ID_ALLOW = 0x01,
52382 + GR_ID_DENY = 0x02,
52383 +};
52384 +
52385 +#define GR_CRASH_RES 31
52386 +#define GR_UIDTABLE_MAX 500
52387 +
52388 +/* begin resource learning section */
52389 +enum {
52390 + GR_RLIM_CPU_BUMP = 60,
52391 + GR_RLIM_FSIZE_BUMP = 50000,
52392 + GR_RLIM_DATA_BUMP = 10000,
52393 + GR_RLIM_STACK_BUMP = 1000,
52394 + GR_RLIM_CORE_BUMP = 10000,
52395 + GR_RLIM_RSS_BUMP = 500000,
52396 + GR_RLIM_NPROC_BUMP = 1,
52397 + GR_RLIM_NOFILE_BUMP = 5,
52398 + GR_RLIM_MEMLOCK_BUMP = 50000,
52399 + GR_RLIM_AS_BUMP = 500000,
52400 + GR_RLIM_LOCKS_BUMP = 2,
52401 + GR_RLIM_SIGPENDING_BUMP = 5,
52402 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52403 + GR_RLIM_NICE_BUMP = 1,
52404 + GR_RLIM_RTPRIO_BUMP = 1,
52405 + GR_RLIM_RTTIME_BUMP = 1000000
52406 +};
52407 +
52408 +#endif
52409 diff -urNp linux-3.0.3/include/linux/grinternal.h linux-3.0.3/include/linux/grinternal.h
52410 --- linux-3.0.3/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52411 +++ linux-3.0.3/include/linux/grinternal.h 2011-08-23 21:48:14.000000000 -0400
52412 @@ -0,0 +1,219 @@
52413 +#ifndef __GRINTERNAL_H
52414 +#define __GRINTERNAL_H
52415 +
52416 +#ifdef CONFIG_GRKERNSEC
52417 +
52418 +#include <linux/fs.h>
52419 +#include <linux/mnt_namespace.h>
52420 +#include <linux/nsproxy.h>
52421 +#include <linux/gracl.h>
52422 +#include <linux/grdefs.h>
52423 +#include <linux/grmsg.h>
52424 +
52425 +void gr_add_learn_entry(const char *fmt, ...)
52426 + __attribute__ ((format (printf, 1, 2)));
52427 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52428 + const struct vfsmount *mnt);
52429 +__u32 gr_check_create(const struct dentry *new_dentry,
52430 + const struct dentry *parent,
52431 + const struct vfsmount *mnt, const __u32 mode);
52432 +int gr_check_protected_task(const struct task_struct *task);
52433 +__u32 to_gr_audit(const __u32 reqmode);
52434 +int gr_set_acls(const int type);
52435 +int gr_apply_subject_to_task(struct task_struct *task);
52436 +int gr_acl_is_enabled(void);
52437 +char gr_roletype_to_char(void);
52438 +
52439 +void gr_handle_alertkill(struct task_struct *task);
52440 +char *gr_to_filename(const struct dentry *dentry,
52441 + const struct vfsmount *mnt);
52442 +char *gr_to_filename1(const struct dentry *dentry,
52443 + const struct vfsmount *mnt);
52444 +char *gr_to_filename2(const struct dentry *dentry,
52445 + const struct vfsmount *mnt);
52446 +char *gr_to_filename3(const struct dentry *dentry,
52447 + const struct vfsmount *mnt);
52448 +
52449 +extern int grsec_enable_harden_ptrace;
52450 +extern int grsec_enable_link;
52451 +extern int grsec_enable_fifo;
52452 +extern int grsec_enable_execve;
52453 +extern int grsec_enable_shm;
52454 +extern int grsec_enable_execlog;
52455 +extern int grsec_enable_signal;
52456 +extern int grsec_enable_audit_ptrace;
52457 +extern int grsec_enable_forkfail;
52458 +extern int grsec_enable_time;
52459 +extern int grsec_enable_rofs;
52460 +extern int grsec_enable_chroot_shmat;
52461 +extern int grsec_enable_chroot_mount;
52462 +extern int grsec_enable_chroot_double;
52463 +extern int grsec_enable_chroot_pivot;
52464 +extern int grsec_enable_chroot_chdir;
52465 +extern int grsec_enable_chroot_chmod;
52466 +extern int grsec_enable_chroot_mknod;
52467 +extern int grsec_enable_chroot_fchdir;
52468 +extern int grsec_enable_chroot_nice;
52469 +extern int grsec_enable_chroot_execlog;
52470 +extern int grsec_enable_chroot_caps;
52471 +extern int grsec_enable_chroot_sysctl;
52472 +extern int grsec_enable_chroot_unix;
52473 +extern int grsec_enable_tpe;
52474 +extern int grsec_tpe_gid;
52475 +extern int grsec_enable_tpe_all;
52476 +extern int grsec_enable_tpe_invert;
52477 +extern int grsec_enable_socket_all;
52478 +extern int grsec_socket_all_gid;
52479 +extern int grsec_enable_socket_client;
52480 +extern int grsec_socket_client_gid;
52481 +extern int grsec_enable_socket_server;
52482 +extern int grsec_socket_server_gid;
52483 +extern int grsec_audit_gid;
52484 +extern int grsec_enable_group;
52485 +extern int grsec_enable_audit_textrel;
52486 +extern int grsec_enable_log_rwxmaps;
52487 +extern int grsec_enable_mount;
52488 +extern int grsec_enable_chdir;
52489 +extern int grsec_resource_logging;
52490 +extern int grsec_enable_blackhole;
52491 +extern int grsec_lastack_retries;
52492 +extern int grsec_enable_brute;
52493 +extern int grsec_lock;
52494 +
52495 +extern spinlock_t grsec_alert_lock;
52496 +extern unsigned long grsec_alert_wtime;
52497 +extern unsigned long grsec_alert_fyet;
52498 +
52499 +extern spinlock_t grsec_audit_lock;
52500 +
52501 +extern rwlock_t grsec_exec_file_lock;
52502 +
52503 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52504 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52505 + (tsk)->exec_file->f_vfsmnt) : "/")
52506 +
52507 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52508 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52509 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52510 +
52511 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52512 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52513 + (tsk)->exec_file->f_vfsmnt) : "/")
52514 +
52515 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52516 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52517 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52518 +
52519 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52520 +
52521 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52522 +
52523 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52524 + (task)->pid, (cred)->uid, \
52525 + (cred)->euid, (cred)->gid, (cred)->egid, \
52526 + gr_parent_task_fullpath(task), \
52527 + (task)->real_parent->comm, (task)->real_parent->pid, \
52528 + (pcred)->uid, (pcred)->euid, \
52529 + (pcred)->gid, (pcred)->egid
52530 +
52531 +#define GR_CHROOT_CAPS {{ \
52532 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52533 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52534 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52535 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52536 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52537 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52538 +
52539 +#define security_learn(normal_msg,args...) \
52540 +({ \
52541 + read_lock(&grsec_exec_file_lock); \
52542 + gr_add_learn_entry(normal_msg "\n", ## args); \
52543 + read_unlock(&grsec_exec_file_lock); \
52544 +})
52545 +
52546 +enum {
52547 + GR_DO_AUDIT,
52548 + GR_DONT_AUDIT,
52549 + /* used for non-audit messages that we shouldn't kill the task on */
52550 + GR_DONT_AUDIT_GOOD
52551 +};
52552 +
52553 +enum {
52554 + GR_TTYSNIFF,
52555 + GR_RBAC,
52556 + GR_RBAC_STR,
52557 + GR_STR_RBAC,
52558 + GR_RBAC_MODE2,
52559 + GR_RBAC_MODE3,
52560 + GR_FILENAME,
52561 + GR_SYSCTL_HIDDEN,
52562 + GR_NOARGS,
52563 + GR_ONE_INT,
52564 + GR_ONE_INT_TWO_STR,
52565 + GR_ONE_STR,
52566 + GR_STR_INT,
52567 + GR_TWO_STR_INT,
52568 + GR_TWO_INT,
52569 + GR_TWO_U64,
52570 + GR_THREE_INT,
52571 + GR_FIVE_INT_TWO_STR,
52572 + GR_TWO_STR,
52573 + GR_THREE_STR,
52574 + GR_FOUR_STR,
52575 + GR_STR_FILENAME,
52576 + GR_FILENAME_STR,
52577 + GR_FILENAME_TWO_INT,
52578 + GR_FILENAME_TWO_INT_STR,
52579 + GR_TEXTREL,
52580 + GR_PTRACE,
52581 + GR_RESOURCE,
52582 + GR_CAP,
52583 + GR_SIG,
52584 + GR_SIG2,
52585 + GR_CRASH1,
52586 + GR_CRASH2,
52587 + GR_PSACCT,
52588 + GR_RWXMAP
52589 +};
52590 +
52591 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52592 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52593 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52594 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52595 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52596 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52597 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52598 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52599 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52600 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52601 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52602 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52603 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52604 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52605 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52606 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52607 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52608 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52609 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52610 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52611 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52612 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52613 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52614 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52615 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52616 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52617 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52618 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52619 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52620 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52621 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52622 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52623 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52624 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52625 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52626 +
52627 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52628 +
52629 +#endif
52630 +
52631 +#endif
52632 diff -urNp linux-3.0.3/include/linux/grmsg.h linux-3.0.3/include/linux/grmsg.h
52633 --- linux-3.0.3/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52634 +++ linux-3.0.3/include/linux/grmsg.h 2011-08-23 21:48:14.000000000 -0400
52635 @@ -0,0 +1,108 @@
52636 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52637 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52638 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52639 +#define GR_STOPMOD_MSG "denied modification of module state by "
52640 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52641 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52642 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52643 +#define GR_IOPL_MSG "denied use of iopl() by "
52644 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52645 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52646 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52647 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52648 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52649 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52650 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52651 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52652 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52653 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52654 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52655 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52656 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52657 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52658 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52659 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52660 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52661 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52662 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52663 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52664 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52665 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52666 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52667 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52668 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52669 +#define GR_NPROC_MSG "denied overstep of process limit by "
52670 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52671 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52672 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52673 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52674 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52675 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52676 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52677 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52678 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52679 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52680 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52681 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52682 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52683 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52684 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52685 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52686 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52687 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52688 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52689 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52690 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52691 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52692 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52693 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52694 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52695 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52696 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52697 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52698 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52699 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52700 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52701 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52702 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52703 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52704 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52705 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52706 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52707 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52708 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52709 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52710 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52711 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52712 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52713 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52714 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52715 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52716 +#define GR_TIME_MSG "time set by "
52717 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52718 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52719 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52720 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52721 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52722 +#define GR_BIND_MSG "denied bind() by "
52723 +#define GR_CONNECT_MSG "denied connect() by "
52724 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52725 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52726 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52727 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52728 +#define GR_CAP_ACL_MSG "use of %s denied for "
52729 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52730 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52731 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52732 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52733 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52734 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52735 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52736 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52737 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52738 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52739 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52740 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52741 +#define GR_VM86_MSG "denied use of vm86 by "
52742 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52743 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52744 diff -urNp linux-3.0.3/include/linux/grsecurity.h linux-3.0.3/include/linux/grsecurity.h
52745 --- linux-3.0.3/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52746 +++ linux-3.0.3/include/linux/grsecurity.h 2011-08-23 21:48:14.000000000 -0400
52747 @@ -0,0 +1,228 @@
52748 +#ifndef GR_SECURITY_H
52749 +#define GR_SECURITY_H
52750 +#include <linux/fs.h>
52751 +#include <linux/fs_struct.h>
52752 +#include <linux/binfmts.h>
52753 +#include <linux/gracl.h>
52754 +
52755 +/* notify of brain-dead configs */
52756 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52757 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52758 +#endif
52759 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52760 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52761 +#endif
52762 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52763 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52764 +#endif
52765 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52766 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52767 +#endif
52768 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52769 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52770 +#endif
52771 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52772 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
52773 +#endif
52774 +
52775 +#include <linux/compat.h>
52776 +
52777 +struct user_arg_ptr {
52778 +#ifdef CONFIG_COMPAT
52779 + bool is_compat;
52780 +#endif
52781 + union {
52782 + const char __user *const __user *native;
52783 +#ifdef CONFIG_COMPAT
52784 + compat_uptr_t __user *compat;
52785 +#endif
52786 + } ptr;
52787 +};
52788 +
52789 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52790 +void gr_handle_brute_check(void);
52791 +void gr_handle_kernel_exploit(void);
52792 +int gr_process_user_ban(void);
52793 +
52794 +char gr_roletype_to_char(void);
52795 +
52796 +int gr_acl_enable_at_secure(void);
52797 +
52798 +int gr_check_user_change(int real, int effective, int fs);
52799 +int gr_check_group_change(int real, int effective, int fs);
52800 +
52801 +void gr_del_task_from_ip_table(struct task_struct *p);
52802 +
52803 +int gr_pid_is_chrooted(struct task_struct *p);
52804 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52805 +int gr_handle_chroot_nice(void);
52806 +int gr_handle_chroot_sysctl(const int op);
52807 +int gr_handle_chroot_setpriority(struct task_struct *p,
52808 + const int niceval);
52809 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52810 +int gr_handle_chroot_chroot(const struct dentry *dentry,
52811 + const struct vfsmount *mnt);
52812 +int gr_handle_chroot_caps(struct path *path);
52813 +void gr_handle_chroot_chdir(struct path *path);
52814 +int gr_handle_chroot_chmod(const struct dentry *dentry,
52815 + const struct vfsmount *mnt, const int mode);
52816 +int gr_handle_chroot_mknod(const struct dentry *dentry,
52817 + const struct vfsmount *mnt, const int mode);
52818 +int gr_handle_chroot_mount(const struct dentry *dentry,
52819 + const struct vfsmount *mnt,
52820 + const char *dev_name);
52821 +int gr_handle_chroot_pivot(void);
52822 +int gr_handle_chroot_unix(const pid_t pid);
52823 +
52824 +int gr_handle_rawio(const struct inode *inode);
52825 +int gr_handle_nproc(void);
52826 +
52827 +void gr_handle_ioperm(void);
52828 +void gr_handle_iopl(void);
52829 +
52830 +int gr_tpe_allow(const struct file *file);
52831 +
52832 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
52833 +void gr_clear_chroot_entries(struct task_struct *task);
52834 +
52835 +void gr_log_forkfail(const int retval);
52836 +void gr_log_timechange(void);
52837 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
52838 +void gr_log_chdir(const struct dentry *dentry,
52839 + const struct vfsmount *mnt);
52840 +void gr_log_chroot_exec(const struct dentry *dentry,
52841 + const struct vfsmount *mnt);
52842 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
52843 +void gr_log_remount(const char *devname, const int retval);
52844 +void gr_log_unmount(const char *devname, const int retval);
52845 +void gr_log_mount(const char *from, const char *to, const int retval);
52846 +void gr_log_textrel(struct vm_area_struct *vma);
52847 +void gr_log_rwxmmap(struct file *file);
52848 +void gr_log_rwxmprotect(struct file *file);
52849 +
52850 +int gr_handle_follow_link(const struct inode *parent,
52851 + const struct inode *inode,
52852 + const struct dentry *dentry,
52853 + const struct vfsmount *mnt);
52854 +int gr_handle_fifo(const struct dentry *dentry,
52855 + const struct vfsmount *mnt,
52856 + const struct dentry *dir, const int flag,
52857 + const int acc_mode);
52858 +int gr_handle_hardlink(const struct dentry *dentry,
52859 + const struct vfsmount *mnt,
52860 + struct inode *inode,
52861 + const int mode, const char *to);
52862 +
52863 +int gr_is_capable(const int cap);
52864 +int gr_is_capable_nolog(const int cap);
52865 +void gr_learn_resource(const struct task_struct *task, const int limit,
52866 + const unsigned long wanted, const int gt);
52867 +void gr_copy_label(struct task_struct *tsk);
52868 +void gr_handle_crash(struct task_struct *task, const int sig);
52869 +int gr_handle_signal(const struct task_struct *p, const int sig);
52870 +int gr_check_crash_uid(const uid_t uid);
52871 +int gr_check_protected_task(const struct task_struct *task);
52872 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
52873 +int gr_acl_handle_mmap(const struct file *file,
52874 + const unsigned long prot);
52875 +int gr_acl_handle_mprotect(const struct file *file,
52876 + const unsigned long prot);
52877 +int gr_check_hidden_task(const struct task_struct *tsk);
52878 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
52879 + const struct vfsmount *mnt);
52880 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
52881 + const struct vfsmount *mnt);
52882 +__u32 gr_acl_handle_access(const struct dentry *dentry,
52883 + const struct vfsmount *mnt, const int fmode);
52884 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
52885 + const struct vfsmount *mnt, mode_t mode);
52886 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
52887 + const struct vfsmount *mnt, mode_t mode);
52888 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
52889 + const struct vfsmount *mnt);
52890 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
52891 + const struct vfsmount *mnt);
52892 +int gr_handle_ptrace(struct task_struct *task, const long request);
52893 +int gr_handle_proc_ptrace(struct task_struct *task);
52894 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
52895 + const struct vfsmount *mnt);
52896 +int gr_check_crash_exec(const struct file *filp);
52897 +int gr_acl_is_enabled(void);
52898 +void gr_set_kernel_label(struct task_struct *task);
52899 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
52900 + const gid_t gid);
52901 +int gr_set_proc_label(const struct dentry *dentry,
52902 + const struct vfsmount *mnt,
52903 + const int unsafe_share);
52904 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
52905 + const struct vfsmount *mnt);
52906 +__u32 gr_acl_handle_open(const struct dentry *dentry,
52907 + const struct vfsmount *mnt, const int fmode);
52908 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
52909 + const struct dentry *p_dentry,
52910 + const struct vfsmount *p_mnt, const int fmode,
52911 + const int imode);
52912 +void gr_handle_create(const struct dentry *dentry,
52913 + const struct vfsmount *mnt);
52914 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
52915 + const struct dentry *parent_dentry,
52916 + const struct vfsmount *parent_mnt,
52917 + const int mode);
52918 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
52919 + const struct dentry *parent_dentry,
52920 + const struct vfsmount *parent_mnt);
52921 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
52922 + const struct vfsmount *mnt);
52923 +void gr_handle_delete(const ino_t ino, const dev_t dev);
52924 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
52925 + const struct vfsmount *mnt);
52926 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
52927 + const struct dentry *parent_dentry,
52928 + const struct vfsmount *parent_mnt,
52929 + const char *from);
52930 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
52931 + const struct dentry *parent_dentry,
52932 + const struct vfsmount *parent_mnt,
52933 + const struct dentry *old_dentry,
52934 + const struct vfsmount *old_mnt, const char *to);
52935 +int gr_acl_handle_rename(struct dentry *new_dentry,
52936 + struct dentry *parent_dentry,
52937 + const struct vfsmount *parent_mnt,
52938 + struct dentry *old_dentry,
52939 + struct inode *old_parent_inode,
52940 + struct vfsmount *old_mnt, const char *newname);
52941 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52942 + struct dentry *old_dentry,
52943 + struct dentry *new_dentry,
52944 + struct vfsmount *mnt, const __u8 replace);
52945 +__u32 gr_check_link(const struct dentry *new_dentry,
52946 + const struct dentry *parent_dentry,
52947 + const struct vfsmount *parent_mnt,
52948 + const struct dentry *old_dentry,
52949 + const struct vfsmount *old_mnt);
52950 +int gr_acl_handle_filldir(const struct file *file, const char *name,
52951 + const unsigned int namelen, const ino_t ino);
52952 +
52953 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
52954 + const struct vfsmount *mnt);
52955 +void gr_acl_handle_exit(void);
52956 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
52957 +int gr_acl_handle_procpidmem(const struct task_struct *task);
52958 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
52959 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
52960 +void gr_audit_ptrace(struct task_struct *task);
52961 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
52962 +
52963 +#ifdef CONFIG_GRKERNSEC
52964 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
52965 +void gr_handle_vm86(void);
52966 +void gr_handle_mem_readwrite(u64 from, u64 to);
52967 +
52968 +extern int grsec_enable_dmesg;
52969 +extern int grsec_disable_privio;
52970 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52971 +extern int grsec_enable_chroot_findtask;
52972 +#endif
52973 +#endif
52974 +
52975 +#endif
52976 diff -urNp linux-3.0.3/include/linux/grsock.h linux-3.0.3/include/linux/grsock.h
52977 --- linux-3.0.3/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
52978 +++ linux-3.0.3/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
52979 @@ -0,0 +1,19 @@
52980 +#ifndef __GRSOCK_H
52981 +#define __GRSOCK_H
52982 +
52983 +extern void gr_attach_curr_ip(const struct sock *sk);
52984 +extern int gr_handle_sock_all(const int family, const int type,
52985 + const int protocol);
52986 +extern int gr_handle_sock_server(const struct sockaddr *sck);
52987 +extern int gr_handle_sock_server_other(const struct sock *sck);
52988 +extern int gr_handle_sock_client(const struct sockaddr *sck);
52989 +extern int gr_search_connect(struct socket * sock,
52990 + struct sockaddr_in * addr);
52991 +extern int gr_search_bind(struct socket * sock,
52992 + struct sockaddr_in * addr);
52993 +extern int gr_search_listen(struct socket * sock);
52994 +extern int gr_search_accept(struct socket * sock);
52995 +extern int gr_search_socket(const int domain, const int type,
52996 + const int protocol);
52997 +
52998 +#endif
52999 diff -urNp linux-3.0.3/include/linux/hid.h linux-3.0.3/include/linux/hid.h
53000 --- linux-3.0.3/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
53001 +++ linux-3.0.3/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
53002 @@ -675,7 +675,7 @@ struct hid_ll_driver {
53003 unsigned int code, int value);
53004
53005 int (*parse)(struct hid_device *hdev);
53006 -};
53007 +} __no_const;
53008
53009 #define PM_HINT_FULLON 1<<5
53010 #define PM_HINT_NORMAL 1<<1
53011 diff -urNp linux-3.0.3/include/linux/highmem.h linux-3.0.3/include/linux/highmem.h
53012 --- linux-3.0.3/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
53013 +++ linux-3.0.3/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
53014 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53015 kunmap_atomic(kaddr, KM_USER0);
53016 }
53017
53018 +static inline void sanitize_highpage(struct page *page)
53019 +{
53020 + void *kaddr;
53021 + unsigned long flags;
53022 +
53023 + local_irq_save(flags);
53024 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53025 + clear_page(kaddr);
53026 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53027 + local_irq_restore(flags);
53028 +}
53029 +
53030 static inline void zero_user_segments(struct page *page,
53031 unsigned start1, unsigned end1,
53032 unsigned start2, unsigned end2)
53033 diff -urNp linux-3.0.3/include/linux/i2c.h linux-3.0.3/include/linux/i2c.h
53034 --- linux-3.0.3/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
53035 +++ linux-3.0.3/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
53036 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53037 /* To determine what the adapter supports */
53038 u32 (*functionality) (struct i2c_adapter *);
53039 };
53040 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53041
53042 /*
53043 * i2c_adapter is the structure used to identify a physical i2c bus along
53044 diff -urNp linux-3.0.3/include/linux/i2o.h linux-3.0.3/include/linux/i2o.h
53045 --- linux-3.0.3/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
53046 +++ linux-3.0.3/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
53047 @@ -564,7 +564,7 @@ struct i2o_controller {
53048 struct i2o_device *exec; /* Executive */
53049 #if BITS_PER_LONG == 64
53050 spinlock_t context_list_lock; /* lock for context_list */
53051 - atomic_t context_list_counter; /* needed for unique contexts */
53052 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53053 struct list_head context_list; /* list of context id's
53054 and pointers */
53055 #endif
53056 diff -urNp linux-3.0.3/include/linux/init.h linux-3.0.3/include/linux/init.h
53057 --- linux-3.0.3/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
53058 +++ linux-3.0.3/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
53059 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53060
53061 /* Each module must use one module_init(). */
53062 #define module_init(initfn) \
53063 - static inline initcall_t __inittest(void) \
53064 + static inline __used initcall_t __inittest(void) \
53065 { return initfn; } \
53066 int init_module(void) __attribute__((alias(#initfn)));
53067
53068 /* This is only required if you want to be unloadable. */
53069 #define module_exit(exitfn) \
53070 - static inline exitcall_t __exittest(void) \
53071 + static inline __used exitcall_t __exittest(void) \
53072 { return exitfn; } \
53073 void cleanup_module(void) __attribute__((alias(#exitfn)));
53074
53075 diff -urNp linux-3.0.3/include/linux/init_task.h linux-3.0.3/include/linux/init_task.h
53076 --- linux-3.0.3/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
53077 +++ linux-3.0.3/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
53078 @@ -126,6 +126,12 @@ extern struct cred init_cred;
53079 # define INIT_PERF_EVENTS(tsk)
53080 #endif
53081
53082 +#ifdef CONFIG_X86
53083 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53084 +#else
53085 +#define INIT_TASK_THREAD_INFO
53086 +#endif
53087 +
53088 /*
53089 * INIT_TASK is used to set up the first task table, touch at
53090 * your own risk!. Base=0, limit=0x1fffff (=2MB)
53091 @@ -164,6 +170,7 @@ extern struct cred init_cred;
53092 RCU_INIT_POINTER(.cred, &init_cred), \
53093 .comm = "swapper", \
53094 .thread = INIT_THREAD, \
53095 + INIT_TASK_THREAD_INFO \
53096 .fs = &init_fs, \
53097 .files = &init_files, \
53098 .signal = &init_signals, \
53099 diff -urNp linux-3.0.3/include/linux/intel-iommu.h linux-3.0.3/include/linux/intel-iommu.h
53100 --- linux-3.0.3/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
53101 +++ linux-3.0.3/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
53102 @@ -296,7 +296,7 @@ struct iommu_flush {
53103 u8 fm, u64 type);
53104 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53105 unsigned int size_order, u64 type);
53106 -};
53107 +} __no_const;
53108
53109 enum {
53110 SR_DMAR_FECTL_REG,
53111 diff -urNp linux-3.0.3/include/linux/interrupt.h linux-3.0.3/include/linux/interrupt.h
53112 --- linux-3.0.3/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
53113 +++ linux-3.0.3/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
53114 @@ -422,7 +422,7 @@ enum
53115 /* map softirq index to softirq name. update 'softirq_to_name' in
53116 * kernel/softirq.c when adding a new softirq.
53117 */
53118 -extern char *softirq_to_name[NR_SOFTIRQS];
53119 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53120
53121 /* softirq mask and active fields moved to irq_cpustat_t in
53122 * asm/hardirq.h to get better cache usage. KAO
53123 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53124
53125 struct softirq_action
53126 {
53127 - void (*action)(struct softirq_action *);
53128 + void (*action)(void);
53129 };
53130
53131 asmlinkage void do_softirq(void);
53132 asmlinkage void __do_softirq(void);
53133 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53134 +extern void open_softirq(int nr, void (*action)(void));
53135 extern void softirq_init(void);
53136 static inline void __raise_softirq_irqoff(unsigned int nr)
53137 {
53138 diff -urNp linux-3.0.3/include/linux/kallsyms.h linux-3.0.3/include/linux/kallsyms.h
53139 --- linux-3.0.3/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
53140 +++ linux-3.0.3/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
53141 @@ -15,7 +15,8 @@
53142
53143 struct module;
53144
53145 -#ifdef CONFIG_KALLSYMS
53146 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53147 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53148 /* Lookup the address for a symbol. Returns 0 if not found. */
53149 unsigned long kallsyms_lookup_name(const char *name);
53150
53151 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53152 /* Stupid that this does nothing, but I didn't create this mess. */
53153 #define __print_symbol(fmt, addr)
53154 #endif /*CONFIG_KALLSYMS*/
53155 +#else /* when included by kallsyms.c, vsnprintf.c, or
53156 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53157 +extern void __print_symbol(const char *fmt, unsigned long address);
53158 +extern int sprint_backtrace(char *buffer, unsigned long address);
53159 +extern int sprint_symbol(char *buffer, unsigned long address);
53160 +const char *kallsyms_lookup(unsigned long addr,
53161 + unsigned long *symbolsize,
53162 + unsigned long *offset,
53163 + char **modname, char *namebuf);
53164 +#endif
53165
53166 /* This macro allows us to keep printk typechecking */
53167 static void __check_printsym_format(const char *fmt, ...)
53168 diff -urNp linux-3.0.3/include/linux/kgdb.h linux-3.0.3/include/linux/kgdb.h
53169 --- linux-3.0.3/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
53170 +++ linux-3.0.3/include/linux/kgdb.h 2011-08-23 21:47:56.000000000 -0400
53171 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53172 extern int kgdb_io_module_registered;
53173
53174 extern atomic_t kgdb_setting_breakpoint;
53175 -extern atomic_t kgdb_cpu_doing_single_step;
53176 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53177
53178 extern struct task_struct *kgdb_usethread;
53179 extern struct task_struct *kgdb_contthread;
53180 @@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53181 * hardware debug registers.
53182 */
53183 struct kgdb_arch {
53184 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53185 - unsigned long flags;
53186 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53187 + const unsigned long flags;
53188
53189 int (*set_breakpoint)(unsigned long, char *);
53190 int (*remove_breakpoint)(unsigned long, char *);
53191 @@ -268,14 +268,14 @@ struct kgdb_arch {
53192 * not a console
53193 */
53194 struct kgdb_io {
53195 - const char *name;
53196 + const char * const name;
53197 int (*read_char) (void);
53198 void (*write_char) (u8);
53199 void (*flush) (void);
53200 int (*init) (void);
53201 void (*pre_exception) (void);
53202 void (*post_exception) (void);
53203 - int is_console;
53204 + const int is_console;
53205 };
53206
53207 extern struct kgdb_arch arch_kgdb_ops;
53208 diff -urNp linux-3.0.3/include/linux/kmod.h linux-3.0.3/include/linux/kmod.h
53209 --- linux-3.0.3/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
53210 +++ linux-3.0.3/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
53211 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
53212 * usually useless though. */
53213 extern int __request_module(bool wait, const char *name, ...) \
53214 __attribute__((format(printf, 2, 3)));
53215 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53216 + __attribute__((format(printf, 3, 4)));
53217 #define request_module(mod...) __request_module(true, mod)
53218 #define request_module_nowait(mod...) __request_module(false, mod)
53219 #define try_then_request_module(x, mod...) \
53220 diff -urNp linux-3.0.3/include/linux/kvm_host.h linux-3.0.3/include/linux/kvm_host.h
53221 --- linux-3.0.3/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
53222 +++ linux-3.0.3/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
53223 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53224 void vcpu_load(struct kvm_vcpu *vcpu);
53225 void vcpu_put(struct kvm_vcpu *vcpu);
53226
53227 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53228 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53229 struct module *module);
53230 void kvm_exit(void);
53231
53232 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53233 struct kvm_guest_debug *dbg);
53234 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53235
53236 -int kvm_arch_init(void *opaque);
53237 +int kvm_arch_init(const void *opaque);
53238 void kvm_arch_exit(void);
53239
53240 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53241 diff -urNp linux-3.0.3/include/linux/libata.h linux-3.0.3/include/linux/libata.h
53242 --- linux-3.0.3/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
53243 +++ linux-3.0.3/include/linux/libata.h 2011-08-23 21:47:56.000000000 -0400
53244 @@ -898,7 +898,7 @@ struct ata_port_operations {
53245 * ->inherits must be the last field and all the preceding
53246 * fields must be pointers.
53247 */
53248 - const struct ata_port_operations *inherits;
53249 + const struct ata_port_operations * const inherits;
53250 };
53251
53252 struct ata_port_info {
53253 diff -urNp linux-3.0.3/include/linux/mca.h linux-3.0.3/include/linux/mca.h
53254 --- linux-3.0.3/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
53255 +++ linux-3.0.3/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
53256 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53257 int region);
53258 void * (*mca_transform_memory)(struct mca_device *,
53259 void *memory);
53260 -};
53261 +} __no_const;
53262
53263 struct mca_bus {
53264 u64 default_dma_mask;
53265 diff -urNp linux-3.0.3/include/linux/memory.h linux-3.0.3/include/linux/memory.h
53266 --- linux-3.0.3/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
53267 +++ linux-3.0.3/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
53268 @@ -144,7 +144,7 @@ struct memory_accessor {
53269 size_t count);
53270 ssize_t (*write)(struct memory_accessor *, const char *buf,
53271 off_t offset, size_t count);
53272 -};
53273 +} __no_const;
53274
53275 /*
53276 * Kernel text modification mutex, used for code patching. Users of this lock
53277 diff -urNp linux-3.0.3/include/linux/mfd/abx500.h linux-3.0.3/include/linux/mfd/abx500.h
53278 --- linux-3.0.3/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
53279 +++ linux-3.0.3/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
53280 @@ -234,6 +234,7 @@ struct abx500_ops {
53281 int (*event_registers_startup_state_get) (struct device *, u8 *);
53282 int (*startup_irq_enabled) (struct device *, unsigned int);
53283 };
53284 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53285
53286 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53287 void abx500_remove_ops(struct device *dev);
53288 diff -urNp linux-3.0.3/include/linux/mm.h linux-3.0.3/include/linux/mm.h
53289 --- linux-3.0.3/include/linux/mm.h 2011-08-23 21:44:40.000000000 -0400
53290 +++ linux-3.0.3/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
53291 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53292
53293 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53294 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53295 +
53296 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53297 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53298 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53299 +#else
53300 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53301 +#endif
53302 +
53303 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53304 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53305
53306 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
53307 int set_page_dirty_lock(struct page *page);
53308 int clear_page_dirty_for_io(struct page *page);
53309
53310 -/* Is the vma a continuation of the stack vma above it? */
53311 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53312 -{
53313 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53314 -}
53315 -
53316 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53317 - unsigned long addr)
53318 -{
53319 - return (vma->vm_flags & VM_GROWSDOWN) &&
53320 - (vma->vm_start == addr) &&
53321 - !vma_growsdown(vma->vm_prev, addr);
53322 -}
53323 -
53324 -/* Is the vma a continuation of the stack vma below it? */
53325 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53326 -{
53327 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53328 -}
53329 -
53330 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53331 - unsigned long addr)
53332 -{
53333 - return (vma->vm_flags & VM_GROWSUP) &&
53334 - (vma->vm_end == addr) &&
53335 - !vma_growsup(vma->vm_next, addr);
53336 -}
53337 -
53338 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53339 unsigned long old_addr, struct vm_area_struct *new_vma,
53340 unsigned long new_addr, unsigned long len);
53341 @@ -1169,6 +1148,15 @@ struct shrinker {
53342 extern void register_shrinker(struct shrinker *);
53343 extern void unregister_shrinker(struct shrinker *);
53344
53345 +#ifdef CONFIG_MMU
53346 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
53347 +#else
53348 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
53349 +{
53350 + return __pgprot(0);
53351 +}
53352 +#endif
53353 +
53354 int vma_wants_writenotify(struct vm_area_struct *vma);
53355
53356 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53357 @@ -1452,6 +1440,7 @@ out:
53358 }
53359
53360 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53361 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53362
53363 extern unsigned long do_brk(unsigned long, unsigned long);
53364
53365 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
53366 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53367 struct vm_area_struct **pprev);
53368
53369 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53370 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53371 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53372 +
53373 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53374 NULL if none. Assume start_addr < end_addr. */
53375 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53376 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
53377 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53378 }
53379
53380 -#ifdef CONFIG_MMU
53381 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53382 -#else
53383 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53384 -{
53385 - return __pgprot(0);
53386 -}
53387 -#endif
53388 -
53389 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53390 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53391 unsigned long pfn, unsigned long size, pgprot_t);
53392 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
53393 extern int sysctl_memory_failure_early_kill;
53394 extern int sysctl_memory_failure_recovery;
53395 extern void shake_page(struct page *p, int access);
53396 -extern atomic_long_t mce_bad_pages;
53397 +extern atomic_long_unchecked_t mce_bad_pages;
53398 extern int soft_offline_page(struct page *page, int flags);
53399
53400 extern void dump_page(struct page *page);
53401 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
53402 unsigned int pages_per_huge_page);
53403 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53404
53405 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53406 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53407 +#else
53408 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53409 +#endif
53410 +
53411 #endif /* __KERNEL__ */
53412 #endif /* _LINUX_MM_H */
53413 diff -urNp linux-3.0.3/include/linux/mm_types.h linux-3.0.3/include/linux/mm_types.h
53414 --- linux-3.0.3/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
53415 +++ linux-3.0.3/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
53416 @@ -184,6 +184,8 @@ struct vm_area_struct {
53417 #ifdef CONFIG_NUMA
53418 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53419 #endif
53420 +
53421 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53422 };
53423
53424 struct core_thread {
53425 @@ -316,6 +318,24 @@ struct mm_struct {
53426 #ifdef CONFIG_CPUMASK_OFFSTACK
53427 struct cpumask cpumask_allocation;
53428 #endif
53429 +
53430 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53431 + unsigned long pax_flags;
53432 +#endif
53433 +
53434 +#ifdef CONFIG_PAX_DLRESOLVE
53435 + unsigned long call_dl_resolve;
53436 +#endif
53437 +
53438 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53439 + unsigned long call_syscall;
53440 +#endif
53441 +
53442 +#ifdef CONFIG_PAX_ASLR
53443 + unsigned long delta_mmap; /* randomized offset */
53444 + unsigned long delta_stack; /* randomized offset */
53445 +#endif
53446 +
53447 };
53448
53449 static inline void mm_init_cpumask(struct mm_struct *mm)
53450 diff -urNp linux-3.0.3/include/linux/mmu_notifier.h linux-3.0.3/include/linux/mmu_notifier.h
53451 --- linux-3.0.3/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
53452 +++ linux-3.0.3/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
53453 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53454 */
53455 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53456 ({ \
53457 - pte_t __pte; \
53458 + pte_t ___pte; \
53459 struct vm_area_struct *___vma = __vma; \
53460 unsigned long ___address = __address; \
53461 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53462 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53463 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53464 - __pte; \
53465 + ___pte; \
53466 })
53467
53468 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53469 diff -urNp linux-3.0.3/include/linux/mmzone.h linux-3.0.3/include/linux/mmzone.h
53470 --- linux-3.0.3/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
53471 +++ linux-3.0.3/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
53472 @@ -350,7 +350,7 @@ struct zone {
53473 unsigned long flags; /* zone flags, see below */
53474
53475 /* Zone statistics */
53476 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53477 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53478
53479 /*
53480 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53481 diff -urNp linux-3.0.3/include/linux/mod_devicetable.h linux-3.0.3/include/linux/mod_devicetable.h
53482 --- linux-3.0.3/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
53483 +++ linux-3.0.3/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
53484 @@ -12,7 +12,7 @@
53485 typedef unsigned long kernel_ulong_t;
53486 #endif
53487
53488 -#define PCI_ANY_ID (~0)
53489 +#define PCI_ANY_ID ((__u16)~0)
53490
53491 struct pci_device_id {
53492 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53493 @@ -131,7 +131,7 @@ struct usb_device_id {
53494 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53495 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53496
53497 -#define HID_ANY_ID (~0)
53498 +#define HID_ANY_ID (~0U)
53499
53500 struct hid_device_id {
53501 __u16 bus;
53502 diff -urNp linux-3.0.3/include/linux/module.h linux-3.0.3/include/linux/module.h
53503 --- linux-3.0.3/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
53504 +++ linux-3.0.3/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
53505 @@ -16,6 +16,7 @@
53506 #include <linux/kobject.h>
53507 #include <linux/moduleparam.h>
53508 #include <linux/tracepoint.h>
53509 +#include <linux/fs.h>
53510
53511 #include <linux/percpu.h>
53512 #include <asm/module.h>
53513 @@ -325,19 +326,16 @@ struct module
53514 int (*init)(void);
53515
53516 /* If this is non-NULL, vfree after init() returns */
53517 - void *module_init;
53518 + void *module_init_rx, *module_init_rw;
53519
53520 /* Here is the actual code + data, vfree'd on unload. */
53521 - void *module_core;
53522 + void *module_core_rx, *module_core_rw;
53523
53524 /* Here are the sizes of the init and core sections */
53525 - unsigned int init_size, core_size;
53526 + unsigned int init_size_rw, core_size_rw;
53527
53528 /* The size of the executable code in each section. */
53529 - unsigned int init_text_size, core_text_size;
53530 -
53531 - /* Size of RO sections of the module (text+rodata) */
53532 - unsigned int init_ro_size, core_ro_size;
53533 + unsigned int init_size_rx, core_size_rx;
53534
53535 /* Arch-specific module values */
53536 struct mod_arch_specific arch;
53537 @@ -393,6 +391,10 @@ struct module
53538 #ifdef CONFIG_EVENT_TRACING
53539 struct ftrace_event_call **trace_events;
53540 unsigned int num_trace_events;
53541 + struct file_operations trace_id;
53542 + struct file_operations trace_enable;
53543 + struct file_operations trace_format;
53544 + struct file_operations trace_filter;
53545 #endif
53546 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53547 unsigned int num_ftrace_callsites;
53548 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
53549 bool is_module_percpu_address(unsigned long addr);
53550 bool is_module_text_address(unsigned long addr);
53551
53552 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53553 +{
53554 +
53555 +#ifdef CONFIG_PAX_KERNEXEC
53556 + if (ktla_ktva(addr) >= (unsigned long)start &&
53557 + ktla_ktva(addr) < (unsigned long)start + size)
53558 + return 1;
53559 +#endif
53560 +
53561 + return ((void *)addr >= start && (void *)addr < start + size);
53562 +}
53563 +
53564 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53565 +{
53566 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53567 +}
53568 +
53569 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53570 +{
53571 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53572 +}
53573 +
53574 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53575 +{
53576 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53577 +}
53578 +
53579 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53580 +{
53581 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53582 +}
53583 +
53584 static inline int within_module_core(unsigned long addr, struct module *mod)
53585 {
53586 - return (unsigned long)mod->module_core <= addr &&
53587 - addr < (unsigned long)mod->module_core + mod->core_size;
53588 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53589 }
53590
53591 static inline int within_module_init(unsigned long addr, struct module *mod)
53592 {
53593 - return (unsigned long)mod->module_init <= addr &&
53594 - addr < (unsigned long)mod->module_init + mod->init_size;
53595 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53596 }
53597
53598 /* Search for module by name: must hold module_mutex. */
53599 diff -urNp linux-3.0.3/include/linux/moduleloader.h linux-3.0.3/include/linux/moduleloader.h
53600 --- linux-3.0.3/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
53601 +++ linux-3.0.3/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
53602 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53603 sections. Returns NULL on failure. */
53604 void *module_alloc(unsigned long size);
53605
53606 +#ifdef CONFIG_PAX_KERNEXEC
53607 +void *module_alloc_exec(unsigned long size);
53608 +#else
53609 +#define module_alloc_exec(x) module_alloc(x)
53610 +#endif
53611 +
53612 /* Free memory returned from module_alloc. */
53613 void module_free(struct module *mod, void *module_region);
53614
53615 +#ifdef CONFIG_PAX_KERNEXEC
53616 +void module_free_exec(struct module *mod, void *module_region);
53617 +#else
53618 +#define module_free_exec(x, y) module_free((x), (y))
53619 +#endif
53620 +
53621 /* Apply the given relocation to the (simplified) ELF. Return -error
53622 or 0. */
53623 int apply_relocate(Elf_Shdr *sechdrs,
53624 diff -urNp linux-3.0.3/include/linux/moduleparam.h linux-3.0.3/include/linux/moduleparam.h
53625 --- linux-3.0.3/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
53626 +++ linux-3.0.3/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
53627 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53628 * @len is usually just sizeof(string).
53629 */
53630 #define module_param_string(name, string, len, perm) \
53631 - static const struct kparam_string __param_string_##name \
53632 + static const struct kparam_string __param_string_##name __used \
53633 = { len, string }; \
53634 __module_param_call(MODULE_PARAM_PREFIX, name, \
53635 &param_ops_string, \
53636 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53637 * module_param_named() for why this might be necessary.
53638 */
53639 #define module_param_array_named(name, array, type, nump, perm) \
53640 - static const struct kparam_array __param_arr_##name \
53641 + static const struct kparam_array __param_arr_##name __used \
53642 = { .max = ARRAY_SIZE(array), .num = nump, \
53643 .ops = &param_ops_##type, \
53644 .elemsize = sizeof(array[0]), .elem = array }; \
53645 diff -urNp linux-3.0.3/include/linux/namei.h linux-3.0.3/include/linux/namei.h
53646 --- linux-3.0.3/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
53647 +++ linux-3.0.3/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
53648 @@ -24,7 +24,7 @@ struct nameidata {
53649 unsigned seq;
53650 int last_type;
53651 unsigned depth;
53652 - char *saved_names[MAX_NESTED_LINKS + 1];
53653 + const char *saved_names[MAX_NESTED_LINKS + 1];
53654
53655 /* Intent data */
53656 union {
53657 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53658 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53659 extern void unlock_rename(struct dentry *, struct dentry *);
53660
53661 -static inline void nd_set_link(struct nameidata *nd, char *path)
53662 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53663 {
53664 nd->saved_names[nd->depth] = path;
53665 }
53666
53667 -static inline char *nd_get_link(struct nameidata *nd)
53668 +static inline const char *nd_get_link(const struct nameidata *nd)
53669 {
53670 return nd->saved_names[nd->depth];
53671 }
53672 diff -urNp linux-3.0.3/include/linux/netdevice.h linux-3.0.3/include/linux/netdevice.h
53673 --- linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:44:40.000000000 -0400
53674 +++ linux-3.0.3/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
53675 @@ -979,6 +979,7 @@ struct net_device_ops {
53676 int (*ndo_set_features)(struct net_device *dev,
53677 u32 features);
53678 };
53679 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53680
53681 /*
53682 * The DEVICE structure.
53683 diff -urNp linux-3.0.3/include/linux/netfilter/xt_gradm.h linux-3.0.3/include/linux/netfilter/xt_gradm.h
53684 --- linux-3.0.3/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53685 +++ linux-3.0.3/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
53686 @@ -0,0 +1,9 @@
53687 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53688 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53689 +
53690 +struct xt_gradm_mtinfo {
53691 + __u16 flags;
53692 + __u16 invflags;
53693 +};
53694 +
53695 +#endif
53696 diff -urNp linux-3.0.3/include/linux/oprofile.h linux-3.0.3/include/linux/oprofile.h
53697 --- linux-3.0.3/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
53698 +++ linux-3.0.3/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
53699 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53700 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53701 char const * name, ulong * val);
53702
53703 -/** Create a file for read-only access to an atomic_t. */
53704 +/** Create a file for read-only access to an atomic_unchecked_t. */
53705 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53706 - char const * name, atomic_t * val);
53707 + char const * name, atomic_unchecked_t * val);
53708
53709 /** create a directory */
53710 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53711 diff -urNp linux-3.0.3/include/linux/padata.h linux-3.0.3/include/linux/padata.h
53712 --- linux-3.0.3/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
53713 +++ linux-3.0.3/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
53714 @@ -129,7 +129,7 @@ struct parallel_data {
53715 struct padata_instance *pinst;
53716 struct padata_parallel_queue __percpu *pqueue;
53717 struct padata_serial_queue __percpu *squeue;
53718 - atomic_t seq_nr;
53719 + atomic_unchecked_t seq_nr;
53720 atomic_t reorder_objects;
53721 atomic_t refcnt;
53722 unsigned int max_seq_nr;
53723 diff -urNp linux-3.0.3/include/linux/perf_event.h linux-3.0.3/include/linux/perf_event.h
53724 --- linux-3.0.3/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
53725 +++ linux-3.0.3/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
53726 @@ -761,8 +761,8 @@ struct perf_event {
53727
53728 enum perf_event_active_state state;
53729 unsigned int attach_state;
53730 - local64_t count;
53731 - atomic64_t child_count;
53732 + local64_t count; /* PaX: fix it one day */
53733 + atomic64_unchecked_t child_count;
53734
53735 /*
53736 * These are the total time in nanoseconds that the event
53737 @@ -813,8 +813,8 @@ struct perf_event {
53738 * These accumulate total time (in nanoseconds) that children
53739 * events have been enabled and running, respectively.
53740 */
53741 - atomic64_t child_total_time_enabled;
53742 - atomic64_t child_total_time_running;
53743 + atomic64_unchecked_t child_total_time_enabled;
53744 + atomic64_unchecked_t child_total_time_running;
53745
53746 /*
53747 * Protect attach/detach and child_list:
53748 diff -urNp linux-3.0.3/include/linux/pipe_fs_i.h linux-3.0.3/include/linux/pipe_fs_i.h
53749 --- linux-3.0.3/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
53750 +++ linux-3.0.3/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
53751 @@ -46,9 +46,9 @@ struct pipe_buffer {
53752 struct pipe_inode_info {
53753 wait_queue_head_t wait;
53754 unsigned int nrbufs, curbuf, buffers;
53755 - unsigned int readers;
53756 - unsigned int writers;
53757 - unsigned int waiting_writers;
53758 + atomic_t readers;
53759 + atomic_t writers;
53760 + atomic_t waiting_writers;
53761 unsigned int r_counter;
53762 unsigned int w_counter;
53763 struct page *tmp_page;
53764 diff -urNp linux-3.0.3/include/linux/pm_runtime.h linux-3.0.3/include/linux/pm_runtime.h
53765 --- linux-3.0.3/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
53766 +++ linux-3.0.3/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
53767 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53768
53769 static inline void pm_runtime_mark_last_busy(struct device *dev)
53770 {
53771 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
53772 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53773 }
53774
53775 #else /* !CONFIG_PM_RUNTIME */
53776 diff -urNp linux-3.0.3/include/linux/poison.h linux-3.0.3/include/linux/poison.h
53777 --- linux-3.0.3/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
53778 +++ linux-3.0.3/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
53779 @@ -19,8 +19,8 @@
53780 * under normal circumstances, used to verify that nobody uses
53781 * non-initialized list entries.
53782 */
53783 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53784 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53785 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53786 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53787
53788 /********** include/linux/timer.h **********/
53789 /*
53790 diff -urNp linux-3.0.3/include/linux/preempt.h linux-3.0.3/include/linux/preempt.h
53791 --- linux-3.0.3/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
53792 +++ linux-3.0.3/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
53793 @@ -115,7 +115,7 @@ struct preempt_ops {
53794 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53795 void (*sched_out)(struct preempt_notifier *notifier,
53796 struct task_struct *next);
53797 -};
53798 +} __no_const;
53799
53800 /**
53801 * preempt_notifier - key for installing preemption notifiers
53802 diff -urNp linux-3.0.3/include/linux/proc_fs.h linux-3.0.3/include/linux/proc_fs.h
53803 --- linux-3.0.3/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
53804 +++ linux-3.0.3/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
53805 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53806 return proc_create_data(name, mode, parent, proc_fops, NULL);
53807 }
53808
53809 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53810 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53811 +{
53812 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53813 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53814 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53815 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53816 +#else
53817 + return proc_create_data(name, mode, parent, proc_fops, NULL);
53818 +#endif
53819 +}
53820 +
53821 +
53822 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
53823 mode_t mode, struct proc_dir_entry *base,
53824 read_proc_t *read_proc, void * data)
53825 @@ -258,7 +271,7 @@ union proc_op {
53826 int (*proc_show)(struct seq_file *m,
53827 struct pid_namespace *ns, struct pid *pid,
53828 struct task_struct *task);
53829 -};
53830 +} __no_const;
53831
53832 struct ctl_table_header;
53833 struct ctl_table;
53834 diff -urNp linux-3.0.3/include/linux/ptrace.h linux-3.0.3/include/linux/ptrace.h
53835 --- linux-3.0.3/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
53836 +++ linux-3.0.3/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
53837 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
53838 extern void exit_ptrace(struct task_struct *tracer);
53839 #define PTRACE_MODE_READ 1
53840 #define PTRACE_MODE_ATTACH 2
53841 -/* Returns 0 on success, -errno on denial. */
53842 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
53843 /* Returns true on success, false on denial. */
53844 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
53845 +/* Returns true on success, false on denial. */
53846 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
53847
53848 static inline int ptrace_reparented(struct task_struct *child)
53849 {
53850 diff -urNp linux-3.0.3/include/linux/random.h linux-3.0.3/include/linux/random.h
53851 --- linux-3.0.3/include/linux/random.h 2011-08-23 21:44:40.000000000 -0400
53852 +++ linux-3.0.3/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
53853 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
53854
53855 u32 prandom32(struct rnd_state *);
53856
53857 +static inline unsigned long pax_get_random_long(void)
53858 +{
53859 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
53860 +}
53861 +
53862 /*
53863 * Handle minimum values for seeds
53864 */
53865 static inline u32 __seed(u32 x, u32 m)
53866 {
53867 - return (x < m) ? x + m : x;
53868 + return (x <= m) ? x + m + 1 : x;
53869 }
53870
53871 /**
53872 diff -urNp linux-3.0.3/include/linux/reboot.h linux-3.0.3/include/linux/reboot.h
53873 --- linux-3.0.3/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
53874 +++ linux-3.0.3/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
53875 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
53876 * Architecture-specific implementations of sys_reboot commands.
53877 */
53878
53879 -extern void machine_restart(char *cmd);
53880 -extern void machine_halt(void);
53881 -extern void machine_power_off(void);
53882 +extern void machine_restart(char *cmd) __noreturn;
53883 +extern void machine_halt(void) __noreturn;
53884 +extern void machine_power_off(void) __noreturn;
53885
53886 extern void machine_shutdown(void);
53887 struct pt_regs;
53888 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
53889 */
53890
53891 extern void kernel_restart_prepare(char *cmd);
53892 -extern void kernel_restart(char *cmd);
53893 -extern void kernel_halt(void);
53894 -extern void kernel_power_off(void);
53895 +extern void kernel_restart(char *cmd) __noreturn;
53896 +extern void kernel_halt(void) __noreturn;
53897 +extern void kernel_power_off(void) __noreturn;
53898
53899 extern int C_A_D; /* for sysctl */
53900 void ctrl_alt_del(void);
53901 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
53902 * Emergency restart, callable from an interrupt handler.
53903 */
53904
53905 -extern void emergency_restart(void);
53906 +extern void emergency_restart(void) __noreturn;
53907 #include <asm/emergency-restart.h>
53908
53909 #endif
53910 diff -urNp linux-3.0.3/include/linux/reiserfs_fs.h linux-3.0.3/include/linux/reiserfs_fs.h
53911 --- linux-3.0.3/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
53912 +++ linux-3.0.3/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
53913 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
53914 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
53915
53916 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
53917 -#define get_generation(s) atomic_read (&fs_generation(s))
53918 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
53919 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
53920 #define __fs_changed(gen,s) (gen != get_generation (s))
53921 #define fs_changed(gen,s) \
53922 diff -urNp linux-3.0.3/include/linux/reiserfs_fs_sb.h linux-3.0.3/include/linux/reiserfs_fs_sb.h
53923 --- linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
53924 +++ linux-3.0.3/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
53925 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
53926 /* Comment? -Hans */
53927 wait_queue_head_t s_wait;
53928 /* To be obsoleted soon by per buffer seals.. -Hans */
53929 - atomic_t s_generation_counter; // increased by one every time the
53930 + atomic_unchecked_t s_generation_counter; // increased by one every time the
53931 // tree gets re-balanced
53932 unsigned long s_properties; /* File system properties. Currently holds
53933 on-disk FS format */
53934 diff -urNp linux-3.0.3/include/linux/relay.h linux-3.0.3/include/linux/relay.h
53935 --- linux-3.0.3/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
53936 +++ linux-3.0.3/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
53937 @@ -159,7 +159,7 @@ struct rchan_callbacks
53938 * The callback should return 0 if successful, negative if not.
53939 */
53940 int (*remove_buf_file)(struct dentry *dentry);
53941 -};
53942 +} __no_const;
53943
53944 /*
53945 * CONFIG_RELAY kernel API, kernel/relay.c
53946 diff -urNp linux-3.0.3/include/linux/rfkill.h linux-3.0.3/include/linux/rfkill.h
53947 --- linux-3.0.3/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
53948 +++ linux-3.0.3/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
53949 @@ -147,6 +147,7 @@ struct rfkill_ops {
53950 void (*query)(struct rfkill *rfkill, void *data);
53951 int (*set_block)(void *data, bool blocked);
53952 };
53953 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
53954
53955 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
53956 /**
53957 diff -urNp linux-3.0.3/include/linux/rmap.h linux-3.0.3/include/linux/rmap.h
53958 --- linux-3.0.3/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
53959 +++ linux-3.0.3/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
53960 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
53961 void anon_vma_init(void); /* create anon_vma_cachep */
53962 int anon_vma_prepare(struct vm_area_struct *);
53963 void unlink_anon_vmas(struct vm_area_struct *);
53964 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
53965 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
53966 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
53967 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
53968 void __anon_vma_link(struct vm_area_struct *);
53969
53970 static inline void anon_vma_merge(struct vm_area_struct *vma,
53971 diff -urNp linux-3.0.3/include/linux/sched.h linux-3.0.3/include/linux/sched.h
53972 --- linux-3.0.3/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
53973 +++ linux-3.0.3/include/linux/sched.h 2011-08-23 21:48:14.000000000 -0400
53974 @@ -100,6 +100,7 @@ struct bio_list;
53975 struct fs_struct;
53976 struct perf_event_context;
53977 struct blk_plug;
53978 +struct linux_binprm;
53979
53980 /*
53981 * List of flags we want to share for kernel threads,
53982 @@ -380,10 +381,13 @@ struct user_namespace;
53983 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
53984
53985 extern int sysctl_max_map_count;
53986 +extern unsigned long sysctl_heap_stack_gap;
53987
53988 #include <linux/aio.h>
53989
53990 #ifdef CONFIG_MMU
53991 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
53992 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
53993 extern void arch_pick_mmap_layout(struct mm_struct *mm);
53994 extern unsigned long
53995 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
53996 @@ -629,6 +633,17 @@ struct signal_struct {
53997 #ifdef CONFIG_TASKSTATS
53998 struct taskstats *stats;
53999 #endif
54000 +
54001 +#ifdef CONFIG_GRKERNSEC
54002 + u32 curr_ip;
54003 + u32 saved_ip;
54004 + u32 gr_saddr;
54005 + u32 gr_daddr;
54006 + u16 gr_sport;
54007 + u16 gr_dport;
54008 + u8 used_accept:1;
54009 +#endif
54010 +
54011 #ifdef CONFIG_AUDIT
54012 unsigned audit_tty;
54013 struct tty_audit_buf *tty_audit_buf;
54014 @@ -710,6 +725,11 @@ struct user_struct {
54015 struct key *session_keyring; /* UID's default session keyring */
54016 #endif
54017
54018 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54019 + unsigned int banned;
54020 + unsigned long ban_expires;
54021 +#endif
54022 +
54023 /* Hash table maintenance information */
54024 struct hlist_node uidhash_node;
54025 uid_t uid;
54026 @@ -1340,8 +1360,8 @@ struct task_struct {
54027 struct list_head thread_group;
54028
54029 struct completion *vfork_done; /* for vfork() */
54030 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54031 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54032 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54033 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54034
54035 cputime_t utime, stime, utimescaled, stimescaled;
54036 cputime_t gtime;
54037 @@ -1357,13 +1377,6 @@ struct task_struct {
54038 struct task_cputime cputime_expires;
54039 struct list_head cpu_timers[3];
54040
54041 -/* process credentials */
54042 - const struct cred __rcu *real_cred; /* objective and real subjective task
54043 - * credentials (COW) */
54044 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54045 - * credentials (COW) */
54046 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54047 -
54048 char comm[TASK_COMM_LEN]; /* executable name excluding path
54049 - access with [gs]et_task_comm (which lock
54050 it with task_lock())
54051 @@ -1380,8 +1393,16 @@ struct task_struct {
54052 #endif
54053 /* CPU-specific state of this task */
54054 struct thread_struct thread;
54055 +/* thread_info moved to task_struct */
54056 +#ifdef CONFIG_X86
54057 + struct thread_info tinfo;
54058 +#endif
54059 /* filesystem information */
54060 struct fs_struct *fs;
54061 +
54062 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54063 + * credentials (COW) */
54064 +
54065 /* open file information */
54066 struct files_struct *files;
54067 /* namespaces */
54068 @@ -1428,6 +1449,11 @@ struct task_struct {
54069 struct rt_mutex_waiter *pi_blocked_on;
54070 #endif
54071
54072 +/* process credentials */
54073 + const struct cred __rcu *real_cred; /* objective and real subjective task
54074 + * credentials (COW) */
54075 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54076 +
54077 #ifdef CONFIG_DEBUG_MUTEXES
54078 /* mutex deadlock detection */
54079 struct mutex_waiter *blocked_on;
54080 @@ -1538,6 +1564,21 @@ struct task_struct {
54081 unsigned long default_timer_slack_ns;
54082
54083 struct list_head *scm_work_list;
54084 +
54085 +#ifdef CONFIG_GRKERNSEC
54086 + /* grsecurity */
54087 + struct dentry *gr_chroot_dentry;
54088 + struct acl_subject_label *acl;
54089 + struct acl_role_label *role;
54090 + struct file *exec_file;
54091 + u16 acl_role_id;
54092 + /* is this the task that authenticated to the special role */
54093 + u8 acl_sp_role;
54094 + u8 is_writable;
54095 + u8 brute;
54096 + u8 gr_is_chrooted;
54097 +#endif
54098 +
54099 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54100 /* Index of current stored address in ret_stack */
54101 int curr_ret_stack;
54102 @@ -1572,6 +1613,57 @@ struct task_struct {
54103 #endif
54104 };
54105
54106 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54107 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54108 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54109 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54110 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54111 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54112 +
54113 +#ifdef CONFIG_PAX_SOFTMODE
54114 +extern int pax_softmode;
54115 +#endif
54116 +
54117 +extern int pax_check_flags(unsigned long *);
54118 +
54119 +/* if tsk != current then task_lock must be held on it */
54120 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54121 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54122 +{
54123 + if (likely(tsk->mm))
54124 + return tsk->mm->pax_flags;
54125 + else
54126 + return 0UL;
54127 +}
54128 +
54129 +/* if tsk != current then task_lock must be held on it */
54130 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54131 +{
54132 + if (likely(tsk->mm)) {
54133 + tsk->mm->pax_flags = flags;
54134 + return 0;
54135 + }
54136 + return -EINVAL;
54137 +}
54138 +#endif
54139 +
54140 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54141 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54142 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54143 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54144 +#endif
54145 +
54146 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54147 +extern void pax_report_insns(void *pc, void *sp);
54148 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54149 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54150 +
54151 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54152 +extern void pax_track_stack(void);
54153 +#else
54154 +static inline void pax_track_stack(void) {}
54155 +#endif
54156 +
54157 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54158 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54159
54160 @@ -2056,7 +2148,9 @@ void yield(void);
54161 extern struct exec_domain default_exec_domain;
54162
54163 union thread_union {
54164 +#ifndef CONFIG_X86
54165 struct thread_info thread_info;
54166 +#endif
54167 unsigned long stack[THREAD_SIZE/sizeof(long)];
54168 };
54169
54170 @@ -2089,6 +2183,7 @@ extern struct pid_namespace init_pid_ns;
54171 */
54172
54173 extern struct task_struct *find_task_by_vpid(pid_t nr);
54174 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54175 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54176 struct pid_namespace *ns);
54177
54178 @@ -2225,7 +2320,7 @@ extern void __cleanup_sighand(struct sig
54179 extern void exit_itimers(struct signal_struct *);
54180 extern void flush_itimer_signals(void);
54181
54182 -extern NORET_TYPE void do_group_exit(int);
54183 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54184
54185 extern void daemonize(const char *, ...);
54186 extern int allow_signal(int);
54187 @@ -2393,13 +2488,17 @@ static inline unsigned long *end_of_stac
54188
54189 #endif
54190
54191 -static inline int object_is_on_stack(void *obj)
54192 +static inline int object_starts_on_stack(void *obj)
54193 {
54194 - void *stack = task_stack_page(current);
54195 + const void *stack = task_stack_page(current);
54196
54197 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54198 }
54199
54200 +#ifdef CONFIG_PAX_USERCOPY
54201 +extern int object_is_on_stack(const void *obj, unsigned long len);
54202 +#endif
54203 +
54204 extern void thread_info_cache_init(void);
54205
54206 #ifdef CONFIG_DEBUG_STACK_USAGE
54207 diff -urNp linux-3.0.3/include/linux/screen_info.h linux-3.0.3/include/linux/screen_info.h
54208 --- linux-3.0.3/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
54209 +++ linux-3.0.3/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
54210 @@ -43,7 +43,8 @@ struct screen_info {
54211 __u16 pages; /* 0x32 */
54212 __u16 vesa_attributes; /* 0x34 */
54213 __u32 capabilities; /* 0x36 */
54214 - __u8 _reserved[6]; /* 0x3a */
54215 + __u16 vesapm_size; /* 0x3a */
54216 + __u8 _reserved[4]; /* 0x3c */
54217 } __attribute__((packed));
54218
54219 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54220 diff -urNp linux-3.0.3/include/linux/security.h linux-3.0.3/include/linux/security.h
54221 --- linux-3.0.3/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
54222 +++ linux-3.0.3/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
54223 @@ -36,6 +36,7 @@
54224 #include <linux/key.h>
54225 #include <linux/xfrm.h>
54226 #include <linux/slab.h>
54227 +#include <linux/grsecurity.h>
54228 #include <net/flow.h>
54229
54230 /* Maximum number of letters for an LSM name string */
54231 diff -urNp linux-3.0.3/include/linux/seq_file.h linux-3.0.3/include/linux/seq_file.h
54232 --- linux-3.0.3/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
54233 +++ linux-3.0.3/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
54234 @@ -32,6 +32,7 @@ struct seq_operations {
54235 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54236 int (*show) (struct seq_file *m, void *v);
54237 };
54238 +typedef struct seq_operations __no_const seq_operations_no_const;
54239
54240 #define SEQ_SKIP 1
54241
54242 diff -urNp linux-3.0.3/include/linux/shmem_fs.h linux-3.0.3/include/linux/shmem_fs.h
54243 --- linux-3.0.3/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
54244 +++ linux-3.0.3/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
54245 @@ -10,7 +10,7 @@
54246
54247 #define SHMEM_NR_DIRECT 16
54248
54249 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
54250 +#define SHMEM_SYMLINK_INLINE_LEN 64
54251
54252 struct shmem_inode_info {
54253 spinlock_t lock;
54254 diff -urNp linux-3.0.3/include/linux/shm.h linux-3.0.3/include/linux/shm.h
54255 --- linux-3.0.3/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
54256 +++ linux-3.0.3/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
54257 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54258 pid_t shm_cprid;
54259 pid_t shm_lprid;
54260 struct user_struct *mlock_user;
54261 +#ifdef CONFIG_GRKERNSEC
54262 + time_t shm_createtime;
54263 + pid_t shm_lapid;
54264 +#endif
54265 };
54266
54267 /* shm_mode upper byte flags */
54268 diff -urNp linux-3.0.3/include/linux/skbuff.h linux-3.0.3/include/linux/skbuff.h
54269 --- linux-3.0.3/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
54270 +++ linux-3.0.3/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
54271 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54272 */
54273 static inline int skb_queue_empty(const struct sk_buff_head *list)
54274 {
54275 - return list->next == (struct sk_buff *)list;
54276 + return list->next == (const struct sk_buff *)list;
54277 }
54278
54279 /**
54280 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54281 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54282 const struct sk_buff *skb)
54283 {
54284 - return skb->next == (struct sk_buff *)list;
54285 + return skb->next == (const struct sk_buff *)list;
54286 }
54287
54288 /**
54289 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54290 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54291 const struct sk_buff *skb)
54292 {
54293 - return skb->prev == (struct sk_buff *)list;
54294 + return skb->prev == (const struct sk_buff *)list;
54295 }
54296
54297 /**
54298 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
54299 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54300 */
54301 #ifndef NET_SKB_PAD
54302 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54303 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54304 #endif
54305
54306 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54307 diff -urNp linux-3.0.3/include/linux/slab_def.h linux-3.0.3/include/linux/slab_def.h
54308 --- linux-3.0.3/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
54309 +++ linux-3.0.3/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
54310 @@ -96,10 +96,10 @@ struct kmem_cache {
54311 unsigned long node_allocs;
54312 unsigned long node_frees;
54313 unsigned long node_overflow;
54314 - atomic_t allochit;
54315 - atomic_t allocmiss;
54316 - atomic_t freehit;
54317 - atomic_t freemiss;
54318 + atomic_unchecked_t allochit;
54319 + atomic_unchecked_t allocmiss;
54320 + atomic_unchecked_t freehit;
54321 + atomic_unchecked_t freemiss;
54322
54323 /*
54324 * If debugging is enabled, then the allocator can add additional
54325 diff -urNp linux-3.0.3/include/linux/slab.h linux-3.0.3/include/linux/slab.h
54326 --- linux-3.0.3/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
54327 +++ linux-3.0.3/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
54328 @@ -11,12 +11,20 @@
54329
54330 #include <linux/gfp.h>
54331 #include <linux/types.h>
54332 +#include <linux/err.h>
54333
54334 /*
54335 * Flags to pass to kmem_cache_create().
54336 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54337 */
54338 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54339 +
54340 +#ifdef CONFIG_PAX_USERCOPY
54341 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54342 +#else
54343 +#define SLAB_USERCOPY 0x00000000UL
54344 +#endif
54345 +
54346 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54347 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54348 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54349 @@ -87,10 +95,13 @@
54350 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54351 * Both make kfree a no-op.
54352 */
54353 -#define ZERO_SIZE_PTR ((void *)16)
54354 +#define ZERO_SIZE_PTR \
54355 +({ \
54356 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54357 + (void *)(-MAX_ERRNO-1L); \
54358 +})
54359
54360 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54361 - (unsigned long)ZERO_SIZE_PTR)
54362 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54363
54364 /*
54365 * struct kmem_cache related prototypes
54366 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54367 void kfree(const void *);
54368 void kzfree(const void *);
54369 size_t ksize(const void *);
54370 +void check_object_size(const void *ptr, unsigned long n, bool to);
54371
54372 /*
54373 * Allocator specific definitions. These are mainly used to establish optimized
54374 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54375
54376 void __init kmem_cache_init_late(void);
54377
54378 +#define kmalloc(x, y) \
54379 +({ \
54380 + void *___retval; \
54381 + intoverflow_t ___x = (intoverflow_t)x; \
54382 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54383 + ___retval = NULL; \
54384 + else \
54385 + ___retval = kmalloc((size_t)___x, (y)); \
54386 + ___retval; \
54387 +})
54388 +
54389 +#define kmalloc_node(x, y, z) \
54390 +({ \
54391 + void *___retval; \
54392 + intoverflow_t ___x = (intoverflow_t)x; \
54393 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54394 + ___retval = NULL; \
54395 + else \
54396 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54397 + ___retval; \
54398 +})
54399 +
54400 +#define kzalloc(x, y) \
54401 +({ \
54402 + void *___retval; \
54403 + intoverflow_t ___x = (intoverflow_t)x; \
54404 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54405 + ___retval = NULL; \
54406 + else \
54407 + ___retval = kzalloc((size_t)___x, (y)); \
54408 + ___retval; \
54409 +})
54410 +
54411 +#define __krealloc(x, y, z) \
54412 +({ \
54413 + void *___retval; \
54414 + intoverflow_t ___y = (intoverflow_t)y; \
54415 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54416 + ___retval = NULL; \
54417 + else \
54418 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54419 + ___retval; \
54420 +})
54421 +
54422 +#define krealloc(x, y, z) \
54423 +({ \
54424 + void *___retval; \
54425 + intoverflow_t ___y = (intoverflow_t)y; \
54426 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54427 + ___retval = NULL; \
54428 + else \
54429 + ___retval = krealloc((x), (size_t)___y, (z)); \
54430 + ___retval; \
54431 +})
54432 +
54433 #endif /* _LINUX_SLAB_H */
54434 diff -urNp linux-3.0.3/include/linux/slub_def.h linux-3.0.3/include/linux/slub_def.h
54435 --- linux-3.0.3/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
54436 +++ linux-3.0.3/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
54437 @@ -82,7 +82,7 @@ struct kmem_cache {
54438 struct kmem_cache_order_objects max;
54439 struct kmem_cache_order_objects min;
54440 gfp_t allocflags; /* gfp flags to use on each alloc */
54441 - int refcount; /* Refcount for slab cache destroy */
54442 + atomic_t refcount; /* Refcount for slab cache destroy */
54443 void (*ctor)(void *);
54444 int inuse; /* Offset to metadata */
54445 int align; /* Alignment */
54446 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54447 }
54448
54449 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54450 -void *__kmalloc(size_t size, gfp_t flags);
54451 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54452
54453 static __always_inline void *
54454 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54455 diff -urNp linux-3.0.3/include/linux/sonet.h linux-3.0.3/include/linux/sonet.h
54456 --- linux-3.0.3/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
54457 +++ linux-3.0.3/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
54458 @@ -61,7 +61,7 @@ struct sonet_stats {
54459 #include <asm/atomic.h>
54460
54461 struct k_sonet_stats {
54462 -#define __HANDLE_ITEM(i) atomic_t i
54463 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54464 __SONET_ITEMS
54465 #undef __HANDLE_ITEM
54466 };
54467 diff -urNp linux-3.0.3/include/linux/sunrpc/clnt.h linux-3.0.3/include/linux/sunrpc/clnt.h
54468 --- linux-3.0.3/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
54469 +++ linux-3.0.3/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
54470 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54471 {
54472 switch (sap->sa_family) {
54473 case AF_INET:
54474 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54475 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54476 case AF_INET6:
54477 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54478 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54479 }
54480 return 0;
54481 }
54482 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54483 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54484 const struct sockaddr *src)
54485 {
54486 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54487 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54488 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54489
54490 dsin->sin_family = ssin->sin_family;
54491 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54492 if (sa->sa_family != AF_INET6)
54493 return 0;
54494
54495 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54496 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54497 }
54498
54499 #endif /* __KERNEL__ */
54500 diff -urNp linux-3.0.3/include/linux/sunrpc/svc_rdma.h linux-3.0.3/include/linux/sunrpc/svc_rdma.h
54501 --- linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
54502 +++ linux-3.0.3/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
54503 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54504 extern unsigned int svcrdma_max_requests;
54505 extern unsigned int svcrdma_max_req_size;
54506
54507 -extern atomic_t rdma_stat_recv;
54508 -extern atomic_t rdma_stat_read;
54509 -extern atomic_t rdma_stat_write;
54510 -extern atomic_t rdma_stat_sq_starve;
54511 -extern atomic_t rdma_stat_rq_starve;
54512 -extern atomic_t rdma_stat_rq_poll;
54513 -extern atomic_t rdma_stat_rq_prod;
54514 -extern atomic_t rdma_stat_sq_poll;
54515 -extern atomic_t rdma_stat_sq_prod;
54516 +extern atomic_unchecked_t rdma_stat_recv;
54517 +extern atomic_unchecked_t rdma_stat_read;
54518 +extern atomic_unchecked_t rdma_stat_write;
54519 +extern atomic_unchecked_t rdma_stat_sq_starve;
54520 +extern atomic_unchecked_t rdma_stat_rq_starve;
54521 +extern atomic_unchecked_t rdma_stat_rq_poll;
54522 +extern atomic_unchecked_t rdma_stat_rq_prod;
54523 +extern atomic_unchecked_t rdma_stat_sq_poll;
54524 +extern atomic_unchecked_t rdma_stat_sq_prod;
54525
54526 #define RPCRDMA_VERSION 1
54527
54528 diff -urNp linux-3.0.3/include/linux/sysctl.h linux-3.0.3/include/linux/sysctl.h
54529 --- linux-3.0.3/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
54530 +++ linux-3.0.3/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
54531 @@ -155,7 +155,11 @@ enum
54532 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54533 };
54534
54535 -
54536 +#ifdef CONFIG_PAX_SOFTMODE
54537 +enum {
54538 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54539 +};
54540 +#endif
54541
54542 /* CTL_VM names: */
54543 enum
54544 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54545
54546 extern int proc_dostring(struct ctl_table *, int,
54547 void __user *, size_t *, loff_t *);
54548 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54549 + void __user *, size_t *, loff_t *);
54550 extern int proc_dointvec(struct ctl_table *, int,
54551 void __user *, size_t *, loff_t *);
54552 extern int proc_dointvec_minmax(struct ctl_table *, int,
54553 diff -urNp linux-3.0.3/include/linux/tty_ldisc.h linux-3.0.3/include/linux/tty_ldisc.h
54554 --- linux-3.0.3/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
54555 +++ linux-3.0.3/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
54556 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54557
54558 struct module *owner;
54559
54560 - int refcount;
54561 + atomic_t refcount;
54562 };
54563
54564 struct tty_ldisc {
54565 diff -urNp linux-3.0.3/include/linux/types.h linux-3.0.3/include/linux/types.h
54566 --- linux-3.0.3/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
54567 +++ linux-3.0.3/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
54568 @@ -213,10 +213,26 @@ typedef struct {
54569 int counter;
54570 } atomic_t;
54571
54572 +#ifdef CONFIG_PAX_REFCOUNT
54573 +typedef struct {
54574 + int counter;
54575 +} atomic_unchecked_t;
54576 +#else
54577 +typedef atomic_t atomic_unchecked_t;
54578 +#endif
54579 +
54580 #ifdef CONFIG_64BIT
54581 typedef struct {
54582 long counter;
54583 } atomic64_t;
54584 +
54585 +#ifdef CONFIG_PAX_REFCOUNT
54586 +typedef struct {
54587 + long counter;
54588 +} atomic64_unchecked_t;
54589 +#else
54590 +typedef atomic64_t atomic64_unchecked_t;
54591 +#endif
54592 #endif
54593
54594 struct list_head {
54595 diff -urNp linux-3.0.3/include/linux/uaccess.h linux-3.0.3/include/linux/uaccess.h
54596 --- linux-3.0.3/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
54597 +++ linux-3.0.3/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
54598 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54599 long ret; \
54600 mm_segment_t old_fs = get_fs(); \
54601 \
54602 - set_fs(KERNEL_DS); \
54603 pagefault_disable(); \
54604 + set_fs(KERNEL_DS); \
54605 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54606 - pagefault_enable(); \
54607 set_fs(old_fs); \
54608 + pagefault_enable(); \
54609 ret; \
54610 })
54611
54612 diff -urNp linux-3.0.3/include/linux/unaligned/access_ok.h linux-3.0.3/include/linux/unaligned/access_ok.h
54613 --- linux-3.0.3/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
54614 +++ linux-3.0.3/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
54615 @@ -6,32 +6,32 @@
54616
54617 static inline u16 get_unaligned_le16(const void *p)
54618 {
54619 - return le16_to_cpup((__le16 *)p);
54620 + return le16_to_cpup((const __le16 *)p);
54621 }
54622
54623 static inline u32 get_unaligned_le32(const void *p)
54624 {
54625 - return le32_to_cpup((__le32 *)p);
54626 + return le32_to_cpup((const __le32 *)p);
54627 }
54628
54629 static inline u64 get_unaligned_le64(const void *p)
54630 {
54631 - return le64_to_cpup((__le64 *)p);
54632 + return le64_to_cpup((const __le64 *)p);
54633 }
54634
54635 static inline u16 get_unaligned_be16(const void *p)
54636 {
54637 - return be16_to_cpup((__be16 *)p);
54638 + return be16_to_cpup((const __be16 *)p);
54639 }
54640
54641 static inline u32 get_unaligned_be32(const void *p)
54642 {
54643 - return be32_to_cpup((__be32 *)p);
54644 + return be32_to_cpup((const __be32 *)p);
54645 }
54646
54647 static inline u64 get_unaligned_be64(const void *p)
54648 {
54649 - return be64_to_cpup((__be64 *)p);
54650 + return be64_to_cpup((const __be64 *)p);
54651 }
54652
54653 static inline void put_unaligned_le16(u16 val, void *p)
54654 diff -urNp linux-3.0.3/include/linux/vmalloc.h linux-3.0.3/include/linux/vmalloc.h
54655 --- linux-3.0.3/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
54656 +++ linux-3.0.3/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
54657 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54658 #define VM_MAP 0x00000004 /* vmap()ed pages */
54659 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54660 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54661 +
54662 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54663 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54664 +#endif
54665 +
54666 /* bits [20..32] reserved for arch specific ioremap internals */
54667
54668 /*
54669 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54670 # endif
54671 #endif
54672
54673 +#define vmalloc(x) \
54674 +({ \
54675 + void *___retval; \
54676 + intoverflow_t ___x = (intoverflow_t)x; \
54677 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54678 + ___retval = NULL; \
54679 + else \
54680 + ___retval = vmalloc((unsigned long)___x); \
54681 + ___retval; \
54682 +})
54683 +
54684 +#define vzalloc(x) \
54685 +({ \
54686 + void *___retval; \
54687 + intoverflow_t ___x = (intoverflow_t)x; \
54688 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54689 + ___retval = NULL; \
54690 + else \
54691 + ___retval = vzalloc((unsigned long)___x); \
54692 + ___retval; \
54693 +})
54694 +
54695 +#define __vmalloc(x, y, z) \
54696 +({ \
54697 + void *___retval; \
54698 + intoverflow_t ___x = (intoverflow_t)x; \
54699 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54700 + ___retval = NULL; \
54701 + else \
54702 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54703 + ___retval; \
54704 +})
54705 +
54706 +#define vmalloc_user(x) \
54707 +({ \
54708 + void *___retval; \
54709 + intoverflow_t ___x = (intoverflow_t)x; \
54710 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54711 + ___retval = NULL; \
54712 + else \
54713 + ___retval = vmalloc_user((unsigned long)___x); \
54714 + ___retval; \
54715 +})
54716 +
54717 +#define vmalloc_exec(x) \
54718 +({ \
54719 + void *___retval; \
54720 + intoverflow_t ___x = (intoverflow_t)x; \
54721 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54722 + ___retval = NULL; \
54723 + else \
54724 + ___retval = vmalloc_exec((unsigned long)___x); \
54725 + ___retval; \
54726 +})
54727 +
54728 +#define vmalloc_node(x, y) \
54729 +({ \
54730 + void *___retval; \
54731 + intoverflow_t ___x = (intoverflow_t)x; \
54732 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54733 + ___retval = NULL; \
54734 + else \
54735 + ___retval = vmalloc_node((unsigned long)___x, (y));\
54736 + ___retval; \
54737 +})
54738 +
54739 +#define vzalloc_node(x, y) \
54740 +({ \
54741 + void *___retval; \
54742 + intoverflow_t ___x = (intoverflow_t)x; \
54743 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54744 + ___retval = NULL; \
54745 + else \
54746 + ___retval = vzalloc_node((unsigned long)___x, (y));\
54747 + ___retval; \
54748 +})
54749 +
54750 +#define vmalloc_32(x) \
54751 +({ \
54752 + void *___retval; \
54753 + intoverflow_t ___x = (intoverflow_t)x; \
54754 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54755 + ___retval = NULL; \
54756 + else \
54757 + ___retval = vmalloc_32((unsigned long)___x); \
54758 + ___retval; \
54759 +})
54760 +
54761 +#define vmalloc_32_user(x) \
54762 +({ \
54763 +void *___retval; \
54764 + intoverflow_t ___x = (intoverflow_t)x; \
54765 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54766 + ___retval = NULL; \
54767 + else \
54768 + ___retval = vmalloc_32_user((unsigned long)___x);\
54769 + ___retval; \
54770 +})
54771 +
54772 #endif /* _LINUX_VMALLOC_H */
54773 diff -urNp linux-3.0.3/include/linux/vmstat.h linux-3.0.3/include/linux/vmstat.h
54774 --- linux-3.0.3/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
54775 +++ linux-3.0.3/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
54776 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
54777 /*
54778 * Zone based page accounting with per cpu differentials.
54779 */
54780 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54781 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54782
54783 static inline void zone_page_state_add(long x, struct zone *zone,
54784 enum zone_stat_item item)
54785 {
54786 - atomic_long_add(x, &zone->vm_stat[item]);
54787 - atomic_long_add(x, &vm_stat[item]);
54788 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54789 + atomic_long_add_unchecked(x, &vm_stat[item]);
54790 }
54791
54792 static inline unsigned long global_page_state(enum zone_stat_item item)
54793 {
54794 - long x = atomic_long_read(&vm_stat[item]);
54795 + long x = atomic_long_read_unchecked(&vm_stat[item]);
54796 #ifdef CONFIG_SMP
54797 if (x < 0)
54798 x = 0;
54799 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
54800 static inline unsigned long zone_page_state(struct zone *zone,
54801 enum zone_stat_item item)
54802 {
54803 - long x = atomic_long_read(&zone->vm_stat[item]);
54804 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54805 #ifdef CONFIG_SMP
54806 if (x < 0)
54807 x = 0;
54808 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
54809 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
54810 enum zone_stat_item item)
54811 {
54812 - long x = atomic_long_read(&zone->vm_stat[item]);
54813 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
54814
54815 #ifdef CONFIG_SMP
54816 int cpu;
54817 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
54818
54819 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
54820 {
54821 - atomic_long_inc(&zone->vm_stat[item]);
54822 - atomic_long_inc(&vm_stat[item]);
54823 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
54824 + atomic_long_inc_unchecked(&vm_stat[item]);
54825 }
54826
54827 static inline void __inc_zone_page_state(struct page *page,
54828 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
54829
54830 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
54831 {
54832 - atomic_long_dec(&zone->vm_stat[item]);
54833 - atomic_long_dec(&vm_stat[item]);
54834 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
54835 + atomic_long_dec_unchecked(&vm_stat[item]);
54836 }
54837
54838 static inline void __dec_zone_page_state(struct page *page,
54839 diff -urNp linux-3.0.3/include/media/saa7146_vv.h linux-3.0.3/include/media/saa7146_vv.h
54840 --- linux-3.0.3/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
54841 +++ linux-3.0.3/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
54842 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
54843 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
54844
54845 /* the extension can override this */
54846 - struct v4l2_ioctl_ops ops;
54847 + v4l2_ioctl_ops_no_const ops;
54848 /* pointer to the saa7146 core ops */
54849 const struct v4l2_ioctl_ops *core_ops;
54850
54851 diff -urNp linux-3.0.3/include/media/v4l2-ioctl.h linux-3.0.3/include/media/v4l2-ioctl.h
54852 --- linux-3.0.3/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
54853 +++ linux-3.0.3/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
54854 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
54855 long (*vidioc_default) (struct file *file, void *fh,
54856 bool valid_prio, int cmd, void *arg);
54857 };
54858 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
54859
54860
54861 /* v4l debugging and diagnostics */
54862 diff -urNp linux-3.0.3/include/net/caif/cfctrl.h linux-3.0.3/include/net/caif/cfctrl.h
54863 --- linux-3.0.3/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
54864 +++ linux-3.0.3/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
54865 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
54866 void (*radioset_rsp)(void);
54867 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54868 struct cflayer *client_layer);
54869 -};
54870 +} __no_const;
54871
54872 /* Link Setup Parameters for CAIF-Links. */
54873 struct cfctrl_link_param {
54874 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
54875 struct cfctrl {
54876 struct cfsrvl serv;
54877 struct cfctrl_rsp res;
54878 - atomic_t req_seq_no;
54879 - atomic_t rsp_seq_no;
54880 + atomic_unchecked_t req_seq_no;
54881 + atomic_unchecked_t rsp_seq_no;
54882 struct list_head list;
54883 /* Protects from simultaneous access to first_req list */
54884 spinlock_t info_list_lock;
54885 diff -urNp linux-3.0.3/include/net/flow.h linux-3.0.3/include/net/flow.h
54886 --- linux-3.0.3/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
54887 +++ linux-3.0.3/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
54888 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
54889 u8 dir, flow_resolve_t resolver, void *ctx);
54890
54891 extern void flow_cache_flush(void);
54892 -extern atomic_t flow_cache_genid;
54893 +extern atomic_unchecked_t flow_cache_genid;
54894
54895 #endif
54896 diff -urNp linux-3.0.3/include/net/inetpeer.h linux-3.0.3/include/net/inetpeer.h
54897 --- linux-3.0.3/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
54898 +++ linux-3.0.3/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
54899 @@ -43,8 +43,8 @@ struct inet_peer {
54900 */
54901 union {
54902 struct {
54903 - atomic_t rid; /* Frag reception counter */
54904 - atomic_t ip_id_count; /* IP ID for the next packet */
54905 + atomic_unchecked_t rid; /* Frag reception counter */
54906 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
54907 __u32 tcp_ts;
54908 __u32 tcp_ts_stamp;
54909 u32 metrics[RTAX_MAX];
54910 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
54911 {
54912 more++;
54913 inet_peer_refcheck(p);
54914 - return atomic_add_return(more, &p->ip_id_count) - more;
54915 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
54916 }
54917
54918 #endif /* _NET_INETPEER_H */
54919 diff -urNp linux-3.0.3/include/net/ip_fib.h linux-3.0.3/include/net/ip_fib.h
54920 --- linux-3.0.3/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
54921 +++ linux-3.0.3/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
54922 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
54923
54924 #define FIB_RES_SADDR(net, res) \
54925 ((FIB_RES_NH(res).nh_saddr_genid == \
54926 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
54927 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
54928 FIB_RES_NH(res).nh_saddr : \
54929 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
54930 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
54931 diff -urNp linux-3.0.3/include/net/ip_vs.h linux-3.0.3/include/net/ip_vs.h
54932 --- linux-3.0.3/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
54933 +++ linux-3.0.3/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
54934 @@ -509,7 +509,7 @@ struct ip_vs_conn {
54935 struct ip_vs_conn *control; /* Master control connection */
54936 atomic_t n_control; /* Number of controlled ones */
54937 struct ip_vs_dest *dest; /* real server */
54938 - atomic_t in_pkts; /* incoming packet counter */
54939 + atomic_unchecked_t in_pkts; /* incoming packet counter */
54940
54941 /* packet transmitter for different forwarding methods. If it
54942 mangles the packet, it must return NF_DROP or better NF_STOLEN,
54943 @@ -647,7 +647,7 @@ struct ip_vs_dest {
54944 __be16 port; /* port number of the server */
54945 union nf_inet_addr addr; /* IP address of the server */
54946 volatile unsigned flags; /* dest status flags */
54947 - atomic_t conn_flags; /* flags to copy to conn */
54948 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
54949 atomic_t weight; /* server weight */
54950
54951 atomic_t refcnt; /* reference counter */
54952 diff -urNp linux-3.0.3/include/net/irda/ircomm_core.h linux-3.0.3/include/net/irda/ircomm_core.h
54953 --- linux-3.0.3/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
54954 +++ linux-3.0.3/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
54955 @@ -51,7 +51,7 @@ typedef struct {
54956 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
54957 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
54958 struct ircomm_info *);
54959 -} call_t;
54960 +} __no_const call_t;
54961
54962 struct ircomm_cb {
54963 irda_queue_t queue;
54964 diff -urNp linux-3.0.3/include/net/irda/ircomm_tty.h linux-3.0.3/include/net/irda/ircomm_tty.h
54965 --- linux-3.0.3/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
54966 +++ linux-3.0.3/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
54967 @@ -35,6 +35,7 @@
54968 #include <linux/termios.h>
54969 #include <linux/timer.h>
54970 #include <linux/tty.h> /* struct tty_struct */
54971 +#include <asm/local.h>
54972
54973 #include <net/irda/irias_object.h>
54974 #include <net/irda/ircomm_core.h>
54975 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
54976 unsigned short close_delay;
54977 unsigned short closing_wait; /* time to wait before closing */
54978
54979 - int open_count;
54980 - int blocked_open; /* # of blocked opens */
54981 + local_t open_count;
54982 + local_t blocked_open; /* # of blocked opens */
54983
54984 /* Protect concurent access to :
54985 * o self->open_count
54986 diff -urNp linux-3.0.3/include/net/iucv/af_iucv.h linux-3.0.3/include/net/iucv/af_iucv.h
54987 --- linux-3.0.3/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
54988 +++ linux-3.0.3/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
54989 @@ -87,7 +87,7 @@ struct iucv_sock {
54990 struct iucv_sock_list {
54991 struct hlist_head head;
54992 rwlock_t lock;
54993 - atomic_t autobind_name;
54994 + atomic_unchecked_t autobind_name;
54995 };
54996
54997 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
54998 diff -urNp linux-3.0.3/include/net/lapb.h linux-3.0.3/include/net/lapb.h
54999 --- linux-3.0.3/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
55000 +++ linux-3.0.3/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
55001 @@ -95,7 +95,7 @@ struct lapb_cb {
55002 struct sk_buff_head write_queue;
55003 struct sk_buff_head ack_queue;
55004 unsigned char window;
55005 - struct lapb_register_struct callbacks;
55006 + struct lapb_register_struct *callbacks;
55007
55008 /* FRMR control information */
55009 struct lapb_frame frmr_data;
55010 diff -urNp linux-3.0.3/include/net/neighbour.h linux-3.0.3/include/net/neighbour.h
55011 --- linux-3.0.3/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
55012 +++ linux-3.0.3/include/net/neighbour.h 2011-08-23 21:47:56.000000000 -0400
55013 @@ -117,7 +117,7 @@ struct neighbour {
55014 };
55015
55016 struct neigh_ops {
55017 - int family;
55018 + const int family;
55019 void (*solicit)(struct neighbour *, struct sk_buff*);
55020 void (*error_report)(struct neighbour *, struct sk_buff*);
55021 int (*output)(struct sk_buff*);
55022 diff -urNp linux-3.0.3/include/net/netlink.h linux-3.0.3/include/net/netlink.h
55023 --- linux-3.0.3/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
55024 +++ linux-3.0.3/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
55025 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55026 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55027 {
55028 if (mark)
55029 - skb_trim(skb, (unsigned char *) mark - skb->data);
55030 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55031 }
55032
55033 /**
55034 diff -urNp linux-3.0.3/include/net/netns/ipv4.h linux-3.0.3/include/net/netns/ipv4.h
55035 --- linux-3.0.3/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
55036 +++ linux-3.0.3/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
55037 @@ -56,8 +56,8 @@ struct netns_ipv4 {
55038
55039 unsigned int sysctl_ping_group_range[2];
55040
55041 - atomic_t rt_genid;
55042 - atomic_t dev_addr_genid;
55043 + atomic_unchecked_t rt_genid;
55044 + atomic_unchecked_t dev_addr_genid;
55045
55046 #ifdef CONFIG_IP_MROUTE
55047 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55048 diff -urNp linux-3.0.3/include/net/sctp/sctp.h linux-3.0.3/include/net/sctp/sctp.h
55049 --- linux-3.0.3/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
55050 +++ linux-3.0.3/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
55051 @@ -315,9 +315,9 @@ do { \
55052
55053 #else /* SCTP_DEBUG */
55054
55055 -#define SCTP_DEBUG_PRINTK(whatever...)
55056 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55057 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55058 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55059 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55060 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55061 #define SCTP_ENABLE_DEBUG
55062 #define SCTP_DISABLE_DEBUG
55063 #define SCTP_ASSERT(expr, str, func)
55064 diff -urNp linux-3.0.3/include/net/sock.h linux-3.0.3/include/net/sock.h
55065 --- linux-3.0.3/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
55066 +++ linux-3.0.3/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
55067 @@ -277,7 +277,7 @@ struct sock {
55068 #ifdef CONFIG_RPS
55069 __u32 sk_rxhash;
55070 #endif
55071 - atomic_t sk_drops;
55072 + atomic_unchecked_t sk_drops;
55073 int sk_rcvbuf;
55074
55075 struct sk_filter __rcu *sk_filter;
55076 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
55077 }
55078
55079 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
55080 - char __user *from, char *to,
55081 + char __user *from, unsigned char *to,
55082 int copy, int offset)
55083 {
55084 if (skb->ip_summed == CHECKSUM_NONE) {
55085 diff -urNp linux-3.0.3/include/net/tcp.h linux-3.0.3/include/net/tcp.h
55086 --- linux-3.0.3/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
55087 +++ linux-3.0.3/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
55088 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55089 struct tcp_seq_afinfo {
55090 char *name;
55091 sa_family_t family;
55092 - struct file_operations seq_fops;
55093 - struct seq_operations seq_ops;
55094 + file_operations_no_const seq_fops;
55095 + seq_operations_no_const seq_ops;
55096 };
55097
55098 struct tcp_iter_state {
55099 diff -urNp linux-3.0.3/include/net/udp.h linux-3.0.3/include/net/udp.h
55100 --- linux-3.0.3/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
55101 +++ linux-3.0.3/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
55102 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55103 char *name;
55104 sa_family_t family;
55105 struct udp_table *udp_table;
55106 - struct file_operations seq_fops;
55107 - struct seq_operations seq_ops;
55108 + file_operations_no_const seq_fops;
55109 + seq_operations_no_const seq_ops;
55110 };
55111
55112 struct udp_iter_state {
55113 diff -urNp linux-3.0.3/include/net/xfrm.h linux-3.0.3/include/net/xfrm.h
55114 --- linux-3.0.3/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
55115 +++ linux-3.0.3/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
55116 @@ -505,7 +505,7 @@ struct xfrm_policy {
55117 struct timer_list timer;
55118
55119 struct flow_cache_object flo;
55120 - atomic_t genid;
55121 + atomic_unchecked_t genid;
55122 u32 priority;
55123 u32 index;
55124 struct xfrm_mark mark;
55125 diff -urNp linux-3.0.3/include/rdma/iw_cm.h linux-3.0.3/include/rdma/iw_cm.h
55126 --- linux-3.0.3/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
55127 +++ linux-3.0.3/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
55128 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
55129 int backlog);
55130
55131 int (*destroy_listen)(struct iw_cm_id *cm_id);
55132 -};
55133 +} __no_const;
55134
55135 /**
55136 * iw_create_cm_id - Create an IW CM identifier.
55137 diff -urNp linux-3.0.3/include/scsi/libfc.h linux-3.0.3/include/scsi/libfc.h
55138 --- linux-3.0.3/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
55139 +++ linux-3.0.3/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
55140 @@ -750,6 +750,7 @@ struct libfc_function_template {
55141 */
55142 void (*disc_stop_final) (struct fc_lport *);
55143 };
55144 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55145
55146 /**
55147 * struct fc_disc - Discovery context
55148 @@ -853,7 +854,7 @@ struct fc_lport {
55149 struct fc_vport *vport;
55150
55151 /* Operational Information */
55152 - struct libfc_function_template tt;
55153 + libfc_function_template_no_const tt;
55154 u8 link_up;
55155 u8 qfull;
55156 enum fc_lport_state state;
55157 diff -urNp linux-3.0.3/include/scsi/scsi_device.h linux-3.0.3/include/scsi/scsi_device.h
55158 --- linux-3.0.3/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
55159 +++ linux-3.0.3/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
55160 @@ -161,9 +161,9 @@ struct scsi_device {
55161 unsigned int max_device_blocked; /* what device_blocked counts down from */
55162 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55163
55164 - atomic_t iorequest_cnt;
55165 - atomic_t iodone_cnt;
55166 - atomic_t ioerr_cnt;
55167 + atomic_unchecked_t iorequest_cnt;
55168 + atomic_unchecked_t iodone_cnt;
55169 + atomic_unchecked_t ioerr_cnt;
55170
55171 struct device sdev_gendev,
55172 sdev_dev;
55173 diff -urNp linux-3.0.3/include/scsi/scsi_transport_fc.h linux-3.0.3/include/scsi/scsi_transport_fc.h
55174 --- linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
55175 +++ linux-3.0.3/include/scsi/scsi_transport_fc.h 2011-08-23 21:47:56.000000000 -0400
55176 @@ -666,9 +666,9 @@ struct fc_function_template {
55177 int (*bsg_timeout)(struct fc_bsg_job *);
55178
55179 /* allocation lengths for host-specific data */
55180 - u32 dd_fcrport_size;
55181 - u32 dd_fcvport_size;
55182 - u32 dd_bsg_size;
55183 + const u32 dd_fcrport_size;
55184 + const u32 dd_fcvport_size;
55185 + const u32 dd_bsg_size;
55186
55187 /*
55188 * The driver sets these to tell the transport class it
55189 @@ -678,39 +678,39 @@ struct fc_function_template {
55190 */
55191
55192 /* remote port fixed attributes */
55193 - unsigned long show_rport_maxframe_size:1;
55194 - unsigned long show_rport_supported_classes:1;
55195 - unsigned long show_rport_dev_loss_tmo:1;
55196 + const unsigned long show_rport_maxframe_size:1;
55197 + const unsigned long show_rport_supported_classes:1;
55198 + const unsigned long show_rport_dev_loss_tmo:1;
55199
55200 /*
55201 * target dynamic attributes
55202 * These should all be "1" if the driver uses the remote port
55203 * add/delete functions (so attributes reflect rport values).
55204 */
55205 - unsigned long show_starget_node_name:1;
55206 - unsigned long show_starget_port_name:1;
55207 - unsigned long show_starget_port_id:1;
55208 + const unsigned long show_starget_node_name:1;
55209 + const unsigned long show_starget_port_name:1;
55210 + const unsigned long show_starget_port_id:1;
55211
55212 /* host fixed attributes */
55213 - unsigned long show_host_node_name:1;
55214 - unsigned long show_host_port_name:1;
55215 - unsigned long show_host_permanent_port_name:1;
55216 - unsigned long show_host_supported_classes:1;
55217 - unsigned long show_host_supported_fc4s:1;
55218 - unsigned long show_host_supported_speeds:1;
55219 - unsigned long show_host_maxframe_size:1;
55220 - unsigned long show_host_serial_number:1;
55221 + const unsigned long show_host_node_name:1;
55222 + const unsigned long show_host_port_name:1;
55223 + const unsigned long show_host_permanent_port_name:1;
55224 + const unsigned long show_host_supported_classes:1;
55225 + const unsigned long show_host_supported_fc4s:1;
55226 + const unsigned long show_host_supported_speeds:1;
55227 + const unsigned long show_host_maxframe_size:1;
55228 + const unsigned long show_host_serial_number:1;
55229 /* host dynamic attributes */
55230 - unsigned long show_host_port_id:1;
55231 - unsigned long show_host_port_type:1;
55232 - unsigned long show_host_port_state:1;
55233 - unsigned long show_host_active_fc4s:1;
55234 - unsigned long show_host_speed:1;
55235 - unsigned long show_host_fabric_name:1;
55236 - unsigned long show_host_symbolic_name:1;
55237 - unsigned long show_host_system_hostname:1;
55238 + const unsigned long show_host_port_id:1;
55239 + const unsigned long show_host_port_type:1;
55240 + const unsigned long show_host_port_state:1;
55241 + const unsigned long show_host_active_fc4s:1;
55242 + const unsigned long show_host_speed:1;
55243 + const unsigned long show_host_fabric_name:1;
55244 + const unsigned long show_host_symbolic_name:1;
55245 + const unsigned long show_host_system_hostname:1;
55246
55247 - unsigned long disable_target_scan:1;
55248 + const unsigned long disable_target_scan:1;
55249 };
55250
55251
55252 diff -urNp linux-3.0.3/include/sound/ak4xxx-adda.h linux-3.0.3/include/sound/ak4xxx-adda.h
55253 --- linux-3.0.3/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
55254 +++ linux-3.0.3/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
55255 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55256 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55257 unsigned char val);
55258 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55259 -};
55260 +} __no_const;
55261
55262 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55263
55264 diff -urNp linux-3.0.3/include/sound/hwdep.h linux-3.0.3/include/sound/hwdep.h
55265 --- linux-3.0.3/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
55266 +++ linux-3.0.3/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
55267 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55268 struct snd_hwdep_dsp_status *status);
55269 int (*dsp_load)(struct snd_hwdep *hw,
55270 struct snd_hwdep_dsp_image *image);
55271 -};
55272 +} __no_const;
55273
55274 struct snd_hwdep {
55275 struct snd_card *card;
55276 diff -urNp linux-3.0.3/include/sound/info.h linux-3.0.3/include/sound/info.h
55277 --- linux-3.0.3/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
55278 +++ linux-3.0.3/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
55279 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55280 struct snd_info_buffer *buffer);
55281 void (*write)(struct snd_info_entry *entry,
55282 struct snd_info_buffer *buffer);
55283 -};
55284 +} __no_const;
55285
55286 struct snd_info_entry_ops {
55287 int (*open)(struct snd_info_entry *entry,
55288 diff -urNp linux-3.0.3/include/sound/pcm.h linux-3.0.3/include/sound/pcm.h
55289 --- linux-3.0.3/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
55290 +++ linux-3.0.3/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
55291 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55292 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55293 int (*ack)(struct snd_pcm_substream *substream);
55294 };
55295 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55296
55297 /*
55298 *
55299 diff -urNp linux-3.0.3/include/sound/sb16_csp.h linux-3.0.3/include/sound/sb16_csp.h
55300 --- linux-3.0.3/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
55301 +++ linux-3.0.3/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
55302 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
55303 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55304 int (*csp_stop) (struct snd_sb_csp * p);
55305 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55306 -};
55307 +} __no_const;
55308
55309 /*
55310 * CSP private data
55311 diff -urNp linux-3.0.3/include/sound/soc.h linux-3.0.3/include/sound/soc.h
55312 --- linux-3.0.3/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
55313 +++ linux-3.0.3/include/sound/soc.h 2011-08-23 21:47:56.000000000 -0400
55314 @@ -635,7 +635,7 @@ struct snd_soc_platform_driver {
55315 struct snd_soc_dai *);
55316
55317 /* platform stream ops */
55318 - struct snd_pcm_ops *ops;
55319 + struct snd_pcm_ops * const ops;
55320 };
55321
55322 struct snd_soc_platform {
55323 diff -urNp linux-3.0.3/include/sound/ymfpci.h linux-3.0.3/include/sound/ymfpci.h
55324 --- linux-3.0.3/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
55325 +++ linux-3.0.3/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
55326 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55327 spinlock_t reg_lock;
55328 spinlock_t voice_lock;
55329 wait_queue_head_t interrupt_sleep;
55330 - atomic_t interrupt_sleep_count;
55331 + atomic_unchecked_t interrupt_sleep_count;
55332 struct snd_info_entry *proc_entry;
55333 const struct firmware *dsp_microcode;
55334 const struct firmware *controller_microcode;
55335 diff -urNp linux-3.0.3/include/target/target_core_base.h linux-3.0.3/include/target/target_core_base.h
55336 --- linux-3.0.3/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
55337 +++ linux-3.0.3/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
55338 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55339 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55340 int (*t10_pr_register)(struct se_cmd *);
55341 int (*t10_pr_clear)(struct se_cmd *);
55342 -};
55343 +} __no_const;
55344
55345 struct t10_reservation_template {
55346 /* Reservation effects all target ports */
55347 @@ -432,8 +432,8 @@ struct se_transport_task {
55348 atomic_t t_task_cdbs_left;
55349 atomic_t t_task_cdbs_ex_left;
55350 atomic_t t_task_cdbs_timeout_left;
55351 - atomic_t t_task_cdbs_sent;
55352 - atomic_t t_transport_aborted;
55353 + atomic_unchecked_t t_task_cdbs_sent;
55354 + atomic_unchecked_t t_transport_aborted;
55355 atomic_t t_transport_active;
55356 atomic_t t_transport_complete;
55357 atomic_t t_transport_queue_active;
55358 @@ -774,7 +774,7 @@ struct se_device {
55359 atomic_t active_cmds;
55360 atomic_t simple_cmds;
55361 atomic_t depth_left;
55362 - atomic_t dev_ordered_id;
55363 + atomic_unchecked_t dev_ordered_id;
55364 atomic_t dev_tur_active;
55365 atomic_t execute_tasks;
55366 atomic_t dev_status_thr_count;
55367 diff -urNp linux-3.0.3/include/trace/events/irq.h linux-3.0.3/include/trace/events/irq.h
55368 --- linux-3.0.3/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
55369 +++ linux-3.0.3/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
55370 @@ -36,7 +36,7 @@ struct softirq_action;
55371 */
55372 TRACE_EVENT(irq_handler_entry,
55373
55374 - TP_PROTO(int irq, struct irqaction *action),
55375 + TP_PROTO(int irq, const struct irqaction *action),
55376
55377 TP_ARGS(irq, action),
55378
55379 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55380 */
55381 TRACE_EVENT(irq_handler_exit,
55382
55383 - TP_PROTO(int irq, struct irqaction *action, int ret),
55384 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55385
55386 TP_ARGS(irq, action, ret),
55387
55388 diff -urNp linux-3.0.3/include/video/udlfb.h linux-3.0.3/include/video/udlfb.h
55389 --- linux-3.0.3/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
55390 +++ linux-3.0.3/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
55391 @@ -51,10 +51,10 @@ struct dlfb_data {
55392 int base8;
55393 u32 pseudo_palette[256];
55394 /* blit-only rendering path metrics, exposed through sysfs */
55395 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55396 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55397 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55398 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55399 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55400 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55401 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55402 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55403 };
55404
55405 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55406 diff -urNp linux-3.0.3/include/video/uvesafb.h linux-3.0.3/include/video/uvesafb.h
55407 --- linux-3.0.3/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
55408 +++ linux-3.0.3/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
55409 @@ -177,6 +177,7 @@ struct uvesafb_par {
55410 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55411 u8 pmi_setpal; /* PMI for palette changes */
55412 u16 *pmi_base; /* protected mode interface location */
55413 + u8 *pmi_code; /* protected mode code location */
55414 void *pmi_start;
55415 void *pmi_pal;
55416 u8 *vbe_state_orig; /*
55417 diff -urNp linux-3.0.3/init/do_mounts.c linux-3.0.3/init/do_mounts.c
55418 --- linux-3.0.3/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
55419 +++ linux-3.0.3/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
55420 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55421
55422 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55423 {
55424 - int err = sys_mount(name, "/root", fs, flags, data);
55425 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55426 if (err)
55427 return err;
55428
55429 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55430 va_start(args, fmt);
55431 vsprintf(buf, fmt, args);
55432 va_end(args);
55433 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55434 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55435 if (fd >= 0) {
55436 sys_ioctl(fd, FDEJECT, 0);
55437 sys_close(fd);
55438 }
55439 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55440 - fd = sys_open("/dev/console", O_RDWR, 0);
55441 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55442 if (fd >= 0) {
55443 sys_ioctl(fd, TCGETS, (long)&termios);
55444 termios.c_lflag &= ~ICANON;
55445 sys_ioctl(fd, TCSETSF, (long)&termios);
55446 - sys_read(fd, &c, 1);
55447 + sys_read(fd, (char __user *)&c, 1);
55448 termios.c_lflag |= ICANON;
55449 sys_ioctl(fd, TCSETSF, (long)&termios);
55450 sys_close(fd);
55451 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55452 mount_root();
55453 out:
55454 devtmpfs_mount("dev");
55455 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55456 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55457 sys_chroot((const char __user __force *)".");
55458 }
55459 diff -urNp linux-3.0.3/init/do_mounts.h linux-3.0.3/init/do_mounts.h
55460 --- linux-3.0.3/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
55461 +++ linux-3.0.3/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
55462 @@ -15,15 +15,15 @@ extern int root_mountflags;
55463
55464 static inline int create_dev(char *name, dev_t dev)
55465 {
55466 - sys_unlink(name);
55467 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55468 + sys_unlink((__force char __user *)name);
55469 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55470 }
55471
55472 #if BITS_PER_LONG == 32
55473 static inline u32 bstat(char *name)
55474 {
55475 struct stat64 stat;
55476 - if (sys_stat64(name, &stat) != 0)
55477 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55478 return 0;
55479 if (!S_ISBLK(stat.st_mode))
55480 return 0;
55481 diff -urNp linux-3.0.3/init/do_mounts_initrd.c linux-3.0.3/init/do_mounts_initrd.c
55482 --- linux-3.0.3/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
55483 +++ linux-3.0.3/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
55484 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55485 create_dev("/dev/root.old", Root_RAM0);
55486 /* mount initrd on rootfs' /root */
55487 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55488 - sys_mkdir("/old", 0700);
55489 - root_fd = sys_open("/", 0, 0);
55490 - old_fd = sys_open("/old", 0, 0);
55491 + sys_mkdir((__force const char __user *)"/old", 0700);
55492 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55493 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55494 /* move initrd over / and chdir/chroot in initrd root */
55495 - sys_chdir("/root");
55496 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55497 - sys_chroot(".");
55498 + sys_chdir((__force const char __user *)"/root");
55499 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55500 + sys_chroot((__force const char __user *)".");
55501
55502 /*
55503 * In case that a resume from disk is carried out by linuxrc or one of
55504 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55505
55506 /* move initrd to rootfs' /old */
55507 sys_fchdir(old_fd);
55508 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55509 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55510 /* switch root and cwd back to / of rootfs */
55511 sys_fchdir(root_fd);
55512 - sys_chroot(".");
55513 + sys_chroot((__force const char __user *)".");
55514 sys_close(old_fd);
55515 sys_close(root_fd);
55516
55517 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55518 - sys_chdir("/old");
55519 + sys_chdir((__force const char __user *)"/old");
55520 return;
55521 }
55522
55523 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55524 mount_root();
55525
55526 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55527 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55528 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55529 if (!error)
55530 printk("okay\n");
55531 else {
55532 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55533 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55534 if (error == -ENOENT)
55535 printk("/initrd does not exist. Ignored.\n");
55536 else
55537 printk("failed\n");
55538 printk(KERN_NOTICE "Unmounting old root\n");
55539 - sys_umount("/old", MNT_DETACH);
55540 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55541 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55542 if (fd < 0) {
55543 error = fd;
55544 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55545 * mounted in the normal path.
55546 */
55547 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55548 - sys_unlink("/initrd.image");
55549 + sys_unlink((__force const char __user *)"/initrd.image");
55550 handle_initrd();
55551 return 1;
55552 }
55553 }
55554 - sys_unlink("/initrd.image");
55555 + sys_unlink((__force const char __user *)"/initrd.image");
55556 return 0;
55557 }
55558 diff -urNp linux-3.0.3/init/do_mounts_md.c linux-3.0.3/init/do_mounts_md.c
55559 --- linux-3.0.3/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
55560 +++ linux-3.0.3/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
55561 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55562 partitioned ? "_d" : "", minor,
55563 md_setup_args[ent].device_names);
55564
55565 - fd = sys_open(name, 0, 0);
55566 + fd = sys_open((__force char __user *)name, 0, 0);
55567 if (fd < 0) {
55568 printk(KERN_ERR "md: open failed - cannot start "
55569 "array %s\n", name);
55570 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55571 * array without it
55572 */
55573 sys_close(fd);
55574 - fd = sys_open(name, 0, 0);
55575 + fd = sys_open((__force char __user *)name, 0, 0);
55576 sys_ioctl(fd, BLKRRPART, 0);
55577 }
55578 sys_close(fd);
55579 diff -urNp linux-3.0.3/init/initramfs.c linux-3.0.3/init/initramfs.c
55580 --- linux-3.0.3/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
55581 +++ linux-3.0.3/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
55582 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55583 }
55584 }
55585
55586 -static long __init do_utime(char __user *filename, time_t mtime)
55587 +static long __init do_utime(__force char __user *filename, time_t mtime)
55588 {
55589 struct timespec t[2];
55590
55591 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55592 struct dir_entry *de, *tmp;
55593 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55594 list_del(&de->list);
55595 - do_utime(de->name, de->mtime);
55596 + do_utime((__force char __user *)de->name, de->mtime);
55597 kfree(de->name);
55598 kfree(de);
55599 }
55600 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55601 if (nlink >= 2) {
55602 char *old = find_link(major, minor, ino, mode, collected);
55603 if (old)
55604 - return (sys_link(old, collected) < 0) ? -1 : 1;
55605 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55606 }
55607 return 0;
55608 }
55609 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55610 {
55611 struct stat st;
55612
55613 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55614 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55615 if (S_ISDIR(st.st_mode))
55616 - sys_rmdir(path);
55617 + sys_rmdir((__force char __user *)path);
55618 else
55619 - sys_unlink(path);
55620 + sys_unlink((__force char __user *)path);
55621 }
55622 }
55623
55624 @@ -305,7 +305,7 @@ static int __init do_name(void)
55625 int openflags = O_WRONLY|O_CREAT;
55626 if (ml != 1)
55627 openflags |= O_TRUNC;
55628 - wfd = sys_open(collected, openflags, mode);
55629 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55630
55631 if (wfd >= 0) {
55632 sys_fchown(wfd, uid, gid);
55633 @@ -317,17 +317,17 @@ static int __init do_name(void)
55634 }
55635 }
55636 } else if (S_ISDIR(mode)) {
55637 - sys_mkdir(collected, mode);
55638 - sys_chown(collected, uid, gid);
55639 - sys_chmod(collected, mode);
55640 + sys_mkdir((__force char __user *)collected, mode);
55641 + sys_chown((__force char __user *)collected, uid, gid);
55642 + sys_chmod((__force char __user *)collected, mode);
55643 dir_add(collected, mtime);
55644 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55645 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55646 if (maybe_link() == 0) {
55647 - sys_mknod(collected, mode, rdev);
55648 - sys_chown(collected, uid, gid);
55649 - sys_chmod(collected, mode);
55650 - do_utime(collected, mtime);
55651 + sys_mknod((__force char __user *)collected, mode, rdev);
55652 + sys_chown((__force char __user *)collected, uid, gid);
55653 + sys_chmod((__force char __user *)collected, mode);
55654 + do_utime((__force char __user *)collected, mtime);
55655 }
55656 }
55657 return 0;
55658 @@ -336,15 +336,15 @@ static int __init do_name(void)
55659 static int __init do_copy(void)
55660 {
55661 if (count >= body_len) {
55662 - sys_write(wfd, victim, body_len);
55663 + sys_write(wfd, (__force char __user *)victim, body_len);
55664 sys_close(wfd);
55665 - do_utime(vcollected, mtime);
55666 + do_utime((__force char __user *)vcollected, mtime);
55667 kfree(vcollected);
55668 eat(body_len);
55669 state = SkipIt;
55670 return 0;
55671 } else {
55672 - sys_write(wfd, victim, count);
55673 + sys_write(wfd, (__force char __user *)victim, count);
55674 body_len -= count;
55675 eat(count);
55676 return 1;
55677 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55678 {
55679 collected[N_ALIGN(name_len) + body_len] = '\0';
55680 clean_path(collected, 0);
55681 - sys_symlink(collected + N_ALIGN(name_len), collected);
55682 - sys_lchown(collected, uid, gid);
55683 - do_utime(collected, mtime);
55684 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55685 + sys_lchown((__force char __user *)collected, uid, gid);
55686 + do_utime((__force char __user *)collected, mtime);
55687 state = SkipIt;
55688 next_state = Reset;
55689 return 0;
55690 diff -urNp linux-3.0.3/init/Kconfig linux-3.0.3/init/Kconfig
55691 --- linux-3.0.3/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
55692 +++ linux-3.0.3/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
55693 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
55694
55695 config COMPAT_BRK
55696 bool "Disable heap randomization"
55697 - default y
55698 + default n
55699 help
55700 Randomizing heap placement makes heap exploits harder, but it
55701 also breaks ancient binaries (including anything libc5 based).
55702 diff -urNp linux-3.0.3/init/main.c linux-3.0.3/init/main.c
55703 --- linux-3.0.3/init/main.c 2011-07-21 22:17:23.000000000 -0400
55704 +++ linux-3.0.3/init/main.c 2011-08-23 21:48:14.000000000 -0400
55705 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55706 extern void tc_init(void);
55707 #endif
55708
55709 +extern void grsecurity_init(void);
55710 +
55711 /*
55712 * Debug helper: via this flag we know that we are in 'early bootup code'
55713 * where only the boot processor is running with IRQ disabled. This means
55714 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55715
55716 __setup("reset_devices", set_reset_devices);
55717
55718 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55719 +extern char pax_enter_kernel_user[];
55720 +extern char pax_exit_kernel_user[];
55721 +extern pgdval_t clone_pgd_mask;
55722 +#endif
55723 +
55724 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55725 +static int __init setup_pax_nouderef(char *str)
55726 +{
55727 +#ifdef CONFIG_X86_32
55728 + unsigned int cpu;
55729 + struct desc_struct *gdt;
55730 +
55731 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
55732 + gdt = get_cpu_gdt_table(cpu);
55733 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55734 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55735 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55736 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55737 + }
55738 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55739 +#else
55740 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55741 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55742 + clone_pgd_mask = ~(pgdval_t)0UL;
55743 +#endif
55744 +
55745 + return 0;
55746 +}
55747 +early_param("pax_nouderef", setup_pax_nouderef);
55748 +#endif
55749 +
55750 +#ifdef CONFIG_PAX_SOFTMODE
55751 +int pax_softmode;
55752 +
55753 +static int __init setup_pax_softmode(char *str)
55754 +{
55755 + get_option(&str, &pax_softmode);
55756 + return 1;
55757 +}
55758 +__setup("pax_softmode=", setup_pax_softmode);
55759 +#endif
55760 +
55761 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55762 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55763 static const char *panic_later, *panic_param;
55764 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
55765 {
55766 int count = preempt_count();
55767 int ret;
55768 + const char *msg1 = "", *msg2 = "";
55769
55770 if (initcall_debug)
55771 ret = do_one_initcall_debug(fn);
55772 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
55773 sprintf(msgbuf, "error code %d ", ret);
55774
55775 if (preempt_count() != count) {
55776 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55777 + msg1 = " preemption imbalance";
55778 preempt_count() = count;
55779 }
55780 if (irqs_disabled()) {
55781 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55782 + msg2 = " disabled interrupts";
55783 local_irq_enable();
55784 }
55785 - if (msgbuf[0]) {
55786 - printk("initcall %pF returned with %s\n", fn, msgbuf);
55787 + if (msgbuf[0] || *msg1 || *msg2) {
55788 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
55789 }
55790
55791 return ret;
55792 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
55793 do_basic_setup();
55794
55795 /* Open the /dev/console on the rootfs, this should never fail */
55796 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
55797 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
55798 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
55799
55800 (void) sys_dup(0);
55801 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
55802 if (!ramdisk_execute_command)
55803 ramdisk_execute_command = "/init";
55804
55805 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
55806 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
55807 ramdisk_execute_command = NULL;
55808 prepare_namespace();
55809 }
55810
55811 + grsecurity_init();
55812 +
55813 /*
55814 * Ok, we have completed the initial bootup, and
55815 * we're essentially up and running. Get rid of the
55816 diff -urNp linux-3.0.3/ipc/mqueue.c linux-3.0.3/ipc/mqueue.c
55817 --- linux-3.0.3/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
55818 +++ linux-3.0.3/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
55819 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
55820 mq_bytes = (mq_msg_tblsz +
55821 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
55822
55823 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
55824 spin_lock(&mq_lock);
55825 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
55826 u->mq_bytes + mq_bytes >
55827 diff -urNp linux-3.0.3/ipc/msg.c linux-3.0.3/ipc/msg.c
55828 --- linux-3.0.3/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
55829 +++ linux-3.0.3/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
55830 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
55831 return security_msg_queue_associate(msq, msgflg);
55832 }
55833
55834 +static struct ipc_ops msg_ops = {
55835 + .getnew = newque,
55836 + .associate = msg_security,
55837 + .more_checks = NULL
55838 +};
55839 +
55840 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
55841 {
55842 struct ipc_namespace *ns;
55843 - struct ipc_ops msg_ops;
55844 struct ipc_params msg_params;
55845
55846 ns = current->nsproxy->ipc_ns;
55847
55848 - msg_ops.getnew = newque;
55849 - msg_ops.associate = msg_security;
55850 - msg_ops.more_checks = NULL;
55851 -
55852 msg_params.key = key;
55853 msg_params.flg = msgflg;
55854
55855 diff -urNp linux-3.0.3/ipc/sem.c linux-3.0.3/ipc/sem.c
55856 --- linux-3.0.3/ipc/sem.c 2011-08-23 21:44:40.000000000 -0400
55857 +++ linux-3.0.3/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
55858 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
55859 return 0;
55860 }
55861
55862 +static struct ipc_ops sem_ops = {
55863 + .getnew = newary,
55864 + .associate = sem_security,
55865 + .more_checks = sem_more_checks
55866 +};
55867 +
55868 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
55869 {
55870 struct ipc_namespace *ns;
55871 - struct ipc_ops sem_ops;
55872 struct ipc_params sem_params;
55873
55874 ns = current->nsproxy->ipc_ns;
55875 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
55876 if (nsems < 0 || nsems > ns->sc_semmsl)
55877 return -EINVAL;
55878
55879 - sem_ops.getnew = newary;
55880 - sem_ops.associate = sem_security;
55881 - sem_ops.more_checks = sem_more_checks;
55882 -
55883 sem_params.key = key;
55884 sem_params.flg = semflg;
55885 sem_params.u.nsems = nsems;
55886 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
55887 int nsems;
55888 struct list_head tasks;
55889
55890 + pax_track_stack();
55891 +
55892 sma = sem_lock_check(ns, semid);
55893 if (IS_ERR(sma))
55894 return PTR_ERR(sma);
55895 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
55896 struct ipc_namespace *ns;
55897 struct list_head tasks;
55898
55899 + pax_track_stack();
55900 +
55901 ns = current->nsproxy->ipc_ns;
55902
55903 if (nsops < 1 || semid < 0)
55904 diff -urNp linux-3.0.3/ipc/shm.c linux-3.0.3/ipc/shm.c
55905 --- linux-3.0.3/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
55906 +++ linux-3.0.3/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
55907 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
55908 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
55909 #endif
55910
55911 +#ifdef CONFIG_GRKERNSEC
55912 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55913 + const time_t shm_createtime, const uid_t cuid,
55914 + const int shmid);
55915 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55916 + const time_t shm_createtime);
55917 +#endif
55918 +
55919 void shm_init_ns(struct ipc_namespace *ns)
55920 {
55921 ns->shm_ctlmax = SHMMAX;
55922 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
55923 shp->shm_lprid = 0;
55924 shp->shm_atim = shp->shm_dtim = 0;
55925 shp->shm_ctim = get_seconds();
55926 +#ifdef CONFIG_GRKERNSEC
55927 + {
55928 + struct timespec timeval;
55929 + do_posix_clock_monotonic_gettime(&timeval);
55930 +
55931 + shp->shm_createtime = timeval.tv_sec;
55932 + }
55933 +#endif
55934 shp->shm_segsz = size;
55935 shp->shm_nattch = 0;
55936 shp->shm_file = file;
55937 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
55938 return 0;
55939 }
55940
55941 +static struct ipc_ops shm_ops = {
55942 + .getnew = newseg,
55943 + .associate = shm_security,
55944 + .more_checks = shm_more_checks
55945 +};
55946 +
55947 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
55948 {
55949 struct ipc_namespace *ns;
55950 - struct ipc_ops shm_ops;
55951 struct ipc_params shm_params;
55952
55953 ns = current->nsproxy->ipc_ns;
55954
55955 - shm_ops.getnew = newseg;
55956 - shm_ops.associate = shm_security;
55957 - shm_ops.more_checks = shm_more_checks;
55958 -
55959 shm_params.key = key;
55960 shm_params.flg = shmflg;
55961 shm_params.u.size = size;
55962 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
55963 case SHM_LOCK:
55964 case SHM_UNLOCK:
55965 {
55966 - struct file *uninitialized_var(shm_file);
55967 -
55968 lru_add_drain_all(); /* drain pagevecs to lru lists */
55969
55970 shp = shm_lock_check(ns, shmid);
55971 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
55972 if (err)
55973 goto out_unlock;
55974
55975 +#ifdef CONFIG_GRKERNSEC
55976 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
55977 + shp->shm_perm.cuid, shmid) ||
55978 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
55979 + err = -EACCES;
55980 + goto out_unlock;
55981 + }
55982 +#endif
55983 +
55984 path = shp->shm_file->f_path;
55985 path_get(&path);
55986 shp->shm_nattch++;
55987 +#ifdef CONFIG_GRKERNSEC
55988 + shp->shm_lapid = current->pid;
55989 +#endif
55990 size = i_size_read(path.dentry->d_inode);
55991 shm_unlock(shp);
55992
55993 diff -urNp linux-3.0.3/kernel/acct.c linux-3.0.3/kernel/acct.c
55994 --- linux-3.0.3/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
55995 +++ linux-3.0.3/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
55996 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
55997 */
55998 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
55999 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56000 - file->f_op->write(file, (char *)&ac,
56001 + file->f_op->write(file, (__force char __user *)&ac,
56002 sizeof(acct_t), &file->f_pos);
56003 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56004 set_fs(fs);
56005 diff -urNp linux-3.0.3/kernel/audit.c linux-3.0.3/kernel/audit.c
56006 --- linux-3.0.3/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
56007 +++ linux-3.0.3/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
56008 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56009 3) suppressed due to audit_rate_limit
56010 4) suppressed due to audit_backlog_limit
56011 */
56012 -static atomic_t audit_lost = ATOMIC_INIT(0);
56013 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56014
56015 /* The netlink socket. */
56016 static struct sock *audit_sock;
56017 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56018 unsigned long now;
56019 int print;
56020
56021 - atomic_inc(&audit_lost);
56022 + atomic_inc_unchecked(&audit_lost);
56023
56024 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56025
56026 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56027 printk(KERN_WARNING
56028 "audit: audit_lost=%d audit_rate_limit=%d "
56029 "audit_backlog_limit=%d\n",
56030 - atomic_read(&audit_lost),
56031 + atomic_read_unchecked(&audit_lost),
56032 audit_rate_limit,
56033 audit_backlog_limit);
56034 audit_panic(message);
56035 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56036 status_set.pid = audit_pid;
56037 status_set.rate_limit = audit_rate_limit;
56038 status_set.backlog_limit = audit_backlog_limit;
56039 - status_set.lost = atomic_read(&audit_lost);
56040 + status_set.lost = atomic_read_unchecked(&audit_lost);
56041 status_set.backlog = skb_queue_len(&audit_skb_queue);
56042 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56043 &status_set, sizeof(status_set));
56044 diff -urNp linux-3.0.3/kernel/auditsc.c linux-3.0.3/kernel/auditsc.c
56045 --- linux-3.0.3/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
56046 +++ linux-3.0.3/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
56047 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
56048 }
56049
56050 /* global counter which is incremented every time something logs in */
56051 -static atomic_t session_id = ATOMIC_INIT(0);
56052 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56053
56054 /**
56055 * audit_set_loginuid - set a task's audit_context loginuid
56056 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
56057 */
56058 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56059 {
56060 - unsigned int sessionid = atomic_inc_return(&session_id);
56061 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56062 struct audit_context *context = task->audit_context;
56063
56064 if (context && context->in_syscall) {
56065 diff -urNp linux-3.0.3/kernel/capability.c linux-3.0.3/kernel/capability.c
56066 --- linux-3.0.3/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
56067 +++ linux-3.0.3/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
56068 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56069 * before modification is attempted and the application
56070 * fails.
56071 */
56072 + if (tocopy > ARRAY_SIZE(kdata))
56073 + return -EFAULT;
56074 +
56075 if (copy_to_user(dataptr, kdata, tocopy
56076 * sizeof(struct __user_cap_data_struct))) {
56077 return -EFAULT;
56078 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
56079 BUG();
56080 }
56081
56082 - if (security_capable(ns, current_cred(), cap) == 0) {
56083 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56084 current->flags |= PF_SUPERPRIV;
56085 return true;
56086 }
56087 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
56088 }
56089 EXPORT_SYMBOL(ns_capable);
56090
56091 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56092 +{
56093 + if (unlikely(!cap_valid(cap))) {
56094 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56095 + BUG();
56096 + }
56097 +
56098 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56099 + current->flags |= PF_SUPERPRIV;
56100 + return true;
56101 + }
56102 + return false;
56103 +}
56104 +EXPORT_SYMBOL(ns_capable_nolog);
56105 +
56106 +bool capable_nolog(int cap)
56107 +{
56108 + return ns_capable_nolog(&init_user_ns, cap);
56109 +}
56110 +EXPORT_SYMBOL(capable_nolog);
56111 +
56112 /**
56113 * task_ns_capable - Determine whether current task has a superior
56114 * capability targeted at a specific task's user namespace.
56115 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
56116 }
56117 EXPORT_SYMBOL(task_ns_capable);
56118
56119 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56120 +{
56121 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56122 +}
56123 +EXPORT_SYMBOL(task_ns_capable_nolog);
56124 +
56125 /**
56126 * nsown_capable - Check superior capability to one's own user_ns
56127 * @cap: The capability in question
56128 diff -urNp linux-3.0.3/kernel/cgroup.c linux-3.0.3/kernel/cgroup.c
56129 --- linux-3.0.3/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
56130 +++ linux-3.0.3/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
56131 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
56132 struct hlist_head *hhead;
56133 struct cg_cgroup_link *link;
56134
56135 + pax_track_stack();
56136 +
56137 /* First see if we already have a cgroup group that matches
56138 * the desired set */
56139 read_lock(&css_set_lock);
56140 diff -urNp linux-3.0.3/kernel/compat.c linux-3.0.3/kernel/compat.c
56141 --- linux-3.0.3/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
56142 +++ linux-3.0.3/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
56143 @@ -13,6 +13,7 @@
56144
56145 #include <linux/linkage.h>
56146 #include <linux/compat.h>
56147 +#include <linux/module.h>
56148 #include <linux/errno.h>
56149 #include <linux/time.h>
56150 #include <linux/signal.h>
56151 diff -urNp linux-3.0.3/kernel/configs.c linux-3.0.3/kernel/configs.c
56152 --- linux-3.0.3/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
56153 +++ linux-3.0.3/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
56154 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56155 struct proc_dir_entry *entry;
56156
56157 /* create the current config file */
56158 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56159 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56160 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56161 + &ikconfig_file_ops);
56162 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56163 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56164 + &ikconfig_file_ops);
56165 +#endif
56166 +#else
56167 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56168 &ikconfig_file_ops);
56169 +#endif
56170 +
56171 if (!entry)
56172 return -ENOMEM;
56173
56174 diff -urNp linux-3.0.3/kernel/cred.c linux-3.0.3/kernel/cred.c
56175 --- linux-3.0.3/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
56176 +++ linux-3.0.3/kernel/cred.c 2011-08-23 21:48:14.000000000 -0400
56177 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56178 */
56179 void __put_cred(struct cred *cred)
56180 {
56181 + pax_track_stack();
56182 +
56183 kdebug("__put_cred(%p{%d,%d})", cred,
56184 atomic_read(&cred->usage),
56185 read_cred_subscribers(cred));
56186 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56187 {
56188 struct cred *cred;
56189
56190 + pax_track_stack();
56191 +
56192 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56193 atomic_read(&tsk->cred->usage),
56194 read_cred_subscribers(tsk->cred));
56195 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56196 {
56197 const struct cred *cred;
56198
56199 + pax_track_stack();
56200 +
56201 rcu_read_lock();
56202
56203 do {
56204 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56205 {
56206 struct cred *new;
56207
56208 + pax_track_stack();
56209 +
56210 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56211 if (!new)
56212 return NULL;
56213 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56214 const struct cred *old;
56215 struct cred *new;
56216
56217 + pax_track_stack();
56218 +
56219 validate_process_creds();
56220
56221 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56222 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56223 struct thread_group_cred *tgcred = NULL;
56224 struct cred *new;
56225
56226 + pax_track_stack();
56227 +
56228 #ifdef CONFIG_KEYS
56229 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56230 if (!tgcred)
56231 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56232 struct cred *new;
56233 int ret;
56234
56235 + pax_track_stack();
56236 +
56237 if (
56238 #ifdef CONFIG_KEYS
56239 !p->cred->thread_keyring &&
56240 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56241 struct task_struct *task = current;
56242 const struct cred *old = task->real_cred;
56243
56244 + pax_track_stack();
56245 +
56246 kdebug("commit_creds(%p{%d,%d})", new,
56247 atomic_read(&new->usage),
56248 read_cred_subscribers(new));
56249 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56250
56251 get_cred(new); /* we will require a ref for the subj creds too */
56252
56253 + gr_set_role_label(task, new->uid, new->gid);
56254 +
56255 /* dumpability changes */
56256 if (old->euid != new->euid ||
56257 old->egid != new->egid ||
56258 @@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56259 */
56260 void abort_creds(struct cred *new)
56261 {
56262 + pax_track_stack();
56263 +
56264 kdebug("abort_creds(%p{%d,%d})", new,
56265 atomic_read(&new->usage),
56266 read_cred_subscribers(new));
56267 @@ -574,6 +594,8 @@ const struct cred *override_creds(const
56268 {
56269 const struct cred *old = current->cred;
56270
56271 + pax_track_stack();
56272 +
56273 kdebug("override_creds(%p{%d,%d})", new,
56274 atomic_read(&new->usage),
56275 read_cred_subscribers(new));
56276 @@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56277 {
56278 const struct cred *override = current->cred;
56279
56280 + pax_track_stack();
56281 +
56282 kdebug("revert_creds(%p{%d,%d})", old,
56283 atomic_read(&old->usage),
56284 read_cred_subscribers(old));
56285 @@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56286 const struct cred *old;
56287 struct cred *new;
56288
56289 + pax_track_stack();
56290 +
56291 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56292 if (!new)
56293 return NULL;
56294 @@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56295 */
56296 int set_security_override(struct cred *new, u32 secid)
56297 {
56298 + pax_track_stack();
56299 +
56300 return security_kernel_act_as(new, secid);
56301 }
56302 EXPORT_SYMBOL(set_security_override);
56303 @@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56304 u32 secid;
56305 int ret;
56306
56307 + pax_track_stack();
56308 +
56309 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56310 if (ret < 0)
56311 return ret;
56312 diff -urNp linux-3.0.3/kernel/debug/debug_core.c linux-3.0.3/kernel/debug/debug_core.c
56313 --- linux-3.0.3/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
56314 +++ linux-3.0.3/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
56315 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56316 */
56317 static atomic_t masters_in_kgdb;
56318 static atomic_t slaves_in_kgdb;
56319 -static atomic_t kgdb_break_tasklet_var;
56320 +static atomic_unchecked_t kgdb_break_tasklet_var;
56321 atomic_t kgdb_setting_breakpoint;
56322
56323 struct task_struct *kgdb_usethread;
56324 @@ -129,7 +129,7 @@ int kgdb_single_step;
56325 static pid_t kgdb_sstep_pid;
56326
56327 /* to keep track of the CPU which is doing the single stepping*/
56328 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56329 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56330
56331 /*
56332 * If you are debugging a problem where roundup (the collection of
56333 @@ -542,7 +542,7 @@ return_normal:
56334 * kernel will only try for the value of sstep_tries before
56335 * giving up and continuing on.
56336 */
56337 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56338 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56339 (kgdb_info[cpu].task &&
56340 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56341 atomic_set(&kgdb_active, -1);
56342 @@ -636,8 +636,8 @@ cpu_master_loop:
56343 }
56344
56345 kgdb_restore:
56346 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56347 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56348 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56349 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56350 if (kgdb_info[sstep_cpu].task)
56351 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56352 else
56353 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56354 static void kgdb_tasklet_bpt(unsigned long ing)
56355 {
56356 kgdb_breakpoint();
56357 - atomic_set(&kgdb_break_tasklet_var, 0);
56358 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56359 }
56360
56361 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56362
56363 void kgdb_schedule_breakpoint(void)
56364 {
56365 - if (atomic_read(&kgdb_break_tasklet_var) ||
56366 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56367 atomic_read(&kgdb_active) != -1 ||
56368 atomic_read(&kgdb_setting_breakpoint))
56369 return;
56370 - atomic_inc(&kgdb_break_tasklet_var);
56371 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56372 tasklet_schedule(&kgdb_tasklet_breakpoint);
56373 }
56374 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56375 diff -urNp linux-3.0.3/kernel/debug/kdb/kdb_main.c linux-3.0.3/kernel/debug/kdb/kdb_main.c
56376 --- linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
56377 +++ linux-3.0.3/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
56378 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56379 list_for_each_entry(mod, kdb_modules, list) {
56380
56381 kdb_printf("%-20s%8u 0x%p ", mod->name,
56382 - mod->core_size, (void *)mod);
56383 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56384 #ifdef CONFIG_MODULE_UNLOAD
56385 kdb_printf("%4d ", module_refcount(mod));
56386 #endif
56387 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56388 kdb_printf(" (Loading)");
56389 else
56390 kdb_printf(" (Live)");
56391 - kdb_printf(" 0x%p", mod->module_core);
56392 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56393
56394 #ifdef CONFIG_MODULE_UNLOAD
56395 {
56396 diff -urNp linux-3.0.3/kernel/events/core.c linux-3.0.3/kernel/events/core.c
56397 --- linux-3.0.3/kernel/events/core.c 2011-08-23 21:44:40.000000000 -0400
56398 +++ linux-3.0.3/kernel/events/core.c 2011-08-23 21:47:56.000000000 -0400
56399 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
56400 return 0;
56401 }
56402
56403 -static atomic64_t perf_event_id;
56404 +static atomic64_unchecked_t perf_event_id;
56405
56406 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
56407 enum event_type_t event_type);
56408 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
56409
56410 static inline u64 perf_event_count(struct perf_event *event)
56411 {
56412 - return local64_read(&event->count) + atomic64_read(&event->child_count);
56413 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
56414 }
56415
56416 static u64 perf_event_read(struct perf_event *event)
56417 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
56418 mutex_lock(&event->child_mutex);
56419 total += perf_event_read(event);
56420 *enabled += event->total_time_enabled +
56421 - atomic64_read(&event->child_total_time_enabled);
56422 + atomic64_read_unchecked(&event->child_total_time_enabled);
56423 *running += event->total_time_running +
56424 - atomic64_read(&event->child_total_time_running);
56425 + atomic64_read_unchecked(&event->child_total_time_running);
56426
56427 list_for_each_entry(child, &event->child_list, child_list) {
56428 total += perf_event_read(child);
56429 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
56430 userpg->offset -= local64_read(&event->hw.prev_count);
56431
56432 userpg->time_enabled = event->total_time_enabled +
56433 - atomic64_read(&event->child_total_time_enabled);
56434 + atomic64_read_unchecked(&event->child_total_time_enabled);
56435
56436 userpg->time_running = event->total_time_running +
56437 - atomic64_read(&event->child_total_time_running);
56438 + atomic64_read_unchecked(&event->child_total_time_running);
56439
56440 barrier();
56441 ++userpg->lock;
56442 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
56443 values[n++] = perf_event_count(event);
56444 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
56445 values[n++] = enabled +
56446 - atomic64_read(&event->child_total_time_enabled);
56447 + atomic64_read_unchecked(&event->child_total_time_enabled);
56448 }
56449 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
56450 values[n++] = running +
56451 - atomic64_read(&event->child_total_time_running);
56452 + atomic64_read_unchecked(&event->child_total_time_running);
56453 }
56454 if (read_format & PERF_FORMAT_ID)
56455 values[n++] = primary_event_id(event);
56456 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
56457 event->parent = parent_event;
56458
56459 event->ns = get_pid_ns(current->nsproxy->pid_ns);
56460 - event->id = atomic64_inc_return(&perf_event_id);
56461 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
56462
56463 event->state = PERF_EVENT_STATE_INACTIVE;
56464
56465 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
56466 /*
56467 * Add back the child's count to the parent's count:
56468 */
56469 - atomic64_add(child_val, &parent_event->child_count);
56470 - atomic64_add(child_event->total_time_enabled,
56471 + atomic64_add_unchecked(child_val, &parent_event->child_count);
56472 + atomic64_add_unchecked(child_event->total_time_enabled,
56473 &parent_event->child_total_time_enabled);
56474 - atomic64_add(child_event->total_time_running,
56475 + atomic64_add_unchecked(child_event->total_time_running,
56476 &parent_event->child_total_time_running);
56477
56478 /*
56479 diff -urNp linux-3.0.3/kernel/exit.c linux-3.0.3/kernel/exit.c
56480 --- linux-3.0.3/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
56481 +++ linux-3.0.3/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
56482 @@ -57,6 +57,10 @@
56483 #include <asm/pgtable.h>
56484 #include <asm/mmu_context.h>
56485
56486 +#ifdef CONFIG_GRKERNSEC
56487 +extern rwlock_t grsec_exec_file_lock;
56488 +#endif
56489 +
56490 static void exit_mm(struct task_struct * tsk);
56491
56492 static void __unhash_process(struct task_struct *p, bool group_dead)
56493 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56494 struct task_struct *leader;
56495 int zap_leader;
56496 repeat:
56497 +#ifdef CONFIG_NET
56498 + gr_del_task_from_ip_table(p);
56499 +#endif
56500 +
56501 tracehook_prepare_release_task(p);
56502 /* don't need to get the RCU readlock here - the process is dead and
56503 * can't be modifying its own credentials. But shut RCU-lockdep up */
56504 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56505 {
56506 write_lock_irq(&tasklist_lock);
56507
56508 +#ifdef CONFIG_GRKERNSEC
56509 + write_lock(&grsec_exec_file_lock);
56510 + if (current->exec_file) {
56511 + fput(current->exec_file);
56512 + current->exec_file = NULL;
56513 + }
56514 + write_unlock(&grsec_exec_file_lock);
56515 +#endif
56516 +
56517 ptrace_unlink(current);
56518 /* Reparent to init */
56519 current->real_parent = current->parent = kthreadd_task;
56520 list_move_tail(&current->sibling, &current->real_parent->children);
56521
56522 + gr_set_kernel_label(current);
56523 +
56524 /* Set the exit signal to SIGCHLD so we signal init on exit */
56525 current->exit_signal = SIGCHLD;
56526
56527 @@ -394,7 +413,7 @@ int allow_signal(int sig)
56528 * know it'll be handled, so that they don't get converted to
56529 * SIGKILL or just silently dropped.
56530 */
56531 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56532 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56533 recalc_sigpending();
56534 spin_unlock_irq(&current->sighand->siglock);
56535 return 0;
56536 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56537 vsnprintf(current->comm, sizeof(current->comm), name, args);
56538 va_end(args);
56539
56540 +#ifdef CONFIG_GRKERNSEC
56541 + write_lock(&grsec_exec_file_lock);
56542 + if (current->exec_file) {
56543 + fput(current->exec_file);
56544 + current->exec_file = NULL;
56545 + }
56546 + write_unlock(&grsec_exec_file_lock);
56547 +#endif
56548 +
56549 + gr_set_kernel_label(current);
56550 +
56551 /*
56552 * If we were started as result of loading a module, close all of the
56553 * user space pages. We don't need them, and if we didn't close them
56554 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
56555 struct task_struct *tsk = current;
56556 int group_dead;
56557
56558 - profile_task_exit(tsk);
56559 -
56560 - WARN_ON(atomic_read(&tsk->fs_excl));
56561 - WARN_ON(blk_needs_flush_plug(tsk));
56562 -
56563 if (unlikely(in_interrupt()))
56564 panic("Aiee, killing interrupt handler!");
56565 - if (unlikely(!tsk->pid))
56566 - panic("Attempted to kill the idle task!");
56567
56568 /*
56569 * If do_exit is called because this processes oopsed, it's possible
56570 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
56571 */
56572 set_fs(USER_DS);
56573
56574 + profile_task_exit(tsk);
56575 +
56576 + WARN_ON(atomic_read(&tsk->fs_excl));
56577 + WARN_ON(blk_needs_flush_plug(tsk));
56578 +
56579 + if (unlikely(!tsk->pid))
56580 + panic("Attempted to kill the idle task!");
56581 +
56582 tracehook_report_exit(&code);
56583
56584 validate_creds_for_do_exit(tsk);
56585 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
56586 tsk->exit_code = code;
56587 taskstats_exit(tsk, group_dead);
56588
56589 + gr_acl_handle_psacct(tsk, code);
56590 + gr_acl_handle_exit();
56591 +
56592 exit_mm(tsk);
56593
56594 if (group_dead)
56595 diff -urNp linux-3.0.3/kernel/fork.c linux-3.0.3/kernel/fork.c
56596 --- linux-3.0.3/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
56597 +++ linux-3.0.3/kernel/fork.c 2011-08-23 21:48:14.000000000 -0400
56598 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
56599 *stackend = STACK_END_MAGIC; /* for overflow detection */
56600
56601 #ifdef CONFIG_CC_STACKPROTECTOR
56602 - tsk->stack_canary = get_random_int();
56603 + tsk->stack_canary = pax_get_random_long();
56604 #endif
56605
56606 /* One for us, one for whoever does the "release_task()" (usually parent) */
56607 @@ -308,13 +308,77 @@ out:
56608 }
56609
56610 #ifdef CONFIG_MMU
56611 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56612 +{
56613 + struct vm_area_struct *tmp;
56614 + unsigned long charge;
56615 + struct mempolicy *pol;
56616 + struct file *file;
56617 +
56618 + charge = 0;
56619 + if (mpnt->vm_flags & VM_ACCOUNT) {
56620 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56621 + if (security_vm_enough_memory(len))
56622 + goto fail_nomem;
56623 + charge = len;
56624 + }
56625 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56626 + if (!tmp)
56627 + goto fail_nomem;
56628 + *tmp = *mpnt;
56629 + tmp->vm_mm = mm;
56630 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56631 + pol = mpol_dup(vma_policy(mpnt));
56632 + if (IS_ERR(pol))
56633 + goto fail_nomem_policy;
56634 + vma_set_policy(tmp, pol);
56635 + if (anon_vma_fork(tmp, mpnt))
56636 + goto fail_nomem_anon_vma_fork;
56637 + tmp->vm_flags &= ~VM_LOCKED;
56638 + tmp->vm_next = tmp->vm_prev = NULL;
56639 + tmp->vm_mirror = NULL;
56640 + file = tmp->vm_file;
56641 + if (file) {
56642 + struct inode *inode = file->f_path.dentry->d_inode;
56643 + struct address_space *mapping = file->f_mapping;
56644 +
56645 + get_file(file);
56646 + if (tmp->vm_flags & VM_DENYWRITE)
56647 + atomic_dec(&inode->i_writecount);
56648 + mutex_lock(&mapping->i_mmap_mutex);
56649 + if (tmp->vm_flags & VM_SHARED)
56650 + mapping->i_mmap_writable++;
56651 + flush_dcache_mmap_lock(mapping);
56652 + /* insert tmp into the share list, just after mpnt */
56653 + vma_prio_tree_add(tmp, mpnt);
56654 + flush_dcache_mmap_unlock(mapping);
56655 + mutex_unlock(&mapping->i_mmap_mutex);
56656 + }
56657 +
56658 + /*
56659 + * Clear hugetlb-related page reserves for children. This only
56660 + * affects MAP_PRIVATE mappings. Faults generated by the child
56661 + * are not guaranteed to succeed, even if read-only
56662 + */
56663 + if (is_vm_hugetlb_page(tmp))
56664 + reset_vma_resv_huge_pages(tmp);
56665 +
56666 + return tmp;
56667 +
56668 +fail_nomem_anon_vma_fork:
56669 + mpol_put(pol);
56670 +fail_nomem_policy:
56671 + kmem_cache_free(vm_area_cachep, tmp);
56672 +fail_nomem:
56673 + vm_unacct_memory(charge);
56674 + return NULL;
56675 +}
56676 +
56677 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56678 {
56679 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56680 struct rb_node **rb_link, *rb_parent;
56681 int retval;
56682 - unsigned long charge;
56683 - struct mempolicy *pol;
56684
56685 down_write(&oldmm->mmap_sem);
56686 flush_cache_dup_mm(oldmm);
56687 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
56688 mm->locked_vm = 0;
56689 mm->mmap = NULL;
56690 mm->mmap_cache = NULL;
56691 - mm->free_area_cache = oldmm->mmap_base;
56692 - mm->cached_hole_size = ~0UL;
56693 + mm->free_area_cache = oldmm->free_area_cache;
56694 + mm->cached_hole_size = oldmm->cached_hole_size;
56695 mm->map_count = 0;
56696 cpumask_clear(mm_cpumask(mm));
56697 mm->mm_rb = RB_ROOT;
56698 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
56699
56700 prev = NULL;
56701 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56702 - struct file *file;
56703 -
56704 if (mpnt->vm_flags & VM_DONTCOPY) {
56705 long pages = vma_pages(mpnt);
56706 mm->total_vm -= pages;
56707 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
56708 -pages);
56709 continue;
56710 }
56711 - charge = 0;
56712 - if (mpnt->vm_flags & VM_ACCOUNT) {
56713 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56714 - if (security_vm_enough_memory(len))
56715 - goto fail_nomem;
56716 - charge = len;
56717 - }
56718 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56719 - if (!tmp)
56720 - goto fail_nomem;
56721 - *tmp = *mpnt;
56722 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56723 - pol = mpol_dup(vma_policy(mpnt));
56724 - retval = PTR_ERR(pol);
56725 - if (IS_ERR(pol))
56726 - goto fail_nomem_policy;
56727 - vma_set_policy(tmp, pol);
56728 - tmp->vm_mm = mm;
56729 - if (anon_vma_fork(tmp, mpnt))
56730 - goto fail_nomem_anon_vma_fork;
56731 - tmp->vm_flags &= ~VM_LOCKED;
56732 - tmp->vm_next = tmp->vm_prev = NULL;
56733 - file = tmp->vm_file;
56734 - if (file) {
56735 - struct inode *inode = file->f_path.dentry->d_inode;
56736 - struct address_space *mapping = file->f_mapping;
56737 -
56738 - get_file(file);
56739 - if (tmp->vm_flags & VM_DENYWRITE)
56740 - atomic_dec(&inode->i_writecount);
56741 - mutex_lock(&mapping->i_mmap_mutex);
56742 - if (tmp->vm_flags & VM_SHARED)
56743 - mapping->i_mmap_writable++;
56744 - flush_dcache_mmap_lock(mapping);
56745 - /* insert tmp into the share list, just after mpnt */
56746 - vma_prio_tree_add(tmp, mpnt);
56747 - flush_dcache_mmap_unlock(mapping);
56748 - mutex_unlock(&mapping->i_mmap_mutex);
56749 + tmp = dup_vma(mm, mpnt);
56750 + if (!tmp) {
56751 + retval = -ENOMEM;
56752 + goto out;
56753 }
56754
56755 /*
56756 - * Clear hugetlb-related page reserves for children. This only
56757 - * affects MAP_PRIVATE mappings. Faults generated by the child
56758 - * are not guaranteed to succeed, even if read-only
56759 - */
56760 - if (is_vm_hugetlb_page(tmp))
56761 - reset_vma_resv_huge_pages(tmp);
56762 -
56763 - /*
56764 * Link in the new vma and copy the page table entries.
56765 */
56766 *pprev = tmp;
56767 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
56768 if (retval)
56769 goto out;
56770 }
56771 +
56772 +#ifdef CONFIG_PAX_SEGMEXEC
56773 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56774 + struct vm_area_struct *mpnt_m;
56775 +
56776 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56777 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56778 +
56779 + if (!mpnt->vm_mirror)
56780 + continue;
56781 +
56782 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56783 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56784 + mpnt->vm_mirror = mpnt_m;
56785 + } else {
56786 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56787 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56788 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56789 + mpnt->vm_mirror->vm_mirror = mpnt;
56790 + }
56791 + }
56792 + BUG_ON(mpnt_m);
56793 + }
56794 +#endif
56795 +
56796 /* a new mm has just been created */
56797 arch_dup_mmap(oldmm, mm);
56798 retval = 0;
56799 @@ -429,14 +474,6 @@ out:
56800 flush_tlb_mm(oldmm);
56801 up_write(&oldmm->mmap_sem);
56802 return retval;
56803 -fail_nomem_anon_vma_fork:
56804 - mpol_put(pol);
56805 -fail_nomem_policy:
56806 - kmem_cache_free(vm_area_cachep, tmp);
56807 -fail_nomem:
56808 - retval = -ENOMEM;
56809 - vm_unacct_memory(charge);
56810 - goto out;
56811 }
56812
56813 static inline int mm_alloc_pgd(struct mm_struct * mm)
56814 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
56815 spin_unlock(&fs->lock);
56816 return -EAGAIN;
56817 }
56818 - fs->users++;
56819 + atomic_inc(&fs->users);
56820 spin_unlock(&fs->lock);
56821 return 0;
56822 }
56823 tsk->fs = copy_fs_struct(fs);
56824 if (!tsk->fs)
56825 return -ENOMEM;
56826 + gr_set_chroot_entries(tsk, &tsk->fs->root);
56827 return 0;
56828 }
56829
56830 @@ -1104,10 +1142,13 @@ static struct task_struct *copy_process(
56831 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56832 #endif
56833 retval = -EAGAIN;
56834 +
56835 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56836 +
56837 if (atomic_read(&p->real_cred->user->processes) >=
56838 task_rlimit(p, RLIMIT_NPROC)) {
56839 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56840 - p->real_cred->user != INIT_USER)
56841 + if (p->real_cred->user != INIT_USER &&
56842 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56843 goto bad_fork_free;
56844 }
56845
56846 @@ -1250,6 +1291,8 @@ static struct task_struct *copy_process(
56847 if (clone_flags & CLONE_THREAD)
56848 p->tgid = current->tgid;
56849
56850 + gr_copy_label(p);
56851 +
56852 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56853 /*
56854 * Clear TID on mm_release()?
56855 @@ -1414,6 +1457,8 @@ bad_fork_cleanup_count:
56856 bad_fork_free:
56857 free_task(p);
56858 fork_out:
56859 + gr_log_forkfail(retval);
56860 +
56861 return ERR_PTR(retval);
56862 }
56863
56864 @@ -1502,6 +1547,8 @@ long do_fork(unsigned long clone_flags,
56865 if (clone_flags & CLONE_PARENT_SETTID)
56866 put_user(nr, parent_tidptr);
56867
56868 + gr_handle_brute_check();
56869 +
56870 if (clone_flags & CLONE_VFORK) {
56871 p->vfork_done = &vfork;
56872 init_completion(&vfork);
56873 @@ -1610,7 +1657,7 @@ static int unshare_fs(unsigned long unsh
56874 return 0;
56875
56876 /* don't need lock here; in the worst case we'll do useless copy */
56877 - if (fs->users == 1)
56878 + if (atomic_read(&fs->users) == 1)
56879 return 0;
56880
56881 *new_fsp = copy_fs_struct(fs);
56882 @@ -1697,7 +1744,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
56883 fs = current->fs;
56884 spin_lock(&fs->lock);
56885 current->fs = new_fs;
56886 - if (--fs->users)
56887 + gr_set_chroot_entries(current, &current->fs->root);
56888 + if (atomic_dec_return(&fs->users))
56889 new_fs = NULL;
56890 else
56891 new_fs = fs;
56892 diff -urNp linux-3.0.3/kernel/futex.c linux-3.0.3/kernel/futex.c
56893 --- linux-3.0.3/kernel/futex.c 2011-08-23 21:44:40.000000000 -0400
56894 +++ linux-3.0.3/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
56895 @@ -54,6 +54,7 @@
56896 #include <linux/mount.h>
56897 #include <linux/pagemap.h>
56898 #include <linux/syscalls.h>
56899 +#include <linux/ptrace.h>
56900 #include <linux/signal.h>
56901 #include <linux/module.h>
56902 #include <linux/magic.h>
56903 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
56904 struct page *page, *page_head;
56905 int err, ro = 0;
56906
56907 +#ifdef CONFIG_PAX_SEGMEXEC
56908 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
56909 + return -EFAULT;
56910 +#endif
56911 +
56912 /*
56913 * The futex address must be "naturally" aligned.
56914 */
56915 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
56916 struct futex_q q = futex_q_init;
56917 int ret;
56918
56919 + pax_track_stack();
56920 +
56921 if (!bitset)
56922 return -EINVAL;
56923 q.bitset = bitset;
56924 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
56925 struct futex_q q = futex_q_init;
56926 int res, ret;
56927
56928 + pax_track_stack();
56929 +
56930 if (!bitset)
56931 return -EINVAL;
56932
56933 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56934 {
56935 struct robust_list_head __user *head;
56936 unsigned long ret;
56937 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56938 const struct cred *cred = current_cred(), *pcred;
56939 +#endif
56940
56941 if (!futex_cmpxchg_enabled)
56942 return -ENOSYS;
56943 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56944 if (!p)
56945 goto err_unlock;
56946 ret = -EPERM;
56947 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56948 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
56949 + goto err_unlock;
56950 +#else
56951 pcred = __task_cred(p);
56952 /* If victim is in different user_ns, then uids are not
56953 comparable, so we must have CAP_SYS_PTRACE */
56954 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
56955 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
56956 goto err_unlock;
56957 ok:
56958 +#endif
56959 head = p->robust_list;
56960 rcu_read_unlock();
56961 }
56962 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
56963 {
56964 u32 curval;
56965 int i;
56966 + mm_segment_t oldfs;
56967
56968 /*
56969 * This will fail and we want it. Some arch implementations do
56970 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
56971 * implementation, the non-functional ones will return
56972 * -ENOSYS.
56973 */
56974 + oldfs = get_fs();
56975 + set_fs(USER_DS);
56976 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
56977 futex_cmpxchg_enabled = 1;
56978 + set_fs(oldfs);
56979
56980 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
56981 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
56982 diff -urNp linux-3.0.3/kernel/futex_compat.c linux-3.0.3/kernel/futex_compat.c
56983 --- linux-3.0.3/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
56984 +++ linux-3.0.3/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
56985 @@ -10,6 +10,7 @@
56986 #include <linux/compat.h>
56987 #include <linux/nsproxy.h>
56988 #include <linux/futex.h>
56989 +#include <linux/ptrace.h>
56990
56991 #include <asm/uaccess.h>
56992
56993 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
56994 {
56995 struct compat_robust_list_head __user *head;
56996 unsigned long ret;
56997 - const struct cred *cred = current_cred(), *pcred;
56998 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
56999 + const struct cred *cred = current_cred();
57000 + const struct cred *pcred;
57001 +#endif
57002
57003 if (!futex_cmpxchg_enabled)
57004 return -ENOSYS;
57005 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57006 if (!p)
57007 goto err_unlock;
57008 ret = -EPERM;
57009 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57010 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57011 + goto err_unlock;
57012 +#else
57013 pcred = __task_cred(p);
57014 /* If victim is in different user_ns, then uids are not
57015 comparable, so we must have CAP_SYS_PTRACE */
57016 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57017 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57018 goto err_unlock;
57019 ok:
57020 +#endif
57021 head = p->compat_robust_list;
57022 rcu_read_unlock();
57023 }
57024 diff -urNp linux-3.0.3/kernel/gcov/base.c linux-3.0.3/kernel/gcov/base.c
57025 --- linux-3.0.3/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
57026 +++ linux-3.0.3/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
57027 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57028 }
57029
57030 #ifdef CONFIG_MODULES
57031 -static inline int within(void *addr, void *start, unsigned long size)
57032 -{
57033 - return ((addr >= start) && (addr < start + size));
57034 -}
57035 -
57036 /* Update list and generate events when modules are unloaded. */
57037 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57038 void *data)
57039 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57040 prev = NULL;
57041 /* Remove entries located in module from linked list. */
57042 for (info = gcov_info_head; info; info = info->next) {
57043 - if (within(info, mod->module_core, mod->core_size)) {
57044 + if (within_module_core_rw((unsigned long)info, mod)) {
57045 if (prev)
57046 prev->next = info->next;
57047 else
57048 diff -urNp linux-3.0.3/kernel/hrtimer.c linux-3.0.3/kernel/hrtimer.c
57049 --- linux-3.0.3/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
57050 +++ linux-3.0.3/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
57051 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
57052 local_irq_restore(flags);
57053 }
57054
57055 -static void run_hrtimer_softirq(struct softirq_action *h)
57056 +static void run_hrtimer_softirq(void)
57057 {
57058 hrtimer_peek_ahead_timers();
57059 }
57060 diff -urNp linux-3.0.3/kernel/jump_label.c linux-3.0.3/kernel/jump_label.c
57061 --- linux-3.0.3/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
57062 +++ linux-3.0.3/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
57063 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
57064
57065 size = (((unsigned long)stop - (unsigned long)start)
57066 / sizeof(struct jump_entry));
57067 + pax_open_kernel();
57068 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57069 + pax_close_kernel();
57070 }
57071
57072 static void jump_label_update(struct jump_label_key *key, int enable);
57073 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
57074 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
57075 struct jump_entry *iter;
57076
57077 + pax_open_kernel();
57078 for (iter = iter_start; iter < iter_stop; iter++) {
57079 if (within_module_init(iter->code, mod))
57080 iter->code = 0;
57081 }
57082 + pax_close_kernel();
57083 }
57084
57085 static int
57086 diff -urNp linux-3.0.3/kernel/kallsyms.c linux-3.0.3/kernel/kallsyms.c
57087 --- linux-3.0.3/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
57088 +++ linux-3.0.3/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
57089 @@ -11,6 +11,9 @@
57090 * Changed the compression method from stem compression to "table lookup"
57091 * compression (see scripts/kallsyms.c for a more complete description)
57092 */
57093 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57094 +#define __INCLUDED_BY_HIDESYM 1
57095 +#endif
57096 #include <linux/kallsyms.h>
57097 #include <linux/module.h>
57098 #include <linux/init.h>
57099 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57100
57101 static inline int is_kernel_inittext(unsigned long addr)
57102 {
57103 + if (system_state != SYSTEM_BOOTING)
57104 + return 0;
57105 +
57106 if (addr >= (unsigned long)_sinittext
57107 && addr <= (unsigned long)_einittext)
57108 return 1;
57109 return 0;
57110 }
57111
57112 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57113 +#ifdef CONFIG_MODULES
57114 +static inline int is_module_text(unsigned long addr)
57115 +{
57116 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57117 + return 1;
57118 +
57119 + addr = ktla_ktva(addr);
57120 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57121 +}
57122 +#else
57123 +static inline int is_module_text(unsigned long addr)
57124 +{
57125 + return 0;
57126 +}
57127 +#endif
57128 +#endif
57129 +
57130 static inline int is_kernel_text(unsigned long addr)
57131 {
57132 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57133 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57134
57135 static inline int is_kernel(unsigned long addr)
57136 {
57137 +
57138 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57139 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57140 + return 1;
57141 +
57142 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57143 +#else
57144 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57145 +#endif
57146 +
57147 return 1;
57148 return in_gate_area_no_mm(addr);
57149 }
57150
57151 static int is_ksym_addr(unsigned long addr)
57152 {
57153 +
57154 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57155 + if (is_module_text(addr))
57156 + return 0;
57157 +#endif
57158 +
57159 if (all_var)
57160 return is_kernel(addr);
57161
57162 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57163
57164 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57165 {
57166 - iter->name[0] = '\0';
57167 iter->nameoff = get_symbol_offset(new_pos);
57168 iter->pos = new_pos;
57169 }
57170 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57171 {
57172 struct kallsym_iter *iter = m->private;
57173
57174 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57175 + if (current_uid())
57176 + return 0;
57177 +#endif
57178 +
57179 /* Some debugging symbols have no name. Ignore them. */
57180 if (!iter->name[0])
57181 return 0;
57182 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57183 struct kallsym_iter *iter;
57184 int ret;
57185
57186 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57187 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57188 if (!iter)
57189 return -ENOMEM;
57190 reset_iter(iter, 0);
57191 diff -urNp linux-3.0.3/kernel/kmod.c linux-3.0.3/kernel/kmod.c
57192 --- linux-3.0.3/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
57193 +++ linux-3.0.3/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
57194 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57195 * If module auto-loading support is disabled then this function
57196 * becomes a no-operation.
57197 */
57198 -int __request_module(bool wait, const char *fmt, ...)
57199 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57200 {
57201 - va_list args;
57202 char module_name[MODULE_NAME_LEN];
57203 unsigned int max_modprobes;
57204 int ret;
57205 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57206 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57207 static char *envp[] = { "HOME=/",
57208 "TERM=linux",
57209 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57210 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
57211 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57212 static int kmod_loop_msg;
57213
57214 - va_start(args, fmt);
57215 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57216 - va_end(args);
57217 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57218 if (ret >= MODULE_NAME_LEN)
57219 return -ENAMETOOLONG;
57220
57221 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
57222 if (ret)
57223 return ret;
57224
57225 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57226 + if (!current_uid()) {
57227 + /* hack to workaround consolekit/udisks stupidity */
57228 + read_lock(&tasklist_lock);
57229 + if (!strcmp(current->comm, "mount") &&
57230 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57231 + read_unlock(&tasklist_lock);
57232 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57233 + return -EPERM;
57234 + }
57235 + read_unlock(&tasklist_lock);
57236 + }
57237 +#endif
57238 +
57239 /* If modprobe needs a service that is in a module, we get a recursive
57240 * loop. Limit the number of running kmod threads to max_threads/2 or
57241 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57242 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
57243 atomic_dec(&kmod_concurrent);
57244 return ret;
57245 }
57246 +
57247 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57248 +{
57249 + va_list args;
57250 + int ret;
57251 +
57252 + va_start(args, fmt);
57253 + ret = ____request_module(wait, module_param, fmt, args);
57254 + va_end(args);
57255 +
57256 + return ret;
57257 +}
57258 +
57259 +int __request_module(bool wait, const char *fmt, ...)
57260 +{
57261 + va_list args;
57262 + int ret;
57263 +
57264 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57265 + if (current_uid()) {
57266 + char module_param[MODULE_NAME_LEN];
57267 +
57268 + memset(module_param, 0, sizeof(module_param));
57269 +
57270 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57271 +
57272 + va_start(args, fmt);
57273 + ret = ____request_module(wait, module_param, fmt, args);
57274 + va_end(args);
57275 +
57276 + return ret;
57277 + }
57278 +#endif
57279 +
57280 + va_start(args, fmt);
57281 + ret = ____request_module(wait, NULL, fmt, args);
57282 + va_end(args);
57283 +
57284 + return ret;
57285 +}
57286 +
57287 EXPORT_SYMBOL(__request_module);
57288 #endif /* CONFIG_MODULES */
57289
57290 diff -urNp linux-3.0.3/kernel/kprobes.c linux-3.0.3/kernel/kprobes.c
57291 --- linux-3.0.3/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
57292 +++ linux-3.0.3/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
57293 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57294 * kernel image and loaded module images reside. This is required
57295 * so x86_64 can correctly handle the %rip-relative fixups.
57296 */
57297 - kip->insns = module_alloc(PAGE_SIZE);
57298 + kip->insns = module_alloc_exec(PAGE_SIZE);
57299 if (!kip->insns) {
57300 kfree(kip);
57301 return NULL;
57302 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57303 */
57304 if (!list_is_singular(&kip->list)) {
57305 list_del(&kip->list);
57306 - module_free(NULL, kip->insns);
57307 + module_free_exec(NULL, kip->insns);
57308 kfree(kip);
57309 }
57310 return 1;
57311 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57312 {
57313 int i, err = 0;
57314 unsigned long offset = 0, size = 0;
57315 - char *modname, namebuf[128];
57316 + char *modname, namebuf[KSYM_NAME_LEN];
57317 const char *symbol_name;
57318 void *addr;
57319 struct kprobe_blackpoint *kb;
57320 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57321 const char *sym = NULL;
57322 unsigned int i = *(loff_t *) v;
57323 unsigned long offset = 0;
57324 - char *modname, namebuf[128];
57325 + char *modname, namebuf[KSYM_NAME_LEN];
57326
57327 head = &kprobe_table[i];
57328 preempt_disable();
57329 diff -urNp linux-3.0.3/kernel/lockdep.c linux-3.0.3/kernel/lockdep.c
57330 --- linux-3.0.3/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
57331 +++ linux-3.0.3/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
57332 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
57333 end = (unsigned long) &_end,
57334 addr = (unsigned long) obj;
57335
57336 +#ifdef CONFIG_PAX_KERNEXEC
57337 + start = ktla_ktva(start);
57338 +#endif
57339 +
57340 /*
57341 * static variable?
57342 */
57343 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
57344 if (!static_obj(lock->key)) {
57345 debug_locks_off();
57346 printk("INFO: trying to register non-static key.\n");
57347 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57348 printk("the code is fine but needs lockdep annotation.\n");
57349 printk("turning off the locking correctness validator.\n");
57350 dump_stack();
57351 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
57352 if (!class)
57353 return 0;
57354 }
57355 - atomic_inc((atomic_t *)&class->ops);
57356 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57357 if (very_verbose(class)) {
57358 printk("\nacquire class [%p] %s", class->key, class->name);
57359 if (class->name_version > 1)
57360 diff -urNp linux-3.0.3/kernel/lockdep_proc.c linux-3.0.3/kernel/lockdep_proc.c
57361 --- linux-3.0.3/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
57362 +++ linux-3.0.3/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
57363 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57364
57365 static void print_name(struct seq_file *m, struct lock_class *class)
57366 {
57367 - char str[128];
57368 + char str[KSYM_NAME_LEN];
57369 const char *name = class->name;
57370
57371 if (!name) {
57372 diff -urNp linux-3.0.3/kernel/module.c linux-3.0.3/kernel/module.c
57373 --- linux-3.0.3/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
57374 +++ linux-3.0.3/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
57375 @@ -58,6 +58,7 @@
57376 #include <linux/jump_label.h>
57377 #include <linux/pfn.h>
57378 #include <linux/bsearch.h>
57379 +#include <linux/grsecurity.h>
57380
57381 #define CREATE_TRACE_POINTS
57382 #include <trace/events/module.h>
57383 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57384
57385 /* Bounds of module allocation, for speeding __module_address.
57386 * Protected by module_mutex. */
57387 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57388 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57389 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57390
57391 int register_module_notifier(struct notifier_block * nb)
57392 {
57393 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
57394 return true;
57395
57396 list_for_each_entry_rcu(mod, &modules, list) {
57397 - struct symsearch arr[] = {
57398 + struct symsearch modarr[] = {
57399 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57400 NOT_GPL_ONLY, false },
57401 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57402 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
57403 #endif
57404 };
57405
57406 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57407 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57408 return true;
57409 }
57410 return false;
57411 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
57412 static int percpu_modalloc(struct module *mod,
57413 unsigned long size, unsigned long align)
57414 {
57415 - if (align > PAGE_SIZE) {
57416 + if (align-1 >= PAGE_SIZE) {
57417 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57418 mod->name, align, PAGE_SIZE);
57419 align = PAGE_SIZE;
57420 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
57421 */
57422 #ifdef CONFIG_SYSFS
57423
57424 -#ifdef CONFIG_KALLSYMS
57425 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57426 static inline bool sect_empty(const Elf_Shdr *sect)
57427 {
57428 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57429 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
57430
57431 static void unset_module_core_ro_nx(struct module *mod)
57432 {
57433 - set_page_attributes(mod->module_core + mod->core_text_size,
57434 - mod->module_core + mod->core_size,
57435 + set_page_attributes(mod->module_core_rw,
57436 + mod->module_core_rw + mod->core_size_rw,
57437 set_memory_x);
57438 - set_page_attributes(mod->module_core,
57439 - mod->module_core + mod->core_ro_size,
57440 + set_page_attributes(mod->module_core_rx,
57441 + mod->module_core_rx + mod->core_size_rx,
57442 set_memory_rw);
57443 }
57444
57445 static void unset_module_init_ro_nx(struct module *mod)
57446 {
57447 - set_page_attributes(mod->module_init + mod->init_text_size,
57448 - mod->module_init + mod->init_size,
57449 + set_page_attributes(mod->module_init_rw,
57450 + mod->module_init_rw + mod->init_size_rw,
57451 set_memory_x);
57452 - set_page_attributes(mod->module_init,
57453 - mod->module_init + mod->init_ro_size,
57454 + set_page_attributes(mod->module_init_rx,
57455 + mod->module_init_rx + mod->init_size_rx,
57456 set_memory_rw);
57457 }
57458
57459 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
57460
57461 mutex_lock(&module_mutex);
57462 list_for_each_entry_rcu(mod, &modules, list) {
57463 - if ((mod->module_core) && (mod->core_text_size)) {
57464 - set_page_attributes(mod->module_core,
57465 - mod->module_core + mod->core_text_size,
57466 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57467 + set_page_attributes(mod->module_core_rx,
57468 + mod->module_core_rx + mod->core_size_rx,
57469 set_memory_rw);
57470 }
57471 - if ((mod->module_init) && (mod->init_text_size)) {
57472 - set_page_attributes(mod->module_init,
57473 - mod->module_init + mod->init_text_size,
57474 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57475 + set_page_attributes(mod->module_init_rx,
57476 + mod->module_init_rx + mod->init_size_rx,
57477 set_memory_rw);
57478 }
57479 }
57480 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
57481
57482 mutex_lock(&module_mutex);
57483 list_for_each_entry_rcu(mod, &modules, list) {
57484 - if ((mod->module_core) && (mod->core_text_size)) {
57485 - set_page_attributes(mod->module_core,
57486 - mod->module_core + mod->core_text_size,
57487 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57488 + set_page_attributes(mod->module_core_rx,
57489 + mod->module_core_rx + mod->core_size_rx,
57490 set_memory_ro);
57491 }
57492 - if ((mod->module_init) && (mod->init_text_size)) {
57493 - set_page_attributes(mod->module_init,
57494 - mod->module_init + mod->init_text_size,
57495 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57496 + set_page_attributes(mod->module_init_rx,
57497 + mod->module_init_rx + mod->init_size_rx,
57498 set_memory_ro);
57499 }
57500 }
57501 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
57502
57503 /* This may be NULL, but that's OK */
57504 unset_module_init_ro_nx(mod);
57505 - module_free(mod, mod->module_init);
57506 + module_free(mod, mod->module_init_rw);
57507 + module_free_exec(mod, mod->module_init_rx);
57508 kfree(mod->args);
57509 percpu_modfree(mod);
57510
57511 /* Free lock-classes: */
57512 - lockdep_free_key_range(mod->module_core, mod->core_size);
57513 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57514 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57515
57516 /* Finally, free the core (containing the module structure) */
57517 unset_module_core_ro_nx(mod);
57518 - module_free(mod, mod->module_core);
57519 + module_free_exec(mod, mod->module_core_rx);
57520 + module_free(mod, mod->module_core_rw);
57521
57522 #ifdef CONFIG_MPU
57523 update_protections(current->mm);
57524 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
57525 unsigned int i;
57526 int ret = 0;
57527 const struct kernel_symbol *ksym;
57528 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57529 + int is_fs_load = 0;
57530 + int register_filesystem_found = 0;
57531 + char *p;
57532 +
57533 + p = strstr(mod->args, "grsec_modharden_fs");
57534 + if (p) {
57535 + char *endptr = p + strlen("grsec_modharden_fs");
57536 + /* copy \0 as well */
57537 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57538 + is_fs_load = 1;
57539 + }
57540 +#endif
57541
57542 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57543 const char *name = info->strtab + sym[i].st_name;
57544
57545 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57546 + /* it's a real shame this will never get ripped and copied
57547 + upstream! ;(
57548 + */
57549 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57550 + register_filesystem_found = 1;
57551 +#endif
57552 +
57553 switch (sym[i].st_shndx) {
57554 case SHN_COMMON:
57555 /* We compiled with -fno-common. These are not
57556 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
57557 ksym = resolve_symbol_wait(mod, info, name);
57558 /* Ok if resolved. */
57559 if (ksym && !IS_ERR(ksym)) {
57560 + pax_open_kernel();
57561 sym[i].st_value = ksym->value;
57562 + pax_close_kernel();
57563 break;
57564 }
57565
57566 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
57567 secbase = (unsigned long)mod_percpu(mod);
57568 else
57569 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57570 + pax_open_kernel();
57571 sym[i].st_value += secbase;
57572 + pax_close_kernel();
57573 break;
57574 }
57575 }
57576
57577 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57578 + if (is_fs_load && !register_filesystem_found) {
57579 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57580 + ret = -EPERM;
57581 + }
57582 +#endif
57583 +
57584 return ret;
57585 }
57586
57587 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
57588 || s->sh_entsize != ~0UL
57589 || strstarts(sname, ".init"))
57590 continue;
57591 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57592 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57593 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57594 + else
57595 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57596 DEBUGP("\t%s\n", name);
57597 }
57598 - switch (m) {
57599 - case 0: /* executable */
57600 - mod->core_size = debug_align(mod->core_size);
57601 - mod->core_text_size = mod->core_size;
57602 - break;
57603 - case 1: /* RO: text and ro-data */
57604 - mod->core_size = debug_align(mod->core_size);
57605 - mod->core_ro_size = mod->core_size;
57606 - break;
57607 - case 3: /* whole core */
57608 - mod->core_size = debug_align(mod->core_size);
57609 - break;
57610 - }
57611 }
57612
57613 DEBUGP("Init section allocation order:\n");
57614 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
57615 || s->sh_entsize != ~0UL
57616 || !strstarts(sname, ".init"))
57617 continue;
57618 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57619 - | INIT_OFFSET_MASK);
57620 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57621 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57622 + else
57623 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57624 + s->sh_entsize |= INIT_OFFSET_MASK;
57625 DEBUGP("\t%s\n", sname);
57626 }
57627 - switch (m) {
57628 - case 0: /* executable */
57629 - mod->init_size = debug_align(mod->init_size);
57630 - mod->init_text_size = mod->init_size;
57631 - break;
57632 - case 1: /* RO: text and ro-data */
57633 - mod->init_size = debug_align(mod->init_size);
57634 - mod->init_ro_size = mod->init_size;
57635 - break;
57636 - case 3: /* whole init */
57637 - mod->init_size = debug_align(mod->init_size);
57638 - break;
57639 - }
57640 }
57641 }
57642
57643 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
57644
57645 /* Put symbol section at end of init part of module. */
57646 symsect->sh_flags |= SHF_ALLOC;
57647 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57648 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57649 info->index.sym) | INIT_OFFSET_MASK;
57650 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57651
57652 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
57653 }
57654
57655 /* Append room for core symbols at end of core part. */
57656 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57657 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57658 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57659 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57660
57661 /* Put string table section at end of init part of module. */
57662 strsect->sh_flags |= SHF_ALLOC;
57663 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57664 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57665 info->index.str) | INIT_OFFSET_MASK;
57666 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57667
57668 /* Append room for core symbols' strings at end of core part. */
57669 - info->stroffs = mod->core_size;
57670 + info->stroffs = mod->core_size_rx;
57671 __set_bit(0, info->strmap);
57672 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57673 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57674 }
57675
57676 static void add_kallsyms(struct module *mod, const struct load_info *info)
57677 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
57678 /* Make sure we get permanent strtab: don't use info->strtab. */
57679 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57680
57681 + pax_open_kernel();
57682 +
57683 /* Set types up while we still have access to sections. */
57684 for (i = 0; i < mod->num_symtab; i++)
57685 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57686
57687 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57688 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57689 src = mod->symtab;
57690 *dst = *src;
57691 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57692 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
57693 }
57694 mod->core_num_syms = ndst;
57695
57696 - mod->core_strtab = s = mod->module_core + info->stroffs;
57697 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57698 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57699 if (test_bit(i, info->strmap))
57700 *++s = mod->strtab[i];
57701 +
57702 + pax_close_kernel();
57703 }
57704 #else
57705 static inline void layout_symtab(struct module *mod, struct load_info *info)
57706 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
57707 ddebug_remove_module(debug->modname);
57708 }
57709
57710 -static void *module_alloc_update_bounds(unsigned long size)
57711 +static void *module_alloc_update_bounds_rw(unsigned long size)
57712 {
57713 void *ret = module_alloc(size);
57714
57715 if (ret) {
57716 mutex_lock(&module_mutex);
57717 /* Update module bounds. */
57718 - if ((unsigned long)ret < module_addr_min)
57719 - module_addr_min = (unsigned long)ret;
57720 - if ((unsigned long)ret + size > module_addr_max)
57721 - module_addr_max = (unsigned long)ret + size;
57722 + if ((unsigned long)ret < module_addr_min_rw)
57723 + module_addr_min_rw = (unsigned long)ret;
57724 + if ((unsigned long)ret + size > module_addr_max_rw)
57725 + module_addr_max_rw = (unsigned long)ret + size;
57726 + mutex_unlock(&module_mutex);
57727 + }
57728 + return ret;
57729 +}
57730 +
57731 +static void *module_alloc_update_bounds_rx(unsigned long size)
57732 +{
57733 + void *ret = module_alloc_exec(size);
57734 +
57735 + if (ret) {
57736 + mutex_lock(&module_mutex);
57737 + /* Update module bounds. */
57738 + if ((unsigned long)ret < module_addr_min_rx)
57739 + module_addr_min_rx = (unsigned long)ret;
57740 + if ((unsigned long)ret + size > module_addr_max_rx)
57741 + module_addr_max_rx = (unsigned long)ret + size;
57742 mutex_unlock(&module_mutex);
57743 }
57744 return ret;
57745 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
57746 void *ptr;
57747
57748 /* Do the allocs. */
57749 - ptr = module_alloc_update_bounds(mod->core_size);
57750 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57751 /*
57752 * The pointer to this block is stored in the module structure
57753 * which is inside the block. Just mark it as not being a
57754 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
57755 if (!ptr)
57756 return -ENOMEM;
57757
57758 - memset(ptr, 0, mod->core_size);
57759 - mod->module_core = ptr;
57760 + memset(ptr, 0, mod->core_size_rw);
57761 + mod->module_core_rw = ptr;
57762
57763 - ptr = module_alloc_update_bounds(mod->init_size);
57764 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57765 /*
57766 * The pointer to this block is stored in the module structure
57767 * which is inside the block. This block doesn't need to be
57768 * scanned as it contains data and code that will be freed
57769 * after the module is initialized.
57770 */
57771 - kmemleak_ignore(ptr);
57772 - if (!ptr && mod->init_size) {
57773 - module_free(mod, mod->module_core);
57774 + kmemleak_not_leak(ptr);
57775 + if (!ptr && mod->init_size_rw) {
57776 + module_free(mod, mod->module_core_rw);
57777 return -ENOMEM;
57778 }
57779 - memset(ptr, 0, mod->init_size);
57780 - mod->module_init = ptr;
57781 + memset(ptr, 0, mod->init_size_rw);
57782 + mod->module_init_rw = ptr;
57783 +
57784 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57785 + kmemleak_not_leak(ptr);
57786 + if (!ptr) {
57787 + module_free(mod, mod->module_init_rw);
57788 + module_free(mod, mod->module_core_rw);
57789 + return -ENOMEM;
57790 + }
57791 +
57792 + pax_open_kernel();
57793 + memset(ptr, 0, mod->core_size_rx);
57794 + pax_close_kernel();
57795 + mod->module_core_rx = ptr;
57796 +
57797 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57798 + kmemleak_not_leak(ptr);
57799 + if (!ptr && mod->init_size_rx) {
57800 + module_free_exec(mod, mod->module_core_rx);
57801 + module_free(mod, mod->module_init_rw);
57802 + module_free(mod, mod->module_core_rw);
57803 + return -ENOMEM;
57804 + }
57805 +
57806 + pax_open_kernel();
57807 + memset(ptr, 0, mod->init_size_rx);
57808 + pax_close_kernel();
57809 + mod->module_init_rx = ptr;
57810
57811 /* Transfer each section which specifies SHF_ALLOC */
57812 DEBUGP("final section addresses:\n");
57813 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
57814 if (!(shdr->sh_flags & SHF_ALLOC))
57815 continue;
57816
57817 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
57818 - dest = mod->module_init
57819 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57820 - else
57821 - dest = mod->module_core + shdr->sh_entsize;
57822 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57823 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57824 + dest = mod->module_init_rw
57825 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57826 + else
57827 + dest = mod->module_init_rx
57828 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57829 + } else {
57830 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57831 + dest = mod->module_core_rw + shdr->sh_entsize;
57832 + else
57833 + dest = mod->module_core_rx + shdr->sh_entsize;
57834 + }
57835 +
57836 + if (shdr->sh_type != SHT_NOBITS) {
57837 +
57838 +#ifdef CONFIG_PAX_KERNEXEC
57839 +#ifdef CONFIG_X86_64
57840 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57841 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57842 +#endif
57843 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
57844 + pax_open_kernel();
57845 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57846 + pax_close_kernel();
57847 + } else
57848 +#endif
57849
57850 - if (shdr->sh_type != SHT_NOBITS)
57851 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
57852 + }
57853 /* Update sh_addr to point to copy in image. */
57854 - shdr->sh_addr = (unsigned long)dest;
57855 +
57856 +#ifdef CONFIG_PAX_KERNEXEC
57857 + if (shdr->sh_flags & SHF_EXECINSTR)
57858 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
57859 + else
57860 +#endif
57861 +
57862 + shdr->sh_addr = (unsigned long)dest;
57863 DEBUGP("\t0x%lx %s\n",
57864 shdr->sh_addr, info->secstrings + shdr->sh_name);
57865 }
57866 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
57867 * Do it before processing of module parameters, so the module
57868 * can provide parameter accessor functions of its own.
57869 */
57870 - if (mod->module_init)
57871 - flush_icache_range((unsigned long)mod->module_init,
57872 - (unsigned long)mod->module_init
57873 - + mod->init_size);
57874 - flush_icache_range((unsigned long)mod->module_core,
57875 - (unsigned long)mod->module_core + mod->core_size);
57876 + if (mod->module_init_rx)
57877 + flush_icache_range((unsigned long)mod->module_init_rx,
57878 + (unsigned long)mod->module_init_rx
57879 + + mod->init_size_rx);
57880 + flush_icache_range((unsigned long)mod->module_core_rx,
57881 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
57882
57883 set_fs(old_fs);
57884 }
57885 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
57886 {
57887 kfree(info->strmap);
57888 percpu_modfree(mod);
57889 - module_free(mod, mod->module_init);
57890 - module_free(mod, mod->module_core);
57891 + module_free_exec(mod, mod->module_init_rx);
57892 + module_free_exec(mod, mod->module_core_rx);
57893 + module_free(mod, mod->module_init_rw);
57894 + module_free(mod, mod->module_core_rw);
57895 }
57896
57897 static int post_relocation(struct module *mod, const struct load_info *info)
57898 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
57899 if (err)
57900 goto free_unload;
57901
57902 + /* Now copy in args */
57903 + mod->args = strndup_user(uargs, ~0UL >> 1);
57904 + if (IS_ERR(mod->args)) {
57905 + err = PTR_ERR(mod->args);
57906 + goto free_unload;
57907 + }
57908 +
57909 /* Set up MODINFO_ATTR fields */
57910 setup_modinfo(mod, &info);
57911
57912 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57913 + {
57914 + char *p, *p2;
57915 +
57916 + if (strstr(mod->args, "grsec_modharden_netdev")) {
57917 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
57918 + err = -EPERM;
57919 + goto free_modinfo;
57920 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
57921 + p += strlen("grsec_modharden_normal");
57922 + p2 = strstr(p, "_");
57923 + if (p2) {
57924 + *p2 = '\0';
57925 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
57926 + *p2 = '_';
57927 + }
57928 + err = -EPERM;
57929 + goto free_modinfo;
57930 + }
57931 + }
57932 +#endif
57933 +
57934 /* Fix up syms, so that st_value is a pointer to location. */
57935 err = simplify_symbols(mod, &info);
57936 if (err < 0)
57937 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
57938
57939 flush_module_icache(mod);
57940
57941 - /* Now copy in args */
57942 - mod->args = strndup_user(uargs, ~0UL >> 1);
57943 - if (IS_ERR(mod->args)) {
57944 - err = PTR_ERR(mod->args);
57945 - goto free_arch_cleanup;
57946 - }
57947 -
57948 /* Mark state as coming so strong_try_module_get() ignores us. */
57949 mod->state = MODULE_STATE_COMING;
57950
57951 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
57952 unlock:
57953 mutex_unlock(&module_mutex);
57954 synchronize_sched();
57955 - kfree(mod->args);
57956 - free_arch_cleanup:
57957 module_arch_cleanup(mod);
57958 free_modinfo:
57959 free_modinfo(mod);
57960 + kfree(mod->args);
57961 free_unload:
57962 module_unload_free(mod);
57963 free_module:
57964 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
57965 MODULE_STATE_COMING, mod);
57966
57967 /* Set RO and NX regions for core */
57968 - set_section_ro_nx(mod->module_core,
57969 - mod->core_text_size,
57970 - mod->core_ro_size,
57971 - mod->core_size);
57972 + set_section_ro_nx(mod->module_core_rx,
57973 + mod->core_size_rx,
57974 + mod->core_size_rx,
57975 + mod->core_size_rx);
57976
57977 /* Set RO and NX regions for init */
57978 - set_section_ro_nx(mod->module_init,
57979 - mod->init_text_size,
57980 - mod->init_ro_size,
57981 - mod->init_size);
57982 + set_section_ro_nx(mod->module_init_rx,
57983 + mod->init_size_rx,
57984 + mod->init_size_rx,
57985 + mod->init_size_rx);
57986
57987 do_mod_ctors(mod);
57988 /* Start the module */
57989 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
57990 mod->strtab = mod->core_strtab;
57991 #endif
57992 unset_module_init_ro_nx(mod);
57993 - module_free(mod, mod->module_init);
57994 - mod->module_init = NULL;
57995 - mod->init_size = 0;
57996 - mod->init_ro_size = 0;
57997 - mod->init_text_size = 0;
57998 + module_free(mod, mod->module_init_rw);
57999 + module_free_exec(mod, mod->module_init_rx);
58000 + mod->module_init_rw = NULL;
58001 + mod->module_init_rx = NULL;
58002 + mod->init_size_rw = 0;
58003 + mod->init_size_rx = 0;
58004 mutex_unlock(&module_mutex);
58005
58006 return 0;
58007 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
58008 unsigned long nextval;
58009
58010 /* At worse, next value is at end of module */
58011 - if (within_module_init(addr, mod))
58012 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58013 + if (within_module_init_rx(addr, mod))
58014 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58015 + else if (within_module_init_rw(addr, mod))
58016 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58017 + else if (within_module_core_rx(addr, mod))
58018 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58019 + else if (within_module_core_rw(addr, mod))
58020 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58021 else
58022 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58023 + return NULL;
58024
58025 /* Scan for closest preceding symbol, and next symbol. (ELF
58026 starts real symbols at 1). */
58027 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
58028 char buf[8];
58029
58030 seq_printf(m, "%s %u",
58031 - mod->name, mod->init_size + mod->core_size);
58032 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58033 print_unload_info(m, mod);
58034
58035 /* Informative for users. */
58036 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
58037 mod->state == MODULE_STATE_COMING ? "Loading":
58038 "Live");
58039 /* Used by oprofile and other similar tools. */
58040 - seq_printf(m, " 0x%pK", mod->module_core);
58041 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58042
58043 /* Taints info */
58044 if (mod->taints)
58045 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
58046
58047 static int __init proc_modules_init(void)
58048 {
58049 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58050 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58051 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58052 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58053 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58054 +#else
58055 proc_create("modules", 0, NULL, &proc_modules_operations);
58056 +#endif
58057 +#else
58058 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58059 +#endif
58060 return 0;
58061 }
58062 module_init(proc_modules_init);
58063 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
58064 {
58065 struct module *mod;
58066
58067 - if (addr < module_addr_min || addr > module_addr_max)
58068 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58069 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58070 return NULL;
58071
58072 list_for_each_entry_rcu(mod, &modules, list)
58073 - if (within_module_core(addr, mod)
58074 - || within_module_init(addr, mod))
58075 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58076 return mod;
58077 return NULL;
58078 }
58079 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
58080 */
58081 struct module *__module_text_address(unsigned long addr)
58082 {
58083 - struct module *mod = __module_address(addr);
58084 + struct module *mod;
58085 +
58086 +#ifdef CONFIG_X86_32
58087 + addr = ktla_ktva(addr);
58088 +#endif
58089 +
58090 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58091 + return NULL;
58092 +
58093 + mod = __module_address(addr);
58094 +
58095 if (mod) {
58096 /* Make sure it's within the text section. */
58097 - if (!within(addr, mod->module_init, mod->init_text_size)
58098 - && !within(addr, mod->module_core, mod->core_text_size))
58099 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58100 mod = NULL;
58101 }
58102 return mod;
58103 diff -urNp linux-3.0.3/kernel/mutex.c linux-3.0.3/kernel/mutex.c
58104 --- linux-3.0.3/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
58105 +++ linux-3.0.3/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
58106 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
58107 spin_lock_mutex(&lock->wait_lock, flags);
58108
58109 debug_mutex_lock_common(lock, &waiter);
58110 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58111 + debug_mutex_add_waiter(lock, &waiter, task);
58112
58113 /* add waiting tasks to the end of the waitqueue (FIFO): */
58114 list_add_tail(&waiter.list, &lock->wait_list);
58115 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
58116 * TASK_UNINTERRUPTIBLE case.)
58117 */
58118 if (unlikely(signal_pending_state(state, task))) {
58119 - mutex_remove_waiter(lock, &waiter,
58120 - task_thread_info(task));
58121 + mutex_remove_waiter(lock, &waiter, task);
58122 mutex_release(&lock->dep_map, 1, ip);
58123 spin_unlock_mutex(&lock->wait_lock, flags);
58124
58125 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
58126 done:
58127 lock_acquired(&lock->dep_map, ip);
58128 /* got the lock - rejoice! */
58129 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58130 + mutex_remove_waiter(lock, &waiter, task);
58131 mutex_set_owner(lock);
58132
58133 /* set it to 0 if there are no waiters left: */
58134 diff -urNp linux-3.0.3/kernel/mutex-debug.c linux-3.0.3/kernel/mutex-debug.c
58135 --- linux-3.0.3/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
58136 +++ linux-3.0.3/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
58137 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58138 }
58139
58140 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58141 - struct thread_info *ti)
58142 + struct task_struct *task)
58143 {
58144 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58145
58146 /* Mark the current thread as blocked on the lock: */
58147 - ti->task->blocked_on = waiter;
58148 + task->blocked_on = waiter;
58149 }
58150
58151 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58152 - struct thread_info *ti)
58153 + struct task_struct *task)
58154 {
58155 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58156 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58157 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58158 - ti->task->blocked_on = NULL;
58159 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58160 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58161 + task->blocked_on = NULL;
58162
58163 list_del_init(&waiter->list);
58164 waiter->task = NULL;
58165 diff -urNp linux-3.0.3/kernel/mutex-debug.h linux-3.0.3/kernel/mutex-debug.h
58166 --- linux-3.0.3/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
58167 +++ linux-3.0.3/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
58168 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
58169 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58170 extern void debug_mutex_add_waiter(struct mutex *lock,
58171 struct mutex_waiter *waiter,
58172 - struct thread_info *ti);
58173 + struct task_struct *task);
58174 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58175 - struct thread_info *ti);
58176 + struct task_struct *task);
58177 extern void debug_mutex_unlock(struct mutex *lock);
58178 extern void debug_mutex_init(struct mutex *lock, const char *name,
58179 struct lock_class_key *key);
58180 diff -urNp linux-3.0.3/kernel/padata.c linux-3.0.3/kernel/padata.c
58181 --- linux-3.0.3/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
58182 +++ linux-3.0.3/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
58183 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58184 padata->pd = pd;
58185 padata->cb_cpu = cb_cpu;
58186
58187 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58188 - atomic_set(&pd->seq_nr, -1);
58189 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58190 + atomic_set_unchecked(&pd->seq_nr, -1);
58191
58192 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58193 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58194
58195 target_cpu = padata_cpu_hash(padata);
58196 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58197 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58198 padata_init_pqueues(pd);
58199 padata_init_squeues(pd);
58200 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58201 - atomic_set(&pd->seq_nr, -1);
58202 + atomic_set_unchecked(&pd->seq_nr, -1);
58203 atomic_set(&pd->reorder_objects, 0);
58204 atomic_set(&pd->refcnt, 0);
58205 pd->pinst = pinst;
58206 diff -urNp linux-3.0.3/kernel/panic.c linux-3.0.3/kernel/panic.c
58207 --- linux-3.0.3/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
58208 +++ linux-3.0.3/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
58209 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58210 const char *board;
58211
58212 printk(KERN_WARNING "------------[ cut here ]------------\n");
58213 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58214 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58215 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58216 if (board)
58217 printk(KERN_WARNING "Hardware name: %s\n", board);
58218 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58219 */
58220 void __stack_chk_fail(void)
58221 {
58222 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58223 + dump_stack();
58224 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58225 __builtin_return_address(0));
58226 }
58227 EXPORT_SYMBOL(__stack_chk_fail);
58228 diff -urNp linux-3.0.3/kernel/pid.c linux-3.0.3/kernel/pid.c
58229 --- linux-3.0.3/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
58230 +++ linux-3.0.3/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
58231 @@ -33,6 +33,7 @@
58232 #include <linux/rculist.h>
58233 #include <linux/bootmem.h>
58234 #include <linux/hash.h>
58235 +#include <linux/security.h>
58236 #include <linux/pid_namespace.h>
58237 #include <linux/init_task.h>
58238 #include <linux/syscalls.h>
58239 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58240
58241 int pid_max = PID_MAX_DEFAULT;
58242
58243 -#define RESERVED_PIDS 300
58244 +#define RESERVED_PIDS 500
58245
58246 int pid_max_min = RESERVED_PIDS + 1;
58247 int pid_max_max = PID_MAX_LIMIT;
58248 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58249 */
58250 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58251 {
58252 + struct task_struct *task;
58253 +
58254 rcu_lockdep_assert(rcu_read_lock_held());
58255 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58256 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58257 +
58258 + if (gr_pid_is_chrooted(task))
58259 + return NULL;
58260 +
58261 + return task;
58262 }
58263
58264 struct task_struct *find_task_by_vpid(pid_t vnr)
58265 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58266 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58267 }
58268
58269 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58270 +{
58271 + rcu_lockdep_assert(rcu_read_lock_held());
58272 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58273 +}
58274 +
58275 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58276 {
58277 struct pid *pid;
58278 diff -urNp linux-3.0.3/kernel/posix-cpu-timers.c linux-3.0.3/kernel/posix-cpu-timers.c
58279 --- linux-3.0.3/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
58280 +++ linux-3.0.3/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
58281 @@ -6,6 +6,7 @@
58282 #include <linux/posix-timers.h>
58283 #include <linux/errno.h>
58284 #include <linux/math64.h>
58285 +#include <linux/security.h>
58286 #include <asm/uaccess.h>
58287 #include <linux/kernel_stat.h>
58288 #include <trace/events/timer.h>
58289 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58290
58291 static __init int init_posix_cpu_timers(void)
58292 {
58293 - struct k_clock process = {
58294 + static struct k_clock process = {
58295 .clock_getres = process_cpu_clock_getres,
58296 .clock_get = process_cpu_clock_get,
58297 .timer_create = process_cpu_timer_create,
58298 .nsleep = process_cpu_nsleep,
58299 .nsleep_restart = process_cpu_nsleep_restart,
58300 };
58301 - struct k_clock thread = {
58302 + static struct k_clock thread = {
58303 .clock_getres = thread_cpu_clock_getres,
58304 .clock_get = thread_cpu_clock_get,
58305 .timer_create = thread_cpu_timer_create,
58306 diff -urNp linux-3.0.3/kernel/posix-timers.c linux-3.0.3/kernel/posix-timers.c
58307 --- linux-3.0.3/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
58308 +++ linux-3.0.3/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
58309 @@ -43,6 +43,7 @@
58310 #include <linux/idr.h>
58311 #include <linux/posix-clock.h>
58312 #include <linux/posix-timers.h>
58313 +#include <linux/grsecurity.h>
58314 #include <linux/syscalls.h>
58315 #include <linux/wait.h>
58316 #include <linux/workqueue.h>
58317 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58318 * which we beg off on and pass to do_sys_settimeofday().
58319 */
58320
58321 -static struct k_clock posix_clocks[MAX_CLOCKS];
58322 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58323
58324 /*
58325 * These ones are defined below.
58326 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58327 */
58328 static __init int init_posix_timers(void)
58329 {
58330 - struct k_clock clock_realtime = {
58331 + static struct k_clock clock_realtime = {
58332 .clock_getres = hrtimer_get_res,
58333 .clock_get = posix_clock_realtime_get,
58334 .clock_set = posix_clock_realtime_set,
58335 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58336 .timer_get = common_timer_get,
58337 .timer_del = common_timer_del,
58338 };
58339 - struct k_clock clock_monotonic = {
58340 + static struct k_clock clock_monotonic = {
58341 .clock_getres = hrtimer_get_res,
58342 .clock_get = posix_ktime_get_ts,
58343 .nsleep = common_nsleep,
58344 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58345 .timer_get = common_timer_get,
58346 .timer_del = common_timer_del,
58347 };
58348 - struct k_clock clock_monotonic_raw = {
58349 + static struct k_clock clock_monotonic_raw = {
58350 .clock_getres = hrtimer_get_res,
58351 .clock_get = posix_get_monotonic_raw,
58352 };
58353 - struct k_clock clock_realtime_coarse = {
58354 + static struct k_clock clock_realtime_coarse = {
58355 .clock_getres = posix_get_coarse_res,
58356 .clock_get = posix_get_realtime_coarse,
58357 };
58358 - struct k_clock clock_monotonic_coarse = {
58359 + static struct k_clock clock_monotonic_coarse = {
58360 .clock_getres = posix_get_coarse_res,
58361 .clock_get = posix_get_monotonic_coarse,
58362 };
58363 - struct k_clock clock_boottime = {
58364 + static struct k_clock clock_boottime = {
58365 .clock_getres = hrtimer_get_res,
58366 .clock_get = posix_get_boottime,
58367 .nsleep = common_nsleep,
58368 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58369 .timer_del = common_timer_del,
58370 };
58371
58372 + pax_track_stack();
58373 +
58374 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58375 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58376 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58377 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58378 return;
58379 }
58380
58381 - posix_clocks[clock_id] = *new_clock;
58382 + posix_clocks[clock_id] = new_clock;
58383 }
58384 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58385
58386 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
58387 return (id & CLOCKFD_MASK) == CLOCKFD ?
58388 &clock_posix_dynamic : &clock_posix_cpu;
58389
58390 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58391 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58392 return NULL;
58393 - return &posix_clocks[id];
58394 + return posix_clocks[id];
58395 }
58396
58397 static int common_timer_create(struct k_itimer *new_timer)
58398 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58399 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58400 return -EFAULT;
58401
58402 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58403 + have their clock_set fptr set to a nosettime dummy function
58404 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58405 + call common_clock_set, which calls do_sys_settimeofday, which
58406 + we hook
58407 + */
58408 +
58409 return kc->clock_set(which_clock, &new_tp);
58410 }
58411
58412 diff -urNp linux-3.0.3/kernel/power/poweroff.c linux-3.0.3/kernel/power/poweroff.c
58413 --- linux-3.0.3/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
58414 +++ linux-3.0.3/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
58415 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58416 .enable_mask = SYSRQ_ENABLE_BOOT,
58417 };
58418
58419 -static int pm_sysrq_init(void)
58420 +static int __init pm_sysrq_init(void)
58421 {
58422 register_sysrq_key('o', &sysrq_poweroff_op);
58423 return 0;
58424 diff -urNp linux-3.0.3/kernel/power/process.c linux-3.0.3/kernel/power/process.c
58425 --- linux-3.0.3/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
58426 +++ linux-3.0.3/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
58427 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58428 u64 elapsed_csecs64;
58429 unsigned int elapsed_csecs;
58430 bool wakeup = false;
58431 + bool timedout = false;
58432
58433 do_gettimeofday(&start);
58434
58435 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58436
58437 while (true) {
58438 todo = 0;
58439 + if (time_after(jiffies, end_time))
58440 + timedout = true;
58441 read_lock(&tasklist_lock);
58442 do_each_thread(g, p) {
58443 if (frozen(p) || !freezable(p))
58444 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58445 * try_to_stop() after schedule() in ptrace/signal
58446 * stop sees TIF_FREEZE.
58447 */
58448 - if (!task_is_stopped_or_traced(p) &&
58449 - !freezer_should_skip(p))
58450 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58451 todo++;
58452 + if (timedout) {
58453 + printk(KERN_ERR "Task refusing to freeze:\n");
58454 + sched_show_task(p);
58455 + }
58456 + }
58457 } while_each_thread(g, p);
58458 read_unlock(&tasklist_lock);
58459
58460 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58461 todo += wq_busy;
58462 }
58463
58464 - if (!todo || time_after(jiffies, end_time))
58465 + if (!todo || timedout)
58466 break;
58467
58468 if (pm_wakeup_pending()) {
58469 diff -urNp linux-3.0.3/kernel/printk.c linux-3.0.3/kernel/printk.c
58470 --- linux-3.0.3/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
58471 +++ linux-3.0.3/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
58472 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
58473 if (from_file && type != SYSLOG_ACTION_OPEN)
58474 return 0;
58475
58476 +#ifdef CONFIG_GRKERNSEC_DMESG
58477 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58478 + return -EPERM;
58479 +#endif
58480 +
58481 if (syslog_action_restricted(type)) {
58482 if (capable(CAP_SYSLOG))
58483 return 0;
58484 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58485 if (capable(CAP_SYS_ADMIN)) {
58486 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58487 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58488 "but no CAP_SYSLOG (deprecated).\n");
58489 return 0;
58490 }
58491 diff -urNp linux-3.0.3/kernel/profile.c linux-3.0.3/kernel/profile.c
58492 --- linux-3.0.3/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
58493 +++ linux-3.0.3/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
58494 @@ -39,7 +39,7 @@ struct profile_hit {
58495 /* Oprofile timer tick hook */
58496 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58497
58498 -static atomic_t *prof_buffer;
58499 +static atomic_unchecked_t *prof_buffer;
58500 static unsigned long prof_len, prof_shift;
58501
58502 int prof_on __read_mostly;
58503 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
58504 hits[i].pc = 0;
58505 continue;
58506 }
58507 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58508 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58509 hits[i].hits = hits[i].pc = 0;
58510 }
58511 }
58512 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
58513 * Add the current hit(s) and flush the write-queue out
58514 * to the global buffer:
58515 */
58516 - atomic_add(nr_hits, &prof_buffer[pc]);
58517 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58518 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58519 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58520 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58521 hits[i].pc = hits[i].hits = 0;
58522 }
58523 out:
58524 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
58525 {
58526 unsigned long pc;
58527 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58528 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58529 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58530 }
58531 #endif /* !CONFIG_SMP */
58532
58533 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58534 return -EFAULT;
58535 buf++; p++; count--; read++;
58536 }
58537 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58538 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58539 if (copy_to_user(buf, (void *)pnt, count))
58540 return -EFAULT;
58541 read += count;
58542 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58543 }
58544 #endif
58545 profile_discard_flip_buffers();
58546 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58547 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58548 return count;
58549 }
58550
58551 diff -urNp linux-3.0.3/kernel/ptrace.c linux-3.0.3/kernel/ptrace.c
58552 --- linux-3.0.3/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
58553 +++ linux-3.0.3/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
58554 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
58555 return ret;
58556 }
58557
58558 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58559 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58560 + unsigned int log)
58561 {
58562 const struct cred *cred = current_cred(), *tcred;
58563
58564 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
58565 cred->gid == tcred->sgid &&
58566 cred->gid == tcred->gid))
58567 goto ok;
58568 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58569 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58570 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58571 goto ok;
58572 rcu_read_unlock();
58573 return -EPERM;
58574 @@ -167,7 +169,9 @@ ok:
58575 smp_rmb();
58576 if (task->mm)
58577 dumpable = get_dumpable(task->mm);
58578 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58579 + if (!dumpable &&
58580 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58581 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58582 return -EPERM;
58583
58584 return security_ptrace_access_check(task, mode);
58585 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
58586 {
58587 int err;
58588 task_lock(task);
58589 - err = __ptrace_may_access(task, mode);
58590 + err = __ptrace_may_access(task, mode, 0);
58591 + task_unlock(task);
58592 + return !err;
58593 +}
58594 +
58595 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58596 +{
58597 + int err;
58598 + task_lock(task);
58599 + err = __ptrace_may_access(task, mode, 1);
58600 task_unlock(task);
58601 return !err;
58602 }
58603 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
58604 goto out;
58605
58606 task_lock(task);
58607 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58608 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58609 task_unlock(task);
58610 if (retval)
58611 goto unlock_creds;
58612 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
58613 goto unlock_tasklist;
58614
58615 task->ptrace = PT_PTRACED;
58616 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58617 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58618 task->ptrace |= PT_PTRACE_CAP;
58619
58620 __ptrace_link(task, current);
58621 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
58622 {
58623 int copied = 0;
58624
58625 + pax_track_stack();
58626 +
58627 while (len > 0) {
58628 char buf[128];
58629 int this_len, retval;
58630 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
58631 break;
58632 return -EIO;
58633 }
58634 - if (copy_to_user(dst, buf, retval))
58635 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58636 return -EFAULT;
58637 copied += retval;
58638 src += retval;
58639 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
58640 {
58641 int copied = 0;
58642
58643 + pax_track_stack();
58644 +
58645 while (len > 0) {
58646 char buf[128];
58647 int this_len, retval;
58648 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
58649 {
58650 int ret = -EIO;
58651 siginfo_t siginfo;
58652 - void __user *datavp = (void __user *) data;
58653 + void __user *datavp = (__force void __user *) data;
58654 unsigned long __user *datalp = datavp;
58655
58656 + pax_track_stack();
58657 +
58658 switch (request) {
58659 case PTRACE_PEEKTEXT:
58660 case PTRACE_PEEKDATA:
58661 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58662 goto out;
58663 }
58664
58665 + if (gr_handle_ptrace(child, request)) {
58666 + ret = -EPERM;
58667 + goto out_put_task_struct;
58668 + }
58669 +
58670 if (request == PTRACE_ATTACH) {
58671 ret = ptrace_attach(child);
58672 /*
58673 * Some architectures need to do book-keeping after
58674 * a ptrace attach.
58675 */
58676 - if (!ret)
58677 + if (!ret) {
58678 arch_ptrace_attach(child);
58679 + gr_audit_ptrace(child);
58680 + }
58681 goto out_put_task_struct;
58682 }
58683
58684 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
58685 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58686 if (copied != sizeof(tmp))
58687 return -EIO;
58688 - return put_user(tmp, (unsigned long __user *)data);
58689 + return put_user(tmp, (__force unsigned long __user *)data);
58690 }
58691
58692 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58693 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
58694 siginfo_t siginfo;
58695 int ret;
58696
58697 + pax_track_stack();
58698 +
58699 switch (request) {
58700 case PTRACE_PEEKTEXT:
58701 case PTRACE_PEEKDATA:
58702 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
58703 goto out;
58704 }
58705
58706 + if (gr_handle_ptrace(child, request)) {
58707 + ret = -EPERM;
58708 + goto out_put_task_struct;
58709 + }
58710 +
58711 if (request == PTRACE_ATTACH) {
58712 ret = ptrace_attach(child);
58713 /*
58714 * Some architectures need to do book-keeping after
58715 * a ptrace attach.
58716 */
58717 - if (!ret)
58718 + if (!ret) {
58719 arch_ptrace_attach(child);
58720 + gr_audit_ptrace(child);
58721 + }
58722 goto out_put_task_struct;
58723 }
58724
58725 diff -urNp linux-3.0.3/kernel/rcutorture.c linux-3.0.3/kernel/rcutorture.c
58726 --- linux-3.0.3/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
58727 +++ linux-3.0.3/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
58728 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
58729 { 0 };
58730 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
58731 { 0 };
58732 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58733 -static atomic_t n_rcu_torture_alloc;
58734 -static atomic_t n_rcu_torture_alloc_fail;
58735 -static atomic_t n_rcu_torture_free;
58736 -static atomic_t n_rcu_torture_mberror;
58737 -static atomic_t n_rcu_torture_error;
58738 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
58739 +static atomic_unchecked_t n_rcu_torture_alloc;
58740 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
58741 +static atomic_unchecked_t n_rcu_torture_free;
58742 +static atomic_unchecked_t n_rcu_torture_mberror;
58743 +static atomic_unchecked_t n_rcu_torture_error;
58744 static long n_rcu_torture_boost_ktrerror;
58745 static long n_rcu_torture_boost_rterror;
58746 static long n_rcu_torture_boost_failure;
58747 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
58748
58749 spin_lock_bh(&rcu_torture_lock);
58750 if (list_empty(&rcu_torture_freelist)) {
58751 - atomic_inc(&n_rcu_torture_alloc_fail);
58752 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
58753 spin_unlock_bh(&rcu_torture_lock);
58754 return NULL;
58755 }
58756 - atomic_inc(&n_rcu_torture_alloc);
58757 + atomic_inc_unchecked(&n_rcu_torture_alloc);
58758 p = rcu_torture_freelist.next;
58759 list_del_init(p);
58760 spin_unlock_bh(&rcu_torture_lock);
58761 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
58762 static void
58763 rcu_torture_free(struct rcu_torture *p)
58764 {
58765 - atomic_inc(&n_rcu_torture_free);
58766 + atomic_inc_unchecked(&n_rcu_torture_free);
58767 spin_lock_bh(&rcu_torture_lock);
58768 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
58769 spin_unlock_bh(&rcu_torture_lock);
58770 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
58771 i = rp->rtort_pipe_count;
58772 if (i > RCU_TORTURE_PIPE_LEN)
58773 i = RCU_TORTURE_PIPE_LEN;
58774 - atomic_inc(&rcu_torture_wcount[i]);
58775 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58776 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58777 rp->rtort_mbtest = 0;
58778 rcu_torture_free(rp);
58779 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
58780 i = rp->rtort_pipe_count;
58781 if (i > RCU_TORTURE_PIPE_LEN)
58782 i = RCU_TORTURE_PIPE_LEN;
58783 - atomic_inc(&rcu_torture_wcount[i]);
58784 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58785 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
58786 rp->rtort_mbtest = 0;
58787 list_del(&rp->rtort_free);
58788 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
58789 i = old_rp->rtort_pipe_count;
58790 if (i > RCU_TORTURE_PIPE_LEN)
58791 i = RCU_TORTURE_PIPE_LEN;
58792 - atomic_inc(&rcu_torture_wcount[i]);
58793 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
58794 old_rp->rtort_pipe_count++;
58795 cur_ops->deferred_free(old_rp);
58796 }
58797 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
58798 return;
58799 }
58800 if (p->rtort_mbtest == 0)
58801 - atomic_inc(&n_rcu_torture_mberror);
58802 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58803 spin_lock(&rand_lock);
58804 cur_ops->read_delay(&rand);
58805 n_rcu_torture_timers++;
58806 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
58807 continue;
58808 }
58809 if (p->rtort_mbtest == 0)
58810 - atomic_inc(&n_rcu_torture_mberror);
58811 + atomic_inc_unchecked(&n_rcu_torture_mberror);
58812 cur_ops->read_delay(&rand);
58813 preempt_disable();
58814 pipe_count = p->rtort_pipe_count;
58815 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
58816 rcu_torture_current,
58817 rcu_torture_current_version,
58818 list_empty(&rcu_torture_freelist),
58819 - atomic_read(&n_rcu_torture_alloc),
58820 - atomic_read(&n_rcu_torture_alloc_fail),
58821 - atomic_read(&n_rcu_torture_free),
58822 - atomic_read(&n_rcu_torture_mberror),
58823 + atomic_read_unchecked(&n_rcu_torture_alloc),
58824 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
58825 + atomic_read_unchecked(&n_rcu_torture_free),
58826 + atomic_read_unchecked(&n_rcu_torture_mberror),
58827 n_rcu_torture_boost_ktrerror,
58828 n_rcu_torture_boost_rterror,
58829 n_rcu_torture_boost_failure,
58830 n_rcu_torture_boosts,
58831 n_rcu_torture_timers);
58832 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
58833 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
58834 n_rcu_torture_boost_ktrerror != 0 ||
58835 n_rcu_torture_boost_rterror != 0 ||
58836 n_rcu_torture_boost_failure != 0)
58837 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
58838 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
58839 if (i > 1) {
58840 cnt += sprintf(&page[cnt], "!!! ");
58841 - atomic_inc(&n_rcu_torture_error);
58842 + atomic_inc_unchecked(&n_rcu_torture_error);
58843 WARN_ON_ONCE(1);
58844 }
58845 cnt += sprintf(&page[cnt], "Reader Pipe: ");
58846 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
58847 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
58848 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58849 cnt += sprintf(&page[cnt], " %d",
58850 - atomic_read(&rcu_torture_wcount[i]));
58851 + atomic_read_unchecked(&rcu_torture_wcount[i]));
58852 }
58853 cnt += sprintf(&page[cnt], "\n");
58854 if (cur_ops->stats)
58855 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
58856
58857 if (cur_ops->cleanup)
58858 cur_ops->cleanup();
58859 - if (atomic_read(&n_rcu_torture_error))
58860 + if (atomic_read_unchecked(&n_rcu_torture_error))
58861 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
58862 else
58863 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
58864 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
58865
58866 rcu_torture_current = NULL;
58867 rcu_torture_current_version = 0;
58868 - atomic_set(&n_rcu_torture_alloc, 0);
58869 - atomic_set(&n_rcu_torture_alloc_fail, 0);
58870 - atomic_set(&n_rcu_torture_free, 0);
58871 - atomic_set(&n_rcu_torture_mberror, 0);
58872 - atomic_set(&n_rcu_torture_error, 0);
58873 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
58874 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
58875 + atomic_set_unchecked(&n_rcu_torture_free, 0);
58876 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
58877 + atomic_set_unchecked(&n_rcu_torture_error, 0);
58878 n_rcu_torture_boost_ktrerror = 0;
58879 n_rcu_torture_boost_rterror = 0;
58880 n_rcu_torture_boost_failure = 0;
58881 n_rcu_torture_boosts = 0;
58882 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
58883 - atomic_set(&rcu_torture_wcount[i], 0);
58884 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
58885 for_each_possible_cpu(cpu) {
58886 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
58887 per_cpu(rcu_torture_count, cpu)[i] = 0;
58888 diff -urNp linux-3.0.3/kernel/rcutree.c linux-3.0.3/kernel/rcutree.c
58889 --- linux-3.0.3/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
58890 +++ linux-3.0.3/kernel/rcutree.c 2011-08-23 21:47:56.000000000 -0400
58891 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
58892 /*
58893 * Do softirq processing for the current CPU.
58894 */
58895 -static void rcu_process_callbacks(struct softirq_action *unused)
58896 +static void rcu_process_callbacks(void)
58897 {
58898 __rcu_process_callbacks(&rcu_sched_state,
58899 &__get_cpu_var(rcu_sched_data));
58900 diff -urNp linux-3.0.3/kernel/rcutree_plugin.h linux-3.0.3/kernel/rcutree_plugin.h
58901 --- linux-3.0.3/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
58902 +++ linux-3.0.3/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
58903 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
58904
58905 /* Clean up and exit. */
58906 smp_mb(); /* ensure expedited GP seen before counter increment. */
58907 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
58908 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
58909 unlock_mb_ret:
58910 mutex_unlock(&sync_rcu_preempt_exp_mutex);
58911 mb_ret:
58912 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
58913
58914 #else /* #ifndef CONFIG_SMP */
58915
58916 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
58917 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
58918 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
58919 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
58920
58921 static int synchronize_sched_expedited_cpu_stop(void *data)
58922 {
58923 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
58924 int firstsnap, s, snap, trycount = 0;
58925
58926 /* Note that atomic_inc_return() implies full memory barrier. */
58927 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
58928 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
58929 get_online_cpus();
58930
58931 /*
58932 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
58933 }
58934
58935 /* Check to see if someone else did our work for us. */
58936 - s = atomic_read(&sync_sched_expedited_done);
58937 + s = atomic_read_unchecked(&sync_sched_expedited_done);
58938 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
58939 smp_mb(); /* ensure test happens before caller kfree */
58940 return;
58941 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
58942 * grace period works for us.
58943 */
58944 get_online_cpus();
58945 - snap = atomic_read(&sync_sched_expedited_started) - 1;
58946 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
58947 smp_mb(); /* ensure read is before try_stop_cpus(). */
58948 }
58949
58950 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
58951 * than we did beat us to the punch.
58952 */
58953 do {
58954 - s = atomic_read(&sync_sched_expedited_done);
58955 + s = atomic_read_unchecked(&sync_sched_expedited_done);
58956 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
58957 smp_mb(); /* ensure test happens before caller kfree */
58958 break;
58959 }
58960 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
58961 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
58962
58963 put_online_cpus();
58964 }
58965 diff -urNp linux-3.0.3/kernel/relay.c linux-3.0.3/kernel/relay.c
58966 --- linux-3.0.3/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
58967 +++ linux-3.0.3/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
58968 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
58969 };
58970 ssize_t ret;
58971
58972 + pax_track_stack();
58973 +
58974 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
58975 return 0;
58976 if (splice_grow_spd(pipe, &spd))
58977 diff -urNp linux-3.0.3/kernel/resource.c linux-3.0.3/kernel/resource.c
58978 --- linux-3.0.3/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
58979 +++ linux-3.0.3/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
58980 @@ -141,8 +141,18 @@ static const struct file_operations proc
58981
58982 static int __init ioresources_init(void)
58983 {
58984 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
58985 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58986 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
58987 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
58988 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58989 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
58990 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
58991 +#endif
58992 +#else
58993 proc_create("ioports", 0, NULL, &proc_ioports_operations);
58994 proc_create("iomem", 0, NULL, &proc_iomem_operations);
58995 +#endif
58996 return 0;
58997 }
58998 __initcall(ioresources_init);
58999 diff -urNp linux-3.0.3/kernel/rtmutex-tester.c linux-3.0.3/kernel/rtmutex-tester.c
59000 --- linux-3.0.3/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
59001 +++ linux-3.0.3/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
59002 @@ -20,7 +20,7 @@
59003 #define MAX_RT_TEST_MUTEXES 8
59004
59005 static spinlock_t rttest_lock;
59006 -static atomic_t rttest_event;
59007 +static atomic_unchecked_t rttest_event;
59008
59009 struct test_thread_data {
59010 int opcode;
59011 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59012
59013 case RTTEST_LOCKCONT:
59014 td->mutexes[td->opdata] = 1;
59015 - td->event = atomic_add_return(1, &rttest_event);
59016 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59017 return 0;
59018
59019 case RTTEST_RESET:
59020 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59021 return 0;
59022
59023 case RTTEST_RESETEVENT:
59024 - atomic_set(&rttest_event, 0);
59025 + atomic_set_unchecked(&rttest_event, 0);
59026 return 0;
59027
59028 default:
59029 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59030 return ret;
59031
59032 td->mutexes[id] = 1;
59033 - td->event = atomic_add_return(1, &rttest_event);
59034 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59035 rt_mutex_lock(&mutexes[id]);
59036 - td->event = atomic_add_return(1, &rttest_event);
59037 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59038 td->mutexes[id] = 4;
59039 return 0;
59040
59041 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59042 return ret;
59043
59044 td->mutexes[id] = 1;
59045 - td->event = atomic_add_return(1, &rttest_event);
59046 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59047 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59048 - td->event = atomic_add_return(1, &rttest_event);
59049 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59050 td->mutexes[id] = ret ? 0 : 4;
59051 return ret ? -EINTR : 0;
59052
59053 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59054 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59055 return ret;
59056
59057 - td->event = atomic_add_return(1, &rttest_event);
59058 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59059 rt_mutex_unlock(&mutexes[id]);
59060 - td->event = atomic_add_return(1, &rttest_event);
59061 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59062 td->mutexes[id] = 0;
59063 return 0;
59064
59065 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59066 break;
59067
59068 td->mutexes[dat] = 2;
59069 - td->event = atomic_add_return(1, &rttest_event);
59070 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59071 break;
59072
59073 default:
59074 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59075 return;
59076
59077 td->mutexes[dat] = 3;
59078 - td->event = atomic_add_return(1, &rttest_event);
59079 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59080 break;
59081
59082 case RTTEST_LOCKNOWAIT:
59083 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59084 return;
59085
59086 td->mutexes[dat] = 1;
59087 - td->event = atomic_add_return(1, &rttest_event);
59088 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59089 return;
59090
59091 default:
59092 diff -urNp linux-3.0.3/kernel/sched_autogroup.c linux-3.0.3/kernel/sched_autogroup.c
59093 --- linux-3.0.3/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
59094 +++ linux-3.0.3/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
59095 @@ -7,7 +7,7 @@
59096
59097 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59098 static struct autogroup autogroup_default;
59099 -static atomic_t autogroup_seq_nr;
59100 +static atomic_unchecked_t autogroup_seq_nr;
59101
59102 static void __init autogroup_init(struct task_struct *init_task)
59103 {
59104 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59105
59106 kref_init(&ag->kref);
59107 init_rwsem(&ag->lock);
59108 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59109 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59110 ag->tg = tg;
59111 #ifdef CONFIG_RT_GROUP_SCHED
59112 /*
59113 diff -urNp linux-3.0.3/kernel/sched.c linux-3.0.3/kernel/sched.c
59114 --- linux-3.0.3/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
59115 +++ linux-3.0.3/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
59116 @@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
59117 struct rq *rq;
59118 int cpu;
59119
59120 + pax_track_stack();
59121 +
59122 need_resched:
59123 preempt_disable();
59124 cpu = smp_processor_id();
59125 @@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
59126 /* convert nice value [19,-20] to rlimit style value [1,40] */
59127 int nice_rlim = 20 - nice;
59128
59129 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59130 +
59131 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59132 capable(CAP_SYS_NICE));
59133 }
59134 @@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59135 if (nice > 19)
59136 nice = 19;
59137
59138 - if (increment < 0 && !can_nice(current, nice))
59139 + if (increment < 0 && (!can_nice(current, nice) ||
59140 + gr_handle_chroot_nice()))
59141 return -EPERM;
59142
59143 retval = security_task_setnice(current, nice);
59144 @@ -5111,6 +5116,7 @@ recheck:
59145 unsigned long rlim_rtprio =
59146 task_rlimit(p, RLIMIT_RTPRIO);
59147
59148 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59149 /* can't set/change the rt policy */
59150 if (policy != p->policy && !rlim_rtprio)
59151 return -EPERM;
59152 diff -urNp linux-3.0.3/kernel/sched_fair.c linux-3.0.3/kernel/sched_fair.c
59153 --- linux-3.0.3/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
59154 +++ linux-3.0.3/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
59155 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
59156 * run_rebalance_domains is triggered when needed from the scheduler tick.
59157 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59158 */
59159 -static void run_rebalance_domains(struct softirq_action *h)
59160 +static void run_rebalance_domains(void)
59161 {
59162 int this_cpu = smp_processor_id();
59163 struct rq *this_rq = cpu_rq(this_cpu);
59164 diff -urNp linux-3.0.3/kernel/signal.c linux-3.0.3/kernel/signal.c
59165 --- linux-3.0.3/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
59166 +++ linux-3.0.3/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
59167 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59168
59169 int print_fatal_signals __read_mostly;
59170
59171 -static void __user *sig_handler(struct task_struct *t, int sig)
59172 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59173 {
59174 return t->sighand->action[sig - 1].sa.sa_handler;
59175 }
59176
59177 -static int sig_handler_ignored(void __user *handler, int sig)
59178 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59179 {
59180 /* Is it explicitly or implicitly ignored? */
59181 return handler == SIG_IGN ||
59182 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59183 static int sig_task_ignored(struct task_struct *t, int sig,
59184 int from_ancestor_ns)
59185 {
59186 - void __user *handler;
59187 + __sighandler_t handler;
59188
59189 handler = sig_handler(t, sig);
59190
59191 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
59192 atomic_inc(&user->sigpending);
59193 rcu_read_unlock();
59194
59195 + if (!override_rlimit)
59196 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59197 +
59198 if (override_rlimit ||
59199 atomic_read(&user->sigpending) <=
59200 task_rlimit(t, RLIMIT_SIGPENDING)) {
59201 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
59202
59203 int unhandled_signal(struct task_struct *tsk, int sig)
59204 {
59205 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59206 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59207 if (is_global_init(tsk))
59208 return 1;
59209 if (handler != SIG_IGN && handler != SIG_DFL)
59210 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
59211 }
59212 }
59213
59214 + /* allow glibc communication via tgkill to other threads in our
59215 + thread group */
59216 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59217 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59218 + && gr_handle_signal(t, sig))
59219 + return -EPERM;
59220 +
59221 return security_task_kill(t, info, sig, 0);
59222 }
59223
59224 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
59225 return send_signal(sig, info, p, 1);
59226 }
59227
59228 -static int
59229 +int
59230 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59231 {
59232 return send_signal(sig, info, t, 0);
59233 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
59234 unsigned long int flags;
59235 int ret, blocked, ignored;
59236 struct k_sigaction *action;
59237 + int is_unhandled = 0;
59238
59239 spin_lock_irqsave(&t->sighand->siglock, flags);
59240 action = &t->sighand->action[sig-1];
59241 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
59242 }
59243 if (action->sa.sa_handler == SIG_DFL)
59244 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59245 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59246 + is_unhandled = 1;
59247 ret = specific_send_sig_info(sig, info, t);
59248 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59249
59250 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59251 + normal operation */
59252 + if (is_unhandled) {
59253 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59254 + gr_handle_crash(t, sig);
59255 + }
59256 +
59257 return ret;
59258 }
59259
59260 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
59261 ret = check_kill_permission(sig, info, p);
59262 rcu_read_unlock();
59263
59264 - if (!ret && sig)
59265 + if (!ret && sig) {
59266 ret = do_send_sig_info(sig, info, p, true);
59267 + if (!ret)
59268 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59269 + }
59270
59271 return ret;
59272 }
59273 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
59274 {
59275 siginfo_t info;
59276
59277 + pax_track_stack();
59278 +
59279 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59280
59281 memset(&info, 0, sizeof info);
59282 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59283 int error = -ESRCH;
59284
59285 rcu_read_lock();
59286 - p = find_task_by_vpid(pid);
59287 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59288 + /* allow glibc communication via tgkill to other threads in our
59289 + thread group */
59290 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59291 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59292 + p = find_task_by_vpid_unrestricted(pid);
59293 + else
59294 +#endif
59295 + p = find_task_by_vpid(pid);
59296 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59297 error = check_kill_permission(sig, info, p);
59298 /*
59299 diff -urNp linux-3.0.3/kernel/smp.c linux-3.0.3/kernel/smp.c
59300 --- linux-3.0.3/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
59301 +++ linux-3.0.3/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
59302 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
59303 }
59304 EXPORT_SYMBOL(smp_call_function);
59305
59306 -void ipi_call_lock(void)
59307 +void ipi_call_lock(void) __acquires(call_function.lock)
59308 {
59309 raw_spin_lock(&call_function.lock);
59310 }
59311
59312 -void ipi_call_unlock(void)
59313 +void ipi_call_unlock(void) __releases(call_function.lock)
59314 {
59315 raw_spin_unlock(&call_function.lock);
59316 }
59317
59318 -void ipi_call_lock_irq(void)
59319 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59320 {
59321 raw_spin_lock_irq(&call_function.lock);
59322 }
59323
59324 -void ipi_call_unlock_irq(void)
59325 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59326 {
59327 raw_spin_unlock_irq(&call_function.lock);
59328 }
59329 diff -urNp linux-3.0.3/kernel/softirq.c linux-3.0.3/kernel/softirq.c
59330 --- linux-3.0.3/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
59331 +++ linux-3.0.3/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
59332 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59333
59334 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59335
59336 -char *softirq_to_name[NR_SOFTIRQS] = {
59337 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59338 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59339 "TASKLET", "SCHED", "HRTIMER", "RCU"
59340 };
59341 @@ -235,7 +235,7 @@ restart:
59342 kstat_incr_softirqs_this_cpu(vec_nr);
59343
59344 trace_softirq_entry(vec_nr);
59345 - h->action(h);
59346 + h->action();
59347 trace_softirq_exit(vec_nr);
59348 if (unlikely(prev_count != preempt_count())) {
59349 printk(KERN_ERR "huh, entered softirq %u %s %p"
59350 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
59351 local_irq_restore(flags);
59352 }
59353
59354 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59355 +void open_softirq(int nr, void (*action)(void))
59356 {
59357 - softirq_vec[nr].action = action;
59358 + pax_open_kernel();
59359 + *(void **)&softirq_vec[nr].action = action;
59360 + pax_close_kernel();
59361 }
59362
59363 /*
59364 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
59365
59366 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59367
59368 -static void tasklet_action(struct softirq_action *a)
59369 +static void tasklet_action(void)
59370 {
59371 struct tasklet_struct *list;
59372
59373 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
59374 }
59375 }
59376
59377 -static void tasklet_hi_action(struct softirq_action *a)
59378 +static void tasklet_hi_action(void)
59379 {
59380 struct tasklet_struct *list;
59381
59382 diff -urNp linux-3.0.3/kernel/sys.c linux-3.0.3/kernel/sys.c
59383 --- linux-3.0.3/kernel/sys.c 2011-07-21 22:17:23.000000000 -0400
59384 +++ linux-3.0.3/kernel/sys.c 2011-08-23 21:48:14.000000000 -0400
59385 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59386 error = -EACCES;
59387 goto out;
59388 }
59389 +
59390 + if (gr_handle_chroot_setpriority(p, niceval)) {
59391 + error = -EACCES;
59392 + goto out;
59393 + }
59394 +
59395 no_nice = security_task_setnice(p, niceval);
59396 if (no_nice) {
59397 error = no_nice;
59398 @@ -537,6 +543,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59399 goto error;
59400 }
59401
59402 + if (gr_check_group_change(new->gid, new->egid, -1))
59403 + goto error;
59404 +
59405 if (rgid != (gid_t) -1 ||
59406 (egid != (gid_t) -1 && egid != old->gid))
59407 new->sgid = new->egid;
59408 @@ -566,6 +575,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59409 old = current_cred();
59410
59411 retval = -EPERM;
59412 +
59413 + if (gr_check_group_change(gid, gid, gid))
59414 + goto error;
59415 +
59416 if (nsown_capable(CAP_SETGID))
59417 new->gid = new->egid = new->sgid = new->fsgid = gid;
59418 else if (gid == old->gid || gid == old->sgid)
59419 @@ -646,6 +659,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59420 goto error;
59421 }
59422
59423 + if (gr_check_user_change(new->uid, new->euid, -1))
59424 + goto error;
59425 +
59426 if (new->uid != old->uid) {
59427 retval = set_user(new);
59428 if (retval < 0)
59429 @@ -690,6 +706,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59430 old = current_cred();
59431
59432 retval = -EPERM;
59433 +
59434 + if (gr_check_crash_uid(uid))
59435 + goto error;
59436 + if (gr_check_user_change(uid, uid, uid))
59437 + goto error;
59438 +
59439 if (nsown_capable(CAP_SETUID)) {
59440 new->suid = new->uid = uid;
59441 if (uid != old->uid) {
59442 @@ -744,6 +766,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59443 goto error;
59444 }
59445
59446 + if (gr_check_user_change(ruid, euid, -1))
59447 + goto error;
59448 +
59449 if (ruid != (uid_t) -1) {
59450 new->uid = ruid;
59451 if (ruid != old->uid) {
59452 @@ -808,6 +833,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59453 goto error;
59454 }
59455
59456 + if (gr_check_group_change(rgid, egid, -1))
59457 + goto error;
59458 +
59459 if (rgid != (gid_t) -1)
59460 new->gid = rgid;
59461 if (egid != (gid_t) -1)
59462 @@ -854,6 +882,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59463 old = current_cred();
59464 old_fsuid = old->fsuid;
59465
59466 + if (gr_check_user_change(-1, -1, uid))
59467 + goto error;
59468 +
59469 if (uid == old->uid || uid == old->euid ||
59470 uid == old->suid || uid == old->fsuid ||
59471 nsown_capable(CAP_SETUID)) {
59472 @@ -864,6 +895,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59473 }
59474 }
59475
59476 +error:
59477 abort_creds(new);
59478 return old_fsuid;
59479
59480 @@ -890,12 +922,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59481 if (gid == old->gid || gid == old->egid ||
59482 gid == old->sgid || gid == old->fsgid ||
59483 nsown_capable(CAP_SETGID)) {
59484 + if (gr_check_group_change(-1, -1, gid))
59485 + goto error;
59486 +
59487 if (gid != old_fsgid) {
59488 new->fsgid = gid;
59489 goto change_okay;
59490 }
59491 }
59492
59493 +error:
59494 abort_creds(new);
59495 return old_fsgid;
59496
59497 @@ -1642,7 +1678,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59498 error = get_dumpable(me->mm);
59499 break;
59500 case PR_SET_DUMPABLE:
59501 - if (arg2 < 0 || arg2 > 1) {
59502 + if (arg2 > 1) {
59503 error = -EINVAL;
59504 break;
59505 }
59506 diff -urNp linux-3.0.3/kernel/sysctl.c linux-3.0.3/kernel/sysctl.c
59507 --- linux-3.0.3/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
59508 +++ linux-3.0.3/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
59509 @@ -85,6 +85,13 @@
59510
59511
59512 #if defined(CONFIG_SYSCTL)
59513 +#include <linux/grsecurity.h>
59514 +#include <linux/grinternal.h>
59515 +
59516 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59517 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59518 + const int op);
59519 +extern int gr_handle_chroot_sysctl(const int op);
59520
59521 /* External variables not in a header file. */
59522 extern int sysctl_overcommit_memory;
59523 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59524 }
59525
59526 #endif
59527 +extern struct ctl_table grsecurity_table[];
59528
59529 static struct ctl_table root_table[];
59530 static struct ctl_table_root sysctl_table_root;
59531 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
59532 int sysctl_legacy_va_layout;
59533 #endif
59534
59535 +#ifdef CONFIG_PAX_SOFTMODE
59536 +static ctl_table pax_table[] = {
59537 + {
59538 + .procname = "softmode",
59539 + .data = &pax_softmode,
59540 + .maxlen = sizeof(unsigned int),
59541 + .mode = 0600,
59542 + .proc_handler = &proc_dointvec,
59543 + },
59544 +
59545 + { }
59546 +};
59547 +#endif
59548 +
59549 /* The default sysctl tables: */
59550
59551 static struct ctl_table root_table[] = {
59552 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
59553 #endif
59554
59555 static struct ctl_table kern_table[] = {
59556 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59557 + {
59558 + .procname = "grsecurity",
59559 + .mode = 0500,
59560 + .child = grsecurity_table,
59561 + },
59562 +#endif
59563 +
59564 +#ifdef CONFIG_PAX_SOFTMODE
59565 + {
59566 + .procname = "pax",
59567 + .mode = 0500,
59568 + .child = pax_table,
59569 + },
59570 +#endif
59571 +
59572 {
59573 .procname = "sched_child_runs_first",
59574 .data = &sysctl_sched_child_runs_first,
59575 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
59576 .data = &modprobe_path,
59577 .maxlen = KMOD_PATH_LEN,
59578 .mode = 0644,
59579 - .proc_handler = proc_dostring,
59580 + .proc_handler = proc_dostring_modpriv,
59581 },
59582 {
59583 .procname = "modules_disabled",
59584 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
59585 .extra1 = &zero,
59586 .extra2 = &one,
59587 },
59588 +#endif
59589 {
59590 .procname = "kptr_restrict",
59591 .data = &kptr_restrict,
59592 .maxlen = sizeof(int),
59593 .mode = 0644,
59594 .proc_handler = proc_dmesg_restrict,
59595 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59596 + .extra1 = &two,
59597 +#else
59598 .extra1 = &zero,
59599 +#endif
59600 .extra2 = &two,
59601 },
59602 -#endif
59603 {
59604 .procname = "ngroups_max",
59605 .data = &ngroups_max,
59606 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
59607 .proc_handler = proc_dointvec_minmax,
59608 .extra1 = &zero,
59609 },
59610 + {
59611 + .procname = "heap_stack_gap",
59612 + .data = &sysctl_heap_stack_gap,
59613 + .maxlen = sizeof(sysctl_heap_stack_gap),
59614 + .mode = 0644,
59615 + .proc_handler = proc_doulongvec_minmax,
59616 + },
59617 #else
59618 {
59619 .procname = "nr_trim_pages",
59620 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
59621 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59622 {
59623 int mode;
59624 + int error;
59625 +
59626 + if (table->parent != NULL && table->parent->procname != NULL &&
59627 + table->procname != NULL &&
59628 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59629 + return -EACCES;
59630 + if (gr_handle_chroot_sysctl(op))
59631 + return -EACCES;
59632 + error = gr_handle_sysctl(table, op);
59633 + if (error)
59634 + return error;
59635
59636 if (root->permissions)
59637 mode = root->permissions(root, current->nsproxy, table);
59638 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
59639 buffer, lenp, ppos);
59640 }
59641
59642 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59643 + void __user *buffer, size_t *lenp, loff_t *ppos)
59644 +{
59645 + if (write && !capable(CAP_SYS_MODULE))
59646 + return -EPERM;
59647 +
59648 + return _proc_do_string(table->data, table->maxlen, write,
59649 + buffer, lenp, ppos);
59650 +}
59651 +
59652 static size_t proc_skip_spaces(char **buf)
59653 {
59654 size_t ret;
59655 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
59656 len = strlen(tmp);
59657 if (len > *size)
59658 len = *size;
59659 + if (len > sizeof(tmp))
59660 + len = sizeof(tmp);
59661 if (copy_to_user(*buf, tmp, len))
59662 return -EFAULT;
59663 *size -= len;
59664 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
59665 *i = val;
59666 } else {
59667 val = convdiv * (*i) / convmul;
59668 - if (!first)
59669 + if (!first) {
59670 err = proc_put_char(&buffer, &left, '\t');
59671 + if (err)
59672 + break;
59673 + }
59674 err = proc_put_long(&buffer, &left, val, false);
59675 if (err)
59676 break;
59677 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
59678 return -ENOSYS;
59679 }
59680
59681 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59682 + void __user *buffer, size_t *lenp, loff_t *ppos)
59683 +{
59684 + return -ENOSYS;
59685 +}
59686 +
59687 int proc_dointvec(struct ctl_table *table, int write,
59688 void __user *buffer, size_t *lenp, loff_t *ppos)
59689 {
59690 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
59691 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
59692 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
59693 EXPORT_SYMBOL(proc_dostring);
59694 +EXPORT_SYMBOL(proc_dostring_modpriv);
59695 EXPORT_SYMBOL(proc_doulongvec_minmax);
59696 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
59697 EXPORT_SYMBOL(register_sysctl_table);
59698 diff -urNp linux-3.0.3/kernel/sysctl_check.c linux-3.0.3/kernel/sysctl_check.c
59699 --- linux-3.0.3/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
59700 +++ linux-3.0.3/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
59701 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
59702 set_fail(&fail, table, "Directory with extra2");
59703 } else {
59704 if ((table->proc_handler == proc_dostring) ||
59705 + (table->proc_handler == proc_dostring_modpriv) ||
59706 (table->proc_handler == proc_dointvec) ||
59707 (table->proc_handler == proc_dointvec_minmax) ||
59708 (table->proc_handler == proc_dointvec_jiffies) ||
59709 diff -urNp linux-3.0.3/kernel/taskstats.c linux-3.0.3/kernel/taskstats.c
59710 --- linux-3.0.3/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
59711 +++ linux-3.0.3/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
59712 @@ -27,9 +27,12 @@
59713 #include <linux/cgroup.h>
59714 #include <linux/fs.h>
59715 #include <linux/file.h>
59716 +#include <linux/grsecurity.h>
59717 #include <net/genetlink.h>
59718 #include <asm/atomic.h>
59719
59720 +extern int gr_is_taskstats_denied(int pid);
59721 +
59722 /*
59723 * Maximum length of a cpumask that can be specified in
59724 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
59725 @@ -558,6 +561,9 @@ err:
59726
59727 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
59728 {
59729 + if (gr_is_taskstats_denied(current->pid))
59730 + return -EACCES;
59731 +
59732 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
59733 return cmd_attr_register_cpumask(info);
59734 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
59735 diff -urNp linux-3.0.3/kernel/time/alarmtimer.c linux-3.0.3/kernel/time/alarmtimer.c
59736 --- linux-3.0.3/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
59737 +++ linux-3.0.3/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
59738 @@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
59739 {
59740 int error = 0;
59741 int i;
59742 - struct k_clock alarm_clock = {
59743 + static struct k_clock alarm_clock = {
59744 .clock_getres = alarm_clock_getres,
59745 .clock_get = alarm_clock_get,
59746 .timer_create = alarm_timer_create,
59747 diff -urNp linux-3.0.3/kernel/time/tick-broadcast.c linux-3.0.3/kernel/time/tick-broadcast.c
59748 --- linux-3.0.3/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
59749 +++ linux-3.0.3/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
59750 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
59751 * then clear the broadcast bit.
59752 */
59753 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
59754 - int cpu = smp_processor_id();
59755 + cpu = smp_processor_id();
59756
59757 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
59758 tick_broadcast_clear_oneshot(cpu);
59759 diff -urNp linux-3.0.3/kernel/time/timekeeping.c linux-3.0.3/kernel/time/timekeeping.c
59760 --- linux-3.0.3/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
59761 +++ linux-3.0.3/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
59762 @@ -14,6 +14,7 @@
59763 #include <linux/init.h>
59764 #include <linux/mm.h>
59765 #include <linux/sched.h>
59766 +#include <linux/grsecurity.h>
59767 #include <linux/syscore_ops.h>
59768 #include <linux/clocksource.h>
59769 #include <linux/jiffies.h>
59770 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
59771 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
59772 return -EINVAL;
59773
59774 + gr_log_timechange();
59775 +
59776 write_seqlock_irqsave(&xtime_lock, flags);
59777
59778 timekeeping_forward_now();
59779 diff -urNp linux-3.0.3/kernel/time/timer_list.c linux-3.0.3/kernel/time/timer_list.c
59780 --- linux-3.0.3/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
59781 +++ linux-3.0.3/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
59782 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
59783
59784 static void print_name_offset(struct seq_file *m, void *sym)
59785 {
59786 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59787 + SEQ_printf(m, "<%p>", NULL);
59788 +#else
59789 char symname[KSYM_NAME_LEN];
59790
59791 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
59792 SEQ_printf(m, "<%pK>", sym);
59793 else
59794 SEQ_printf(m, "%s", symname);
59795 +#endif
59796 }
59797
59798 static void
59799 @@ -112,7 +116,11 @@ next_one:
59800 static void
59801 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
59802 {
59803 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59804 + SEQ_printf(m, " .base: %p\n", NULL);
59805 +#else
59806 SEQ_printf(m, " .base: %pK\n", base);
59807 +#endif
59808 SEQ_printf(m, " .index: %d\n",
59809 base->index);
59810 SEQ_printf(m, " .resolution: %Lu nsecs\n",
59811 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
59812 {
59813 struct proc_dir_entry *pe;
59814
59815 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59816 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
59817 +#else
59818 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
59819 +#endif
59820 if (!pe)
59821 return -ENOMEM;
59822 return 0;
59823 diff -urNp linux-3.0.3/kernel/time/timer_stats.c linux-3.0.3/kernel/time/timer_stats.c
59824 --- linux-3.0.3/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
59825 +++ linux-3.0.3/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
59826 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
59827 static unsigned long nr_entries;
59828 static struct entry entries[MAX_ENTRIES];
59829
59830 -static atomic_t overflow_count;
59831 +static atomic_unchecked_t overflow_count;
59832
59833 /*
59834 * The entries are in a hash-table, for fast lookup:
59835 @@ -140,7 +140,7 @@ static void reset_entries(void)
59836 nr_entries = 0;
59837 memset(entries, 0, sizeof(entries));
59838 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
59839 - atomic_set(&overflow_count, 0);
59840 + atomic_set_unchecked(&overflow_count, 0);
59841 }
59842
59843 static struct entry *alloc_entry(void)
59844 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
59845 if (likely(entry))
59846 entry->count++;
59847 else
59848 - atomic_inc(&overflow_count);
59849 + atomic_inc_unchecked(&overflow_count);
59850
59851 out_unlock:
59852 raw_spin_unlock_irqrestore(lock, flags);
59853 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
59854
59855 static void print_name_offset(struct seq_file *m, unsigned long addr)
59856 {
59857 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59858 + seq_printf(m, "<%p>", NULL);
59859 +#else
59860 char symname[KSYM_NAME_LEN];
59861
59862 if (lookup_symbol_name(addr, symname) < 0)
59863 seq_printf(m, "<%p>", (void *)addr);
59864 else
59865 seq_printf(m, "%s", symname);
59866 +#endif
59867 }
59868
59869 static int tstats_show(struct seq_file *m, void *v)
59870 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
59871
59872 seq_puts(m, "Timer Stats Version: v0.2\n");
59873 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
59874 - if (atomic_read(&overflow_count))
59875 + if (atomic_read_unchecked(&overflow_count))
59876 seq_printf(m, "Overflow: %d entries\n",
59877 - atomic_read(&overflow_count));
59878 + atomic_read_unchecked(&overflow_count));
59879
59880 for (i = 0; i < nr_entries; i++) {
59881 entry = entries + i;
59882 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
59883 {
59884 struct proc_dir_entry *pe;
59885
59886 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59887 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
59888 +#else
59889 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
59890 +#endif
59891 if (!pe)
59892 return -ENOMEM;
59893 return 0;
59894 diff -urNp linux-3.0.3/kernel/time.c linux-3.0.3/kernel/time.c
59895 --- linux-3.0.3/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
59896 +++ linux-3.0.3/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
59897 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
59898 return error;
59899
59900 if (tz) {
59901 + /* we log in do_settimeofday called below, so don't log twice
59902 + */
59903 + if (!tv)
59904 + gr_log_timechange();
59905 +
59906 /* SMP safe, global irq locking makes it work. */
59907 sys_tz = *tz;
59908 update_vsyscall_tz();
59909 diff -urNp linux-3.0.3/kernel/timer.c linux-3.0.3/kernel/timer.c
59910 --- linux-3.0.3/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
59911 +++ linux-3.0.3/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
59912 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
59913 /*
59914 * This function runs timers and the timer-tq in bottom half context.
59915 */
59916 -static void run_timer_softirq(struct softirq_action *h)
59917 +static void run_timer_softirq(void)
59918 {
59919 struct tvec_base *base = __this_cpu_read(tvec_bases);
59920
59921 diff -urNp linux-3.0.3/kernel/trace/blktrace.c linux-3.0.3/kernel/trace/blktrace.c
59922 --- linux-3.0.3/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
59923 +++ linux-3.0.3/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
59924 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
59925 struct blk_trace *bt = filp->private_data;
59926 char buf[16];
59927
59928 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
59929 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
59930
59931 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
59932 }
59933 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
59934 return 1;
59935
59936 bt = buf->chan->private_data;
59937 - atomic_inc(&bt->dropped);
59938 + atomic_inc_unchecked(&bt->dropped);
59939 return 0;
59940 }
59941
59942 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
59943
59944 bt->dir = dir;
59945 bt->dev = dev;
59946 - atomic_set(&bt->dropped, 0);
59947 + atomic_set_unchecked(&bt->dropped, 0);
59948
59949 ret = -EIO;
59950 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
59951 diff -urNp linux-3.0.3/kernel/trace/ftrace.c linux-3.0.3/kernel/trace/ftrace.c
59952 --- linux-3.0.3/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
59953 +++ linux-3.0.3/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
59954 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
59955 if (unlikely(ftrace_disabled))
59956 return 0;
59957
59958 + ret = ftrace_arch_code_modify_prepare();
59959 + FTRACE_WARN_ON(ret);
59960 + if (ret)
59961 + return 0;
59962 +
59963 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
59964 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
59965 if (ret) {
59966 ftrace_bug(ret, ip);
59967 - return 0;
59968 }
59969 - return 1;
59970 + return ret ? 0 : 1;
59971 }
59972
59973 /*
59974 @@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
59975
59976 int
59977 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59978 - void *data)
59979 + void *data)
59980 {
59981 struct ftrace_func_probe *entry;
59982 struct ftrace_page *pg;
59983 diff -urNp linux-3.0.3/kernel/trace/trace.c linux-3.0.3/kernel/trace/trace.c
59984 --- linux-3.0.3/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
59985 +++ linux-3.0.3/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
59986 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
59987 size_t rem;
59988 unsigned int i;
59989
59990 + pax_track_stack();
59991 +
59992 if (splice_grow_spd(pipe, &spd))
59993 return -ENOMEM;
59994
59995 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
59996 int entries, size, i;
59997 size_t ret;
59998
59999 + pax_track_stack();
60000 +
60001 if (splice_grow_spd(pipe, &spd))
60002 return -ENOMEM;
60003
60004 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
60005 };
60006 #endif
60007
60008 -static struct dentry *d_tracer;
60009 -
60010 struct dentry *tracing_init_dentry(void)
60011 {
60012 + static struct dentry *d_tracer;
60013 static int once;
60014
60015 if (d_tracer)
60016 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
60017 return d_tracer;
60018 }
60019
60020 -static struct dentry *d_percpu;
60021 -
60022 struct dentry *tracing_dentry_percpu(void)
60023 {
60024 + static struct dentry *d_percpu;
60025 static int once;
60026 struct dentry *d_tracer;
60027
60028 diff -urNp linux-3.0.3/kernel/trace/trace_events.c linux-3.0.3/kernel/trace/trace_events.c
60029 --- linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:44:40.000000000 -0400
60030 +++ linux-3.0.3/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
60031 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
60032 struct ftrace_module_file_ops {
60033 struct list_head list;
60034 struct module *mod;
60035 - struct file_operations id;
60036 - struct file_operations enable;
60037 - struct file_operations format;
60038 - struct file_operations filter;
60039 };
60040
60041 static struct ftrace_module_file_ops *
60042 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
60043
60044 file_ops->mod = mod;
60045
60046 - file_ops->id = ftrace_event_id_fops;
60047 - file_ops->id.owner = mod;
60048 -
60049 - file_ops->enable = ftrace_enable_fops;
60050 - file_ops->enable.owner = mod;
60051 -
60052 - file_ops->filter = ftrace_event_filter_fops;
60053 - file_ops->filter.owner = mod;
60054 -
60055 - file_ops->format = ftrace_event_format_fops;
60056 - file_ops->format.owner = mod;
60057 + pax_open_kernel();
60058 + *(void **)&mod->trace_id.owner = mod;
60059 + *(void **)&mod->trace_enable.owner = mod;
60060 + *(void **)&mod->trace_filter.owner = mod;
60061 + *(void **)&mod->trace_format.owner = mod;
60062 + pax_close_kernel();
60063
60064 list_add(&file_ops->list, &ftrace_module_file_list);
60065
60066 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
60067
60068 for_each_event(call, start, end) {
60069 __trace_add_event_call(*call, mod,
60070 - &file_ops->id, &file_ops->enable,
60071 - &file_ops->filter, &file_ops->format);
60072 + &mod->trace_id, &mod->trace_enable,
60073 + &mod->trace_filter, &mod->trace_format);
60074 }
60075 }
60076
60077 diff -urNp linux-3.0.3/kernel/trace/trace_mmiotrace.c linux-3.0.3/kernel/trace/trace_mmiotrace.c
60078 --- linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
60079 +++ linux-3.0.3/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
60080 @@ -24,7 +24,7 @@ struct header_iter {
60081 static struct trace_array *mmio_trace_array;
60082 static bool overrun_detected;
60083 static unsigned long prev_overruns;
60084 -static atomic_t dropped_count;
60085 +static atomic_unchecked_t dropped_count;
60086
60087 static void mmio_reset_data(struct trace_array *tr)
60088 {
60089 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60090
60091 static unsigned long count_overruns(struct trace_iterator *iter)
60092 {
60093 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60094 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60095 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60096
60097 if (over > prev_overruns)
60098 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60099 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60100 sizeof(*entry), 0, pc);
60101 if (!event) {
60102 - atomic_inc(&dropped_count);
60103 + atomic_inc_unchecked(&dropped_count);
60104 return;
60105 }
60106 entry = ring_buffer_event_data(event);
60107 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60108 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60109 sizeof(*entry), 0, pc);
60110 if (!event) {
60111 - atomic_inc(&dropped_count);
60112 + atomic_inc_unchecked(&dropped_count);
60113 return;
60114 }
60115 entry = ring_buffer_event_data(event);
60116 diff -urNp linux-3.0.3/kernel/trace/trace_output.c linux-3.0.3/kernel/trace/trace_output.c
60117 --- linux-3.0.3/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
60118 +++ linux-3.0.3/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
60119 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60120
60121 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60122 if (!IS_ERR(p)) {
60123 - p = mangle_path(s->buffer + s->len, p, "\n");
60124 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60125 if (p) {
60126 s->len = p - s->buffer;
60127 return 1;
60128 diff -urNp linux-3.0.3/kernel/trace/trace_stack.c linux-3.0.3/kernel/trace/trace_stack.c
60129 --- linux-3.0.3/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
60130 +++ linux-3.0.3/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
60131 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60132 return;
60133
60134 /* we do not handle interrupt stacks yet */
60135 - if (!object_is_on_stack(&this_size))
60136 + if (!object_starts_on_stack(&this_size))
60137 return;
60138
60139 local_irq_save(flags);
60140 diff -urNp linux-3.0.3/kernel/trace/trace_workqueue.c linux-3.0.3/kernel/trace/trace_workqueue.c
60141 --- linux-3.0.3/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
60142 +++ linux-3.0.3/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
60143 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60144 int cpu;
60145 pid_t pid;
60146 /* Can be inserted from interrupt or user context, need to be atomic */
60147 - atomic_t inserted;
60148 + atomic_unchecked_t inserted;
60149 /*
60150 * Don't need to be atomic, works are serialized in a single workqueue thread
60151 * on a single CPU.
60152 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60153 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60154 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60155 if (node->pid == wq_thread->pid) {
60156 - atomic_inc(&node->inserted);
60157 + atomic_inc_unchecked(&node->inserted);
60158 goto found;
60159 }
60160 }
60161 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60162 tsk = get_pid_task(pid, PIDTYPE_PID);
60163 if (tsk) {
60164 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60165 - atomic_read(&cws->inserted), cws->executed,
60166 + atomic_read_unchecked(&cws->inserted), cws->executed,
60167 tsk->comm);
60168 put_task_struct(tsk);
60169 }
60170 diff -urNp linux-3.0.3/lib/bug.c linux-3.0.3/lib/bug.c
60171 --- linux-3.0.3/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
60172 +++ linux-3.0.3/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
60173 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60174 return BUG_TRAP_TYPE_NONE;
60175
60176 bug = find_bug(bugaddr);
60177 + if (!bug)
60178 + return BUG_TRAP_TYPE_NONE;
60179
60180 file = NULL;
60181 line = 0;
60182 diff -urNp linux-3.0.3/lib/debugobjects.c linux-3.0.3/lib/debugobjects.c
60183 --- linux-3.0.3/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
60184 +++ linux-3.0.3/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
60185 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60186 if (limit > 4)
60187 return;
60188
60189 - is_on_stack = object_is_on_stack(addr);
60190 + is_on_stack = object_starts_on_stack(addr);
60191 if (is_on_stack == onstack)
60192 return;
60193
60194 diff -urNp linux-3.0.3/lib/dma-debug.c linux-3.0.3/lib/dma-debug.c
60195 --- linux-3.0.3/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
60196 +++ linux-3.0.3/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
60197 @@ -870,7 +870,7 @@ out:
60198
60199 static void check_for_stack(struct device *dev, void *addr)
60200 {
60201 - if (object_is_on_stack(addr))
60202 + if (object_starts_on_stack(addr))
60203 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60204 "stack [addr=%p]\n", addr);
60205 }
60206 diff -urNp linux-3.0.3/lib/extable.c linux-3.0.3/lib/extable.c
60207 --- linux-3.0.3/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
60208 +++ linux-3.0.3/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
60209 @@ -13,6 +13,7 @@
60210 #include <linux/init.h>
60211 #include <linux/sort.h>
60212 #include <asm/uaccess.h>
60213 +#include <asm/pgtable.h>
60214
60215 #ifndef ARCH_HAS_SORT_EXTABLE
60216 /*
60217 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
60218 void sort_extable(struct exception_table_entry *start,
60219 struct exception_table_entry *finish)
60220 {
60221 + pax_open_kernel();
60222 sort(start, finish - start, sizeof(struct exception_table_entry),
60223 cmp_ex, NULL);
60224 + pax_close_kernel();
60225 }
60226
60227 #ifdef CONFIG_MODULES
60228 diff -urNp linux-3.0.3/lib/inflate.c linux-3.0.3/lib/inflate.c
60229 --- linux-3.0.3/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
60230 +++ linux-3.0.3/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
60231 @@ -269,7 +269,7 @@ static void free(void *where)
60232 malloc_ptr = free_mem_ptr;
60233 }
60234 #else
60235 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60236 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60237 #define free(a) kfree(a)
60238 #endif
60239
60240 diff -urNp linux-3.0.3/lib/Kconfig.debug linux-3.0.3/lib/Kconfig.debug
60241 --- linux-3.0.3/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
60242 +++ linux-3.0.3/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
60243 @@ -1088,6 +1088,7 @@ config LATENCYTOP
60244 depends on DEBUG_KERNEL
60245 depends on STACKTRACE_SUPPORT
60246 depends on PROC_FS
60247 + depends on !GRKERNSEC_HIDESYM
60248 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60249 select KALLSYMS
60250 select KALLSYMS_ALL
60251 diff -urNp linux-3.0.3/lib/kref.c linux-3.0.3/lib/kref.c
60252 --- linux-3.0.3/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
60253 +++ linux-3.0.3/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
60254 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60255 */
60256 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60257 {
60258 - WARN_ON(release == NULL);
60259 + BUG_ON(release == NULL);
60260 WARN_ON(release == (void (*)(struct kref *))kfree);
60261
60262 if (atomic_dec_and_test(&kref->refcount)) {
60263 diff -urNp linux-3.0.3/lib/radix-tree.c linux-3.0.3/lib/radix-tree.c
60264 --- linux-3.0.3/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
60265 +++ linux-3.0.3/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
60266 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60267 int nr;
60268 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60269 };
60270 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60271 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60272
60273 static inline void *ptr_to_indirect(void *ptr)
60274 {
60275 diff -urNp linux-3.0.3/lib/vsprintf.c linux-3.0.3/lib/vsprintf.c
60276 --- linux-3.0.3/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
60277 +++ linux-3.0.3/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
60278 @@ -16,6 +16,9 @@
60279 * - scnprintf and vscnprintf
60280 */
60281
60282 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60283 +#define __INCLUDED_BY_HIDESYM 1
60284 +#endif
60285 #include <stdarg.h>
60286 #include <linux/module.h>
60287 #include <linux/types.h>
60288 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60289 char sym[KSYM_SYMBOL_LEN];
60290 if (ext == 'B')
60291 sprint_backtrace(sym, value);
60292 - else if (ext != 'f' && ext != 's')
60293 + else if (ext != 'f' && ext != 's' && ext != 'a')
60294 sprint_symbol(sym, value);
60295 else
60296 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60297 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
60298 return string(buf, end, uuid, spec);
60299 }
60300
60301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60302 +int kptr_restrict __read_mostly = 2;
60303 +#else
60304 int kptr_restrict __read_mostly;
60305 +#endif
60306
60307 /*
60308 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60309 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
60310 * - 'S' For symbolic direct pointers with offset
60311 * - 's' For symbolic direct pointers without offset
60312 * - 'B' For backtraced symbolic direct pointers with offset
60313 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60314 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60315 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60316 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60317 * - 'M' For a 6-byte MAC address, it prints the address in the
60318 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
60319 {
60320 if (!ptr && *fmt != 'K') {
60321 /*
60322 - * Print (null) with the same width as a pointer so it makes
60323 + * Print (nil) with the same width as a pointer so it makes
60324 * tabular output look nice.
60325 */
60326 if (spec.field_width == -1)
60327 spec.field_width = 2 * sizeof(void *);
60328 - return string(buf, end, "(null)", spec);
60329 + return string(buf, end, "(nil)", spec);
60330 }
60331
60332 switch (*fmt) {
60333 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
60334 /* Fallthrough */
60335 case 'S':
60336 case 's':
60337 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60338 + break;
60339 +#else
60340 + return symbol_string(buf, end, ptr, spec, *fmt);
60341 +#endif
60342 + case 'A':
60343 + case 'a':
60344 case 'B':
60345 return symbol_string(buf, end, ptr, spec, *fmt);
60346 case 'R':
60347 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
60348 typeof(type) value; \
60349 if (sizeof(type) == 8) { \
60350 args = PTR_ALIGN(args, sizeof(u32)); \
60351 - *(u32 *)&value = *(u32 *)args; \
60352 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60353 + *(u32 *)&value = *(const u32 *)args; \
60354 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60355 } else { \
60356 args = PTR_ALIGN(args, sizeof(type)); \
60357 - value = *(typeof(type) *)args; \
60358 + value = *(const typeof(type) *)args; \
60359 } \
60360 args += sizeof(type); \
60361 value; \
60362 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
60363 case FORMAT_TYPE_STR: {
60364 const char *str_arg = args;
60365 args += strlen(str_arg) + 1;
60366 - str = string(str, end, (char *)str_arg, spec);
60367 + str = string(str, end, str_arg, spec);
60368 break;
60369 }
60370
60371 diff -urNp linux-3.0.3/localversion-grsec linux-3.0.3/localversion-grsec
60372 --- linux-3.0.3/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60373 +++ linux-3.0.3/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
60374 @@ -0,0 +1 @@
60375 +-grsec
60376 diff -urNp linux-3.0.3/Makefile linux-3.0.3/Makefile
60377 --- linux-3.0.3/Makefile 2011-08-23 21:44:40.000000000 -0400
60378 +++ linux-3.0.3/Makefile 2011-08-24 18:10:12.000000000 -0400
60379 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60380
60381 HOSTCC = gcc
60382 HOSTCXX = g++
60383 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60384 -HOSTCXXFLAGS = -O2
60385 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60386 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60387 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60388
60389 # Decide whether to build built-in, modular, or both.
60390 # Normally, just do built-in.
60391 @@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60392 KBUILD_CPPFLAGS := -D__KERNEL__
60393
60394 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60395 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60396 -fno-strict-aliasing -fno-common \
60397 -Werror-implicit-function-declaration \
60398 -Wno-format-security \
60399 -fno-delete-null-pointer-checks
60400 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60401 KBUILD_AFLAGS_KERNEL :=
60402 KBUILD_CFLAGS_KERNEL :=
60403 KBUILD_AFLAGS := -D__ASSEMBLY__
60404 @@ -407,10 +410,11 @@ export RCS_TAR_IGNORE := --exclude SCCS
60405 # Rules shared between *config targets and build targets
60406
60407 # Basic helpers built in scripts/
60408 -PHONY += scripts_basic
60409 -scripts_basic:
60410 +PHONY += scripts_basic0 scripts_basic gcc-plugins
60411 +scripts_basic0:
60412 $(Q)$(MAKE) $(build)=scripts/basic
60413 $(Q)rm -f .tmp_quiet_recordmcount
60414 +scripts_basic: scripts_basic0 gcc-plugins
60415
60416 # To avoid any implicit rule to kick in, define an empty command.
60417 scripts/basic/%: scripts_basic ;
60418 @@ -564,6 +568,24 @@ else
60419 KBUILD_CFLAGS += -O2
60420 endif
60421
60422 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60423 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60424 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60425 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60426 +endif
60427 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60428 +gcc-plugins:
60429 + $(Q)$(MAKE) $(build)=tools/gcc
60430 +else
60431 +gcc-plugins:
60432 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60433 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60434 +else
60435 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60436 +endif
60437 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60438 +endif
60439 +
60440 include $(srctree)/arch/$(SRCARCH)/Makefile
60441
60442 ifneq ($(CONFIG_FRAME_WARN),0)
60443 @@ -708,7 +730,7 @@ export mod_strip_cmd
60444
60445
60446 ifeq ($(KBUILD_EXTMOD),)
60447 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60448 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60449
60450 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60451 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60452 @@ -907,6 +929,7 @@ define rule_vmlinux-modpost
60453 endef
60454
60455 # vmlinux image - including updated kernel symbols
60456 +vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60457 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
60458 ifdef CONFIG_HEADERS_CHECK
60459 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
60460 @@ -973,7 +996,7 @@ ifneq ($(KBUILD_SRC),)
60461 endif
60462
60463 # prepare2 creates a makefile if using a separate output directory
60464 -prepare2: prepare3 outputmakefile asm-generic
60465 +prepare2: prepare3 outputmakefile asm-generic gcc-plugins
60466
60467 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60468 include/config/auto.conf
60469 @@ -1087,6 +1110,7 @@ all: modules
60470 # using awk while concatenating to the final file.
60471
60472 PHONY += modules
60473 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60474 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
60475 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
60476 @$(kecho) ' Building modules, stage 2.';
60477 @@ -1198,7 +1222,7 @@ distclean: mrproper
60478 @find $(srctree) $(RCS_FIND_IGNORE) \
60479 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
60480 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
60481 - -o -name '.*.rej' -o -size 0 \
60482 + -o -name '.*.rej' -o -size 0 -o -name '*.so' \
60483 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
60484 -type f -print | xargs rm -f
60485
60486 @@ -1404,7 +1428,7 @@ clean: $(clean-dirs)
60487 $(call cmd,rmdirs)
60488 $(call cmd,rmfiles)
60489 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60490 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60491 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60492 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60493 -o -name '*.symtypes' -o -name 'modules.order' \
60494 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60495 diff -urNp linux-3.0.3/mm/filemap.c linux-3.0.3/mm/filemap.c
60496 --- linux-3.0.3/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
60497 +++ linux-3.0.3/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
60498 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
60499 struct address_space *mapping = file->f_mapping;
60500
60501 if (!mapping->a_ops->readpage)
60502 - return -ENOEXEC;
60503 + return -ENODEV;
60504 file_accessed(file);
60505 vma->vm_ops = &generic_file_vm_ops;
60506 vma->vm_flags |= VM_CAN_NONLINEAR;
60507 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
60508 *pos = i_size_read(inode);
60509
60510 if (limit != RLIM_INFINITY) {
60511 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60512 if (*pos >= limit) {
60513 send_sig(SIGXFSZ, current, 0);
60514 return -EFBIG;
60515 diff -urNp linux-3.0.3/mm/fremap.c linux-3.0.3/mm/fremap.c
60516 --- linux-3.0.3/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
60517 +++ linux-3.0.3/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
60518 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60519 retry:
60520 vma = find_vma(mm, start);
60521
60522 +#ifdef CONFIG_PAX_SEGMEXEC
60523 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60524 + goto out;
60525 +#endif
60526 +
60527 /*
60528 * Make sure the vma is shared, that it supports prefaulting,
60529 * and that the remapped range is valid and fully within
60530 diff -urNp linux-3.0.3/mm/highmem.c linux-3.0.3/mm/highmem.c
60531 --- linux-3.0.3/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
60532 +++ linux-3.0.3/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
60533 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60534 * So no dangers, even with speculative execution.
60535 */
60536 page = pte_page(pkmap_page_table[i]);
60537 + pax_open_kernel();
60538 pte_clear(&init_mm, (unsigned long)page_address(page),
60539 &pkmap_page_table[i]);
60540 -
60541 + pax_close_kernel();
60542 set_page_address(page, NULL);
60543 need_flush = 1;
60544 }
60545 @@ -186,9 +187,11 @@ start:
60546 }
60547 }
60548 vaddr = PKMAP_ADDR(last_pkmap_nr);
60549 +
60550 + pax_open_kernel();
60551 set_pte_at(&init_mm, vaddr,
60552 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60553 -
60554 + pax_close_kernel();
60555 pkmap_count[last_pkmap_nr] = 1;
60556 set_page_address(page, (void *)vaddr);
60557
60558 diff -urNp linux-3.0.3/mm/huge_memory.c linux-3.0.3/mm/huge_memory.c
60559 --- linux-3.0.3/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
60560 +++ linux-3.0.3/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
60561 @@ -702,7 +702,7 @@ out:
60562 * run pte_offset_map on the pmd, if an huge pmd could
60563 * materialize from under us from a different thread.
60564 */
60565 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60566 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60567 return VM_FAULT_OOM;
60568 /* if an huge pmd materialized from under us just retry later */
60569 if (unlikely(pmd_trans_huge(*pmd)))
60570 diff -urNp linux-3.0.3/mm/hugetlb.c linux-3.0.3/mm/hugetlb.c
60571 --- linux-3.0.3/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
60572 +++ linux-3.0.3/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
60573 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60574 return 1;
60575 }
60576
60577 +#ifdef CONFIG_PAX_SEGMEXEC
60578 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60579 +{
60580 + struct mm_struct *mm = vma->vm_mm;
60581 + struct vm_area_struct *vma_m;
60582 + unsigned long address_m;
60583 + pte_t *ptep_m;
60584 +
60585 + vma_m = pax_find_mirror_vma(vma);
60586 + if (!vma_m)
60587 + return;
60588 +
60589 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60590 + address_m = address + SEGMEXEC_TASK_SIZE;
60591 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60592 + get_page(page_m);
60593 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60594 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60595 +}
60596 +#endif
60597 +
60598 /*
60599 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60600 */
60601 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60602 make_huge_pte(vma, new_page, 1));
60603 page_remove_rmap(old_page);
60604 hugepage_add_new_anon_rmap(new_page, vma, address);
60605 +
60606 +#ifdef CONFIG_PAX_SEGMEXEC
60607 + pax_mirror_huge_pte(vma, address, new_page);
60608 +#endif
60609 +
60610 /* Make the old page be freed below */
60611 new_page = old_page;
60612 mmu_notifier_invalidate_range_end(mm,
60613 @@ -2591,6 +2617,10 @@ retry:
60614 && (vma->vm_flags & VM_SHARED)));
60615 set_huge_pte_at(mm, address, ptep, new_pte);
60616
60617 +#ifdef CONFIG_PAX_SEGMEXEC
60618 + pax_mirror_huge_pte(vma, address, page);
60619 +#endif
60620 +
60621 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60622 /* Optimization, do the COW without a second fault */
60623 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60624 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60625 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60626 struct hstate *h = hstate_vma(vma);
60627
60628 +#ifdef CONFIG_PAX_SEGMEXEC
60629 + struct vm_area_struct *vma_m;
60630 +#endif
60631 +
60632 ptep = huge_pte_offset(mm, address);
60633 if (ptep) {
60634 entry = huge_ptep_get(ptep);
60635 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60636 VM_FAULT_SET_HINDEX(h - hstates);
60637 }
60638
60639 +#ifdef CONFIG_PAX_SEGMEXEC
60640 + vma_m = pax_find_mirror_vma(vma);
60641 + if (vma_m) {
60642 + unsigned long address_m;
60643 +
60644 + if (vma->vm_start > vma_m->vm_start) {
60645 + address_m = address;
60646 + address -= SEGMEXEC_TASK_SIZE;
60647 + vma = vma_m;
60648 + h = hstate_vma(vma);
60649 + } else
60650 + address_m = address + SEGMEXEC_TASK_SIZE;
60651 +
60652 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60653 + return VM_FAULT_OOM;
60654 + address_m &= HPAGE_MASK;
60655 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60656 + }
60657 +#endif
60658 +
60659 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60660 if (!ptep)
60661 return VM_FAULT_OOM;
60662 diff -urNp linux-3.0.3/mm/internal.h linux-3.0.3/mm/internal.h
60663 --- linux-3.0.3/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
60664 +++ linux-3.0.3/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
60665 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60666 * in mm/page_alloc.c
60667 */
60668 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60669 +extern void free_compound_page(struct page *page);
60670 extern void prep_compound_page(struct page *page, unsigned long order);
60671 #ifdef CONFIG_MEMORY_FAILURE
60672 extern bool is_free_buddy_page(struct page *page);
60673 diff -urNp linux-3.0.3/mm/Kconfig linux-3.0.3/mm/Kconfig
60674 --- linux-3.0.3/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
60675 +++ linux-3.0.3/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
60676 @@ -240,7 +240,7 @@ config KSM
60677 config DEFAULT_MMAP_MIN_ADDR
60678 int "Low address space to protect from user allocation"
60679 depends on MMU
60680 - default 4096
60681 + default 65536
60682 help
60683 This is the portion of low virtual memory which should be protected
60684 from userspace allocation. Keeping a user from writing to low pages
60685 diff -urNp linux-3.0.3/mm/kmemleak.c linux-3.0.3/mm/kmemleak.c
60686 --- linux-3.0.3/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
60687 +++ linux-3.0.3/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
60688 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60689
60690 for (i = 0; i < object->trace_len; i++) {
60691 void *ptr = (void *)object->trace[i];
60692 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60693 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60694 }
60695 }
60696
60697 diff -urNp linux-3.0.3/mm/madvise.c linux-3.0.3/mm/madvise.c
60698 --- linux-3.0.3/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
60699 +++ linux-3.0.3/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
60700 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
60701 pgoff_t pgoff;
60702 unsigned long new_flags = vma->vm_flags;
60703
60704 +#ifdef CONFIG_PAX_SEGMEXEC
60705 + struct vm_area_struct *vma_m;
60706 +#endif
60707 +
60708 switch (behavior) {
60709 case MADV_NORMAL:
60710 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
60711 @@ -110,6 +114,13 @@ success:
60712 /*
60713 * vm_flags is protected by the mmap_sem held in write mode.
60714 */
60715 +
60716 +#ifdef CONFIG_PAX_SEGMEXEC
60717 + vma_m = pax_find_mirror_vma(vma);
60718 + if (vma_m)
60719 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
60720 +#endif
60721 +
60722 vma->vm_flags = new_flags;
60723
60724 out:
60725 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
60726 struct vm_area_struct ** prev,
60727 unsigned long start, unsigned long end)
60728 {
60729 +
60730 +#ifdef CONFIG_PAX_SEGMEXEC
60731 + struct vm_area_struct *vma_m;
60732 +#endif
60733 +
60734 *prev = vma;
60735 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
60736 return -EINVAL;
60737 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
60738 zap_page_range(vma, start, end - start, &details);
60739 } else
60740 zap_page_range(vma, start, end - start, NULL);
60741 +
60742 +#ifdef CONFIG_PAX_SEGMEXEC
60743 + vma_m = pax_find_mirror_vma(vma);
60744 + if (vma_m) {
60745 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
60746 + struct zap_details details = {
60747 + .nonlinear_vma = vma_m,
60748 + .last_index = ULONG_MAX,
60749 + };
60750 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
60751 + } else
60752 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
60753 + }
60754 +#endif
60755 +
60756 return 0;
60757 }
60758
60759 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
60760 if (end < start)
60761 goto out;
60762
60763 +#ifdef CONFIG_PAX_SEGMEXEC
60764 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60765 + if (end > SEGMEXEC_TASK_SIZE)
60766 + goto out;
60767 + } else
60768 +#endif
60769 +
60770 + if (end > TASK_SIZE)
60771 + goto out;
60772 +
60773 error = 0;
60774 if (end == start)
60775 goto out;
60776 diff -urNp linux-3.0.3/mm/memory.c linux-3.0.3/mm/memory.c
60777 --- linux-3.0.3/mm/memory.c 2011-08-23 21:44:40.000000000 -0400
60778 +++ linux-3.0.3/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
60779 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
60780 return;
60781
60782 pmd = pmd_offset(pud, start);
60783 +
60784 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
60785 pud_clear(pud);
60786 pmd_free_tlb(tlb, pmd, start);
60787 +#endif
60788 +
60789 }
60790
60791 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
60792 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
60793 if (end - 1 > ceiling - 1)
60794 return;
60795
60796 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
60797 pud = pud_offset(pgd, start);
60798 pgd_clear(pgd);
60799 pud_free_tlb(tlb, pud, start);
60800 +#endif
60801 +
60802 }
60803
60804 /*
60805 @@ -1577,12 +1584,6 @@ no_page_table:
60806 return page;
60807 }
60808
60809 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
60810 -{
60811 - return stack_guard_page_start(vma, addr) ||
60812 - stack_guard_page_end(vma, addr+PAGE_SIZE);
60813 -}
60814 -
60815 /**
60816 * __get_user_pages() - pin user pages in memory
60817 * @tsk: task_struct of target task
60818 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
60819 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
60820 i = 0;
60821
60822 - do {
60823 + while (nr_pages) {
60824 struct vm_area_struct *vma;
60825
60826 - vma = find_extend_vma(mm, start);
60827 + vma = find_vma(mm, start);
60828 if (!vma && in_gate_area(mm, start)) {
60829 unsigned long pg = start & PAGE_MASK;
60830 pgd_t *pgd;
60831 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
60832 goto next_page;
60833 }
60834
60835 - if (!vma ||
60836 + if (!vma || start < vma->vm_start ||
60837 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
60838 !(vm_flags & vma->vm_flags))
60839 return i ? : -EFAULT;
60840 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
60841 int ret;
60842 unsigned int fault_flags = 0;
60843
60844 - /* For mlock, just skip the stack guard page. */
60845 - if (foll_flags & FOLL_MLOCK) {
60846 - if (stack_guard_page(vma, start))
60847 - goto next_page;
60848 - }
60849 if (foll_flags & FOLL_WRITE)
60850 fault_flags |= FAULT_FLAG_WRITE;
60851 if (nonblocking)
60852 @@ -1811,7 +1807,7 @@ next_page:
60853 start += PAGE_SIZE;
60854 nr_pages--;
60855 } while (nr_pages && start < vma->vm_end);
60856 - } while (nr_pages);
60857 + }
60858 return i;
60859 }
60860 EXPORT_SYMBOL(__get_user_pages);
60861 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
60862 page_add_file_rmap(page);
60863 set_pte_at(mm, addr, pte, mk_pte(page, prot));
60864
60865 +#ifdef CONFIG_PAX_SEGMEXEC
60866 + pax_mirror_file_pte(vma, addr, page, ptl);
60867 +#endif
60868 +
60869 retval = 0;
60870 pte_unmap_unlock(pte, ptl);
60871 return retval;
60872 @@ -2052,10 +2052,22 @@ out:
60873 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
60874 struct page *page)
60875 {
60876 +
60877 +#ifdef CONFIG_PAX_SEGMEXEC
60878 + struct vm_area_struct *vma_m;
60879 +#endif
60880 +
60881 if (addr < vma->vm_start || addr >= vma->vm_end)
60882 return -EFAULT;
60883 if (!page_count(page))
60884 return -EINVAL;
60885 +
60886 +#ifdef CONFIG_PAX_SEGMEXEC
60887 + vma_m = pax_find_mirror_vma(vma);
60888 + if (vma_m)
60889 + vma_m->vm_flags |= VM_INSERTPAGE;
60890 +#endif
60891 +
60892 vma->vm_flags |= VM_INSERTPAGE;
60893 return insert_page(vma, addr, page, vma->vm_page_prot);
60894 }
60895 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
60896 unsigned long pfn)
60897 {
60898 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
60899 + BUG_ON(vma->vm_mirror);
60900
60901 if (addr < vma->vm_start || addr >= vma->vm_end)
60902 return -EFAULT;
60903 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
60904 copy_user_highpage(dst, src, va, vma);
60905 }
60906
60907 +#ifdef CONFIG_PAX_SEGMEXEC
60908 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
60909 +{
60910 + struct mm_struct *mm = vma->vm_mm;
60911 + spinlock_t *ptl;
60912 + pte_t *pte, entry;
60913 +
60914 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
60915 + entry = *pte;
60916 + if (!pte_present(entry)) {
60917 + if (!pte_none(entry)) {
60918 + BUG_ON(pte_file(entry));
60919 + free_swap_and_cache(pte_to_swp_entry(entry));
60920 + pte_clear_not_present_full(mm, address, pte, 0);
60921 + }
60922 + } else {
60923 + struct page *page;
60924 +
60925 + flush_cache_page(vma, address, pte_pfn(entry));
60926 + entry = ptep_clear_flush(vma, address, pte);
60927 + BUG_ON(pte_dirty(entry));
60928 + page = vm_normal_page(vma, address, entry);
60929 + if (page) {
60930 + update_hiwater_rss(mm);
60931 + if (PageAnon(page))
60932 + dec_mm_counter_fast(mm, MM_ANONPAGES);
60933 + else
60934 + dec_mm_counter_fast(mm, MM_FILEPAGES);
60935 + page_remove_rmap(page);
60936 + page_cache_release(page);
60937 + }
60938 + }
60939 + pte_unmap_unlock(pte, ptl);
60940 +}
60941 +
60942 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
60943 + *
60944 + * the ptl of the lower mapped page is held on entry and is not released on exit
60945 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
60946 + */
60947 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
60948 +{
60949 + struct mm_struct *mm = vma->vm_mm;
60950 + unsigned long address_m;
60951 + spinlock_t *ptl_m;
60952 + struct vm_area_struct *vma_m;
60953 + pmd_t *pmd_m;
60954 + pte_t *pte_m, entry_m;
60955 +
60956 + BUG_ON(!page_m || !PageAnon(page_m));
60957 +
60958 + vma_m = pax_find_mirror_vma(vma);
60959 + if (!vma_m)
60960 + return;
60961 +
60962 + BUG_ON(!PageLocked(page_m));
60963 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60964 + address_m = address + SEGMEXEC_TASK_SIZE;
60965 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
60966 + pte_m = pte_offset_map(pmd_m, address_m);
60967 + ptl_m = pte_lockptr(mm, pmd_m);
60968 + if (ptl != ptl_m) {
60969 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
60970 + if (!pte_none(*pte_m))
60971 + goto out;
60972 + }
60973 +
60974 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
60975 + page_cache_get(page_m);
60976 + page_add_anon_rmap(page_m, vma_m, address_m);
60977 + inc_mm_counter_fast(mm, MM_ANONPAGES);
60978 + set_pte_at(mm, address_m, pte_m, entry_m);
60979 + update_mmu_cache(vma_m, address_m, entry_m);
60980 +out:
60981 + if (ptl != ptl_m)
60982 + spin_unlock(ptl_m);
60983 + pte_unmap(pte_m);
60984 + unlock_page(page_m);
60985 +}
60986 +
60987 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
60988 +{
60989 + struct mm_struct *mm = vma->vm_mm;
60990 + unsigned long address_m;
60991 + spinlock_t *ptl_m;
60992 + struct vm_area_struct *vma_m;
60993 + pmd_t *pmd_m;
60994 + pte_t *pte_m, entry_m;
60995 +
60996 + BUG_ON(!page_m || PageAnon(page_m));
60997 +
60998 + vma_m = pax_find_mirror_vma(vma);
60999 + if (!vma_m)
61000 + return;
61001 +
61002 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61003 + address_m = address + SEGMEXEC_TASK_SIZE;
61004 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61005 + pte_m = pte_offset_map(pmd_m, address_m);
61006 + ptl_m = pte_lockptr(mm, pmd_m);
61007 + if (ptl != ptl_m) {
61008 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61009 + if (!pte_none(*pte_m))
61010 + goto out;
61011 + }
61012 +
61013 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61014 + page_cache_get(page_m);
61015 + page_add_file_rmap(page_m);
61016 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61017 + set_pte_at(mm, address_m, pte_m, entry_m);
61018 + update_mmu_cache(vma_m, address_m, entry_m);
61019 +out:
61020 + if (ptl != ptl_m)
61021 + spin_unlock(ptl_m);
61022 + pte_unmap(pte_m);
61023 +}
61024 +
61025 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61026 +{
61027 + struct mm_struct *mm = vma->vm_mm;
61028 + unsigned long address_m;
61029 + spinlock_t *ptl_m;
61030 + struct vm_area_struct *vma_m;
61031 + pmd_t *pmd_m;
61032 + pte_t *pte_m, entry_m;
61033 +
61034 + vma_m = pax_find_mirror_vma(vma);
61035 + if (!vma_m)
61036 + return;
61037 +
61038 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61039 + address_m = address + SEGMEXEC_TASK_SIZE;
61040 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61041 + pte_m = pte_offset_map(pmd_m, address_m);
61042 + ptl_m = pte_lockptr(mm, pmd_m);
61043 + if (ptl != ptl_m) {
61044 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61045 + if (!pte_none(*pte_m))
61046 + goto out;
61047 + }
61048 +
61049 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61050 + set_pte_at(mm, address_m, pte_m, entry_m);
61051 +out:
61052 + if (ptl != ptl_m)
61053 + spin_unlock(ptl_m);
61054 + pte_unmap(pte_m);
61055 +}
61056 +
61057 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61058 +{
61059 + struct page *page_m;
61060 + pte_t entry;
61061 +
61062 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61063 + goto out;
61064 +
61065 + entry = *pte;
61066 + page_m = vm_normal_page(vma, address, entry);
61067 + if (!page_m)
61068 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61069 + else if (PageAnon(page_m)) {
61070 + if (pax_find_mirror_vma(vma)) {
61071 + pte_unmap_unlock(pte, ptl);
61072 + lock_page(page_m);
61073 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61074 + if (pte_same(entry, *pte))
61075 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61076 + else
61077 + unlock_page(page_m);
61078 + }
61079 + } else
61080 + pax_mirror_file_pte(vma, address, page_m, ptl);
61081 +
61082 +out:
61083 + pte_unmap_unlock(pte, ptl);
61084 +}
61085 +#endif
61086 +
61087 /*
61088 * This routine handles present pages, when users try to write
61089 * to a shared page. It is done by copying the page to a new address
61090 @@ -2667,6 +2860,12 @@ gotten:
61091 */
61092 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61093 if (likely(pte_same(*page_table, orig_pte))) {
61094 +
61095 +#ifdef CONFIG_PAX_SEGMEXEC
61096 + if (pax_find_mirror_vma(vma))
61097 + BUG_ON(!trylock_page(new_page));
61098 +#endif
61099 +
61100 if (old_page) {
61101 if (!PageAnon(old_page)) {
61102 dec_mm_counter_fast(mm, MM_FILEPAGES);
61103 @@ -2718,6 +2917,10 @@ gotten:
61104 page_remove_rmap(old_page);
61105 }
61106
61107 +#ifdef CONFIG_PAX_SEGMEXEC
61108 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61109 +#endif
61110 +
61111 /* Free the old page.. */
61112 new_page = old_page;
61113 ret |= VM_FAULT_WRITE;
61114 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
61115 swap_free(entry);
61116 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61117 try_to_free_swap(page);
61118 +
61119 +#ifdef CONFIG_PAX_SEGMEXEC
61120 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61121 +#endif
61122 +
61123 unlock_page(page);
61124 if (swapcache) {
61125 /*
61126 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
61127
61128 /* No need to invalidate - it was non-present before */
61129 update_mmu_cache(vma, address, page_table);
61130 +
61131 +#ifdef CONFIG_PAX_SEGMEXEC
61132 + pax_mirror_anon_pte(vma, address, page, ptl);
61133 +#endif
61134 +
61135 unlock:
61136 pte_unmap_unlock(page_table, ptl);
61137 out:
61138 @@ -3039,40 +3252,6 @@ out_release:
61139 }
61140
61141 /*
61142 - * This is like a special single-page "expand_{down|up}wards()",
61143 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61144 - * doesn't hit another vma.
61145 - */
61146 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61147 -{
61148 - address &= PAGE_MASK;
61149 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61150 - struct vm_area_struct *prev = vma->vm_prev;
61151 -
61152 - /*
61153 - * Is there a mapping abutting this one below?
61154 - *
61155 - * That's only ok if it's the same stack mapping
61156 - * that has gotten split..
61157 - */
61158 - if (prev && prev->vm_end == address)
61159 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61160 -
61161 - expand_downwards(vma, address - PAGE_SIZE);
61162 - }
61163 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61164 - struct vm_area_struct *next = vma->vm_next;
61165 -
61166 - /* As VM_GROWSDOWN but s/below/above/ */
61167 - if (next && next->vm_start == address + PAGE_SIZE)
61168 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61169 -
61170 - expand_upwards(vma, address + PAGE_SIZE);
61171 - }
61172 - return 0;
61173 -}
61174 -
61175 -/*
61176 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61177 * but allow concurrent faults), and pte mapped but not yet locked.
61178 * We return with mmap_sem still held, but pte unmapped and unlocked.
61179 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
61180 unsigned long address, pte_t *page_table, pmd_t *pmd,
61181 unsigned int flags)
61182 {
61183 - struct page *page;
61184 + struct page *page = NULL;
61185 spinlock_t *ptl;
61186 pte_t entry;
61187
61188 - pte_unmap(page_table);
61189 -
61190 - /* Check if we need to add a guard page to the stack */
61191 - if (check_stack_guard_page(vma, address) < 0)
61192 - return VM_FAULT_SIGBUS;
61193 -
61194 - /* Use the zero-page for reads */
61195 if (!(flags & FAULT_FLAG_WRITE)) {
61196 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61197 vma->vm_page_prot));
61198 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61199 + ptl = pte_lockptr(mm, pmd);
61200 + spin_lock(ptl);
61201 if (!pte_none(*page_table))
61202 goto unlock;
61203 goto setpte;
61204 }
61205
61206 /* Allocate our own private page. */
61207 + pte_unmap(page_table);
61208 +
61209 if (unlikely(anon_vma_prepare(vma)))
61210 goto oom;
61211 page = alloc_zeroed_user_highpage_movable(vma, address);
61212 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
61213 if (!pte_none(*page_table))
61214 goto release;
61215
61216 +#ifdef CONFIG_PAX_SEGMEXEC
61217 + if (pax_find_mirror_vma(vma))
61218 + BUG_ON(!trylock_page(page));
61219 +#endif
61220 +
61221 inc_mm_counter_fast(mm, MM_ANONPAGES);
61222 page_add_new_anon_rmap(page, vma, address);
61223 setpte:
61224 @@ -3127,6 +3307,12 @@ setpte:
61225
61226 /* No need to invalidate - it was non-present before */
61227 update_mmu_cache(vma, address, page_table);
61228 +
61229 +#ifdef CONFIG_PAX_SEGMEXEC
61230 + if (page)
61231 + pax_mirror_anon_pte(vma, address, page, ptl);
61232 +#endif
61233 +
61234 unlock:
61235 pte_unmap_unlock(page_table, ptl);
61236 return 0;
61237 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
61238 */
61239 /* Only go through if we didn't race with anybody else... */
61240 if (likely(pte_same(*page_table, orig_pte))) {
61241 +
61242 +#ifdef CONFIG_PAX_SEGMEXEC
61243 + if (anon && pax_find_mirror_vma(vma))
61244 + BUG_ON(!trylock_page(page));
61245 +#endif
61246 +
61247 flush_icache_page(vma, page);
61248 entry = mk_pte(page, vma->vm_page_prot);
61249 if (flags & FAULT_FLAG_WRITE)
61250 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
61251
61252 /* no need to invalidate: a not-present page won't be cached */
61253 update_mmu_cache(vma, address, page_table);
61254 +
61255 +#ifdef CONFIG_PAX_SEGMEXEC
61256 + if (anon)
61257 + pax_mirror_anon_pte(vma, address, page, ptl);
61258 + else
61259 + pax_mirror_file_pte(vma, address, page, ptl);
61260 +#endif
61261 +
61262 } else {
61263 if (charged)
61264 mem_cgroup_uncharge_page(page);
61265 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
61266 if (flags & FAULT_FLAG_WRITE)
61267 flush_tlb_fix_spurious_fault(vma, address);
61268 }
61269 +
61270 +#ifdef CONFIG_PAX_SEGMEXEC
61271 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61272 + return 0;
61273 +#endif
61274 +
61275 unlock:
61276 pte_unmap_unlock(pte, ptl);
61277 return 0;
61278 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
61279 pmd_t *pmd;
61280 pte_t *pte;
61281
61282 +#ifdef CONFIG_PAX_SEGMEXEC
61283 + struct vm_area_struct *vma_m;
61284 +#endif
61285 +
61286 __set_current_state(TASK_RUNNING);
61287
61288 count_vm_event(PGFAULT);
61289 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
61290 if (unlikely(is_vm_hugetlb_page(vma)))
61291 return hugetlb_fault(mm, vma, address, flags);
61292
61293 +#ifdef CONFIG_PAX_SEGMEXEC
61294 + vma_m = pax_find_mirror_vma(vma);
61295 + if (vma_m) {
61296 + unsigned long address_m;
61297 + pgd_t *pgd_m;
61298 + pud_t *pud_m;
61299 + pmd_t *pmd_m;
61300 +
61301 + if (vma->vm_start > vma_m->vm_start) {
61302 + address_m = address;
61303 + address -= SEGMEXEC_TASK_SIZE;
61304 + vma = vma_m;
61305 + } else
61306 + address_m = address + SEGMEXEC_TASK_SIZE;
61307 +
61308 + pgd_m = pgd_offset(mm, address_m);
61309 + pud_m = pud_alloc(mm, pgd_m, address_m);
61310 + if (!pud_m)
61311 + return VM_FAULT_OOM;
61312 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61313 + if (!pmd_m)
61314 + return VM_FAULT_OOM;
61315 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61316 + return VM_FAULT_OOM;
61317 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61318 + }
61319 +#endif
61320 +
61321 pgd = pgd_offset(mm, address);
61322 pud = pud_alloc(mm, pgd, address);
61323 if (!pud)
61324 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
61325 * run pte_offset_map on the pmd, if an huge pmd could
61326 * materialize from under us from a different thread.
61327 */
61328 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61329 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61330 return VM_FAULT_OOM;
61331 /* if an huge pmd materialized from under us just retry later */
61332 if (unlikely(pmd_trans_huge(*pmd)))
61333 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
61334 gate_vma.vm_start = FIXADDR_USER_START;
61335 gate_vma.vm_end = FIXADDR_USER_END;
61336 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61337 - gate_vma.vm_page_prot = __P101;
61338 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61339 /*
61340 * Make sure the vDSO gets into every core dump.
61341 * Dumping its contents makes post-mortem fully interpretable later
61342 diff -urNp linux-3.0.3/mm/memory-failure.c linux-3.0.3/mm/memory-failure.c
61343 --- linux-3.0.3/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
61344 +++ linux-3.0.3/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
61345 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61346
61347 int sysctl_memory_failure_recovery __read_mostly = 1;
61348
61349 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61350 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61351
61352 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61353
61354 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
61355 }
61356
61357 nr_pages = 1 << compound_trans_order(hpage);
61358 - atomic_long_add(nr_pages, &mce_bad_pages);
61359 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61360
61361 /*
61362 * We need/can do nothing about count=0 pages.
61363 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
61364 if (!PageHWPoison(hpage)
61365 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61366 || (p != hpage && TestSetPageHWPoison(hpage))) {
61367 - atomic_long_sub(nr_pages, &mce_bad_pages);
61368 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61369 return 0;
61370 }
61371 set_page_hwpoison_huge_page(hpage);
61372 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
61373 }
61374 if (hwpoison_filter(p)) {
61375 if (TestClearPageHWPoison(p))
61376 - atomic_long_sub(nr_pages, &mce_bad_pages);
61377 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61378 unlock_page(hpage);
61379 put_page(hpage);
61380 return 0;
61381 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
61382 return 0;
61383 }
61384 if (TestClearPageHWPoison(p))
61385 - atomic_long_sub(nr_pages, &mce_bad_pages);
61386 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61387 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61388 return 0;
61389 }
61390 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
61391 */
61392 if (TestClearPageHWPoison(page)) {
61393 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61394 - atomic_long_sub(nr_pages, &mce_bad_pages);
61395 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61396 freeit = 1;
61397 if (PageHuge(page))
61398 clear_page_hwpoison_huge_page(page);
61399 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
61400 }
61401 done:
61402 if (!PageHWPoison(hpage))
61403 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61404 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61405 set_page_hwpoison_huge_page(hpage);
61406 dequeue_hwpoisoned_huge_page(hpage);
61407 /* keep elevated page count for bad page */
61408 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
61409 return ret;
61410
61411 done:
61412 - atomic_long_add(1, &mce_bad_pages);
61413 + atomic_long_add_unchecked(1, &mce_bad_pages);
61414 SetPageHWPoison(page);
61415 /* keep elevated page count for bad page */
61416 return ret;
61417 diff -urNp linux-3.0.3/mm/mempolicy.c linux-3.0.3/mm/mempolicy.c
61418 --- linux-3.0.3/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
61419 +++ linux-3.0.3/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
61420 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
61421 unsigned long vmstart;
61422 unsigned long vmend;
61423
61424 +#ifdef CONFIG_PAX_SEGMEXEC
61425 + struct vm_area_struct *vma_m;
61426 +#endif
61427 +
61428 vma = find_vma_prev(mm, start, &prev);
61429 if (!vma || vma->vm_start > start)
61430 return -EFAULT;
61431 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
61432 err = policy_vma(vma, new_pol);
61433 if (err)
61434 goto out;
61435 +
61436 +#ifdef CONFIG_PAX_SEGMEXEC
61437 + vma_m = pax_find_mirror_vma(vma);
61438 + if (vma_m) {
61439 + err = policy_vma(vma_m, new_pol);
61440 + if (err)
61441 + goto out;
61442 + }
61443 +#endif
61444 +
61445 }
61446
61447 out:
61448 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
61449
61450 if (end < start)
61451 return -EINVAL;
61452 +
61453 +#ifdef CONFIG_PAX_SEGMEXEC
61454 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61455 + if (end > SEGMEXEC_TASK_SIZE)
61456 + return -EINVAL;
61457 + } else
61458 +#endif
61459 +
61460 + if (end > TASK_SIZE)
61461 + return -EINVAL;
61462 +
61463 if (end == start)
61464 return 0;
61465
61466 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61467 if (!mm)
61468 goto out;
61469
61470 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61471 + if (mm != current->mm &&
61472 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61473 + err = -EPERM;
61474 + goto out;
61475 + }
61476 +#endif
61477 +
61478 /*
61479 * Check if this process has the right to modify the specified
61480 * process. The right exists if the process has administrative
61481 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61482 rcu_read_lock();
61483 tcred = __task_cred(task);
61484 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61485 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61486 - !capable(CAP_SYS_NICE)) {
61487 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61488 rcu_read_unlock();
61489 err = -EPERM;
61490 goto out;
61491 diff -urNp linux-3.0.3/mm/migrate.c linux-3.0.3/mm/migrate.c
61492 --- linux-3.0.3/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
61493 +++ linux-3.0.3/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
61494 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
61495 unsigned long chunk_start;
61496 int err;
61497
61498 + pax_track_stack();
61499 +
61500 task_nodes = cpuset_mems_allowed(task);
61501
61502 err = -ENOMEM;
61503 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61504 if (!mm)
61505 return -EINVAL;
61506
61507 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61508 + if (mm != current->mm &&
61509 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61510 + err = -EPERM;
61511 + goto out;
61512 + }
61513 +#endif
61514 +
61515 /*
61516 * Check if this process has the right to modify the specified
61517 * process. The right exists if the process has administrative
61518 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61519 rcu_read_lock();
61520 tcred = __task_cred(task);
61521 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61522 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61523 - !capable(CAP_SYS_NICE)) {
61524 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61525 rcu_read_unlock();
61526 err = -EPERM;
61527 goto out;
61528 diff -urNp linux-3.0.3/mm/mlock.c linux-3.0.3/mm/mlock.c
61529 --- linux-3.0.3/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
61530 +++ linux-3.0.3/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
61531 @@ -13,6 +13,7 @@
61532 #include <linux/pagemap.h>
61533 #include <linux/mempolicy.h>
61534 #include <linux/syscalls.h>
61535 +#include <linux/security.h>
61536 #include <linux/sched.h>
61537 #include <linux/module.h>
61538 #include <linux/rmap.h>
61539 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61540 return -EINVAL;
61541 if (end == start)
61542 return 0;
61543 + if (end > TASK_SIZE)
61544 + return -EINVAL;
61545 +
61546 vma = find_vma_prev(current->mm, start, &prev);
61547 if (!vma || vma->vm_start > start)
61548 return -ENOMEM;
61549 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61550 for (nstart = start ; ; ) {
61551 vm_flags_t newflags;
61552
61553 +#ifdef CONFIG_PAX_SEGMEXEC
61554 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61555 + break;
61556 +#endif
61557 +
61558 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61559
61560 newflags = vma->vm_flags | VM_LOCKED;
61561 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61562 lock_limit >>= PAGE_SHIFT;
61563
61564 /* check against resource limits */
61565 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61566 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61567 error = do_mlock(start, len, 1);
61568 up_write(&current->mm->mmap_sem);
61569 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61570 static int do_mlockall(int flags)
61571 {
61572 struct vm_area_struct * vma, * prev = NULL;
61573 - unsigned int def_flags = 0;
61574
61575 if (flags & MCL_FUTURE)
61576 - def_flags = VM_LOCKED;
61577 - current->mm->def_flags = def_flags;
61578 + current->mm->def_flags |= VM_LOCKED;
61579 + else
61580 + current->mm->def_flags &= ~VM_LOCKED;
61581 if (flags == MCL_FUTURE)
61582 goto out;
61583
61584 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61585 vm_flags_t newflags;
61586
61587 +#ifdef CONFIG_PAX_SEGMEXEC
61588 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61589 + break;
61590 +#endif
61591 +
61592 + BUG_ON(vma->vm_end > TASK_SIZE);
61593 newflags = vma->vm_flags | VM_LOCKED;
61594 if (!(flags & MCL_CURRENT))
61595 newflags &= ~VM_LOCKED;
61596 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61597 lock_limit >>= PAGE_SHIFT;
61598
61599 ret = -ENOMEM;
61600 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61601 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61602 capable(CAP_IPC_LOCK))
61603 ret = do_mlockall(flags);
61604 diff -urNp linux-3.0.3/mm/mmap.c linux-3.0.3/mm/mmap.c
61605 --- linux-3.0.3/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
61606 +++ linux-3.0.3/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
61607 @@ -46,6 +46,16 @@
61608 #define arch_rebalance_pgtables(addr, len) (addr)
61609 #endif
61610
61611 +static inline void verify_mm_writelocked(struct mm_struct *mm)
61612 +{
61613 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61614 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61615 + up_read(&mm->mmap_sem);
61616 + BUG();
61617 + }
61618 +#endif
61619 +}
61620 +
61621 static void unmap_region(struct mm_struct *mm,
61622 struct vm_area_struct *vma, struct vm_area_struct *prev,
61623 unsigned long start, unsigned long end);
61624 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61625 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61626 *
61627 */
61628 -pgprot_t protection_map[16] = {
61629 +pgprot_t protection_map[16] __read_only = {
61630 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61631 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61632 };
61633
61634 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
61635 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61636 {
61637 - return __pgprot(pgprot_val(protection_map[vm_flags &
61638 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61639 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61640 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61641 +
61642 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61643 + if (!(__supported_pte_mask & _PAGE_NX) &&
61644 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61645 + (vm_flags & (VM_READ | VM_WRITE)))
61646 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61647 +#endif
61648 +
61649 + return prot;
61650 }
61651 EXPORT_SYMBOL(vm_get_page_prot);
61652
61653 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
61654 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
61655 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61656 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61657 /*
61658 * Make sure vm_committed_as in one cacheline and not cacheline shared with
61659 * other variables. It can be updated by several CPUs frequently.
61660 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
61661 struct vm_area_struct *next = vma->vm_next;
61662
61663 might_sleep();
61664 + BUG_ON(vma->vm_mirror);
61665 if (vma->vm_ops && vma->vm_ops->close)
61666 vma->vm_ops->close(vma);
61667 if (vma->vm_file) {
61668 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61669 * not page aligned -Ram Gupta
61670 */
61671 rlim = rlimit(RLIMIT_DATA);
61672 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61673 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61674 (mm->end_data - mm->start_data) > rlim)
61675 goto out;
61676 @@ -697,6 +719,12 @@ static int
61677 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61678 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61679 {
61680 +
61681 +#ifdef CONFIG_PAX_SEGMEXEC
61682 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61683 + return 0;
61684 +#endif
61685 +
61686 if (is_mergeable_vma(vma, file, vm_flags) &&
61687 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61688 if (vma->vm_pgoff == vm_pgoff)
61689 @@ -716,6 +744,12 @@ static int
61690 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
61691 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61692 {
61693 +
61694 +#ifdef CONFIG_PAX_SEGMEXEC
61695 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
61696 + return 0;
61697 +#endif
61698 +
61699 if (is_mergeable_vma(vma, file, vm_flags) &&
61700 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
61701 pgoff_t vm_pglen;
61702 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
61703 struct vm_area_struct *vma_merge(struct mm_struct *mm,
61704 struct vm_area_struct *prev, unsigned long addr,
61705 unsigned long end, unsigned long vm_flags,
61706 - struct anon_vma *anon_vma, struct file *file,
61707 + struct anon_vma *anon_vma, struct file *file,
61708 pgoff_t pgoff, struct mempolicy *policy)
61709 {
61710 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
61711 struct vm_area_struct *area, *next;
61712 int err;
61713
61714 +#ifdef CONFIG_PAX_SEGMEXEC
61715 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
61716 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
61717 +
61718 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
61719 +#endif
61720 +
61721 /*
61722 * We later require that vma->vm_flags == vm_flags,
61723 * so this tests vma->vm_flags & VM_SPECIAL, too.
61724 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
61725 if (next && next->vm_end == end) /* cases 6, 7, 8 */
61726 next = next->vm_next;
61727
61728 +#ifdef CONFIG_PAX_SEGMEXEC
61729 + if (prev)
61730 + prev_m = pax_find_mirror_vma(prev);
61731 + if (area)
61732 + area_m = pax_find_mirror_vma(area);
61733 + if (next)
61734 + next_m = pax_find_mirror_vma(next);
61735 +#endif
61736 +
61737 /*
61738 * Can it merge with the predecessor?
61739 */
61740 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
61741 /* cases 1, 6 */
61742 err = vma_adjust(prev, prev->vm_start,
61743 next->vm_end, prev->vm_pgoff, NULL);
61744 - } else /* cases 2, 5, 7 */
61745 +
61746 +#ifdef CONFIG_PAX_SEGMEXEC
61747 + if (!err && prev_m)
61748 + err = vma_adjust(prev_m, prev_m->vm_start,
61749 + next_m->vm_end, prev_m->vm_pgoff, NULL);
61750 +#endif
61751 +
61752 + } else { /* cases 2, 5, 7 */
61753 err = vma_adjust(prev, prev->vm_start,
61754 end, prev->vm_pgoff, NULL);
61755 +
61756 +#ifdef CONFIG_PAX_SEGMEXEC
61757 + if (!err && prev_m)
61758 + err = vma_adjust(prev_m, prev_m->vm_start,
61759 + end_m, prev_m->vm_pgoff, NULL);
61760 +#endif
61761 +
61762 + }
61763 if (err)
61764 return NULL;
61765 khugepaged_enter_vma_merge(prev);
61766 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
61767 mpol_equal(policy, vma_policy(next)) &&
61768 can_vma_merge_before(next, vm_flags,
61769 anon_vma, file, pgoff+pglen)) {
61770 - if (prev && addr < prev->vm_end) /* case 4 */
61771 + if (prev && addr < prev->vm_end) { /* case 4 */
61772 err = vma_adjust(prev, prev->vm_start,
61773 addr, prev->vm_pgoff, NULL);
61774 - else /* cases 3, 8 */
61775 +
61776 +#ifdef CONFIG_PAX_SEGMEXEC
61777 + if (!err && prev_m)
61778 + err = vma_adjust(prev_m, prev_m->vm_start,
61779 + addr_m, prev_m->vm_pgoff, NULL);
61780 +#endif
61781 +
61782 + } else { /* cases 3, 8 */
61783 err = vma_adjust(area, addr, next->vm_end,
61784 next->vm_pgoff - pglen, NULL);
61785 +
61786 +#ifdef CONFIG_PAX_SEGMEXEC
61787 + if (!err && area_m)
61788 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
61789 + next_m->vm_pgoff - pglen, NULL);
61790 +#endif
61791 +
61792 + }
61793 if (err)
61794 return NULL;
61795 khugepaged_enter_vma_merge(area);
61796 @@ -929,14 +1009,11 @@ none:
61797 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
61798 struct file *file, long pages)
61799 {
61800 - const unsigned long stack_flags
61801 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
61802 -
61803 if (file) {
61804 mm->shared_vm += pages;
61805 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
61806 mm->exec_vm += pages;
61807 - } else if (flags & stack_flags)
61808 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
61809 mm->stack_vm += pages;
61810 if (flags & (VM_RESERVED|VM_IO))
61811 mm->reserved_vm += pages;
61812 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
61813 * (the exception is when the underlying filesystem is noexec
61814 * mounted, in which case we dont add PROT_EXEC.)
61815 */
61816 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
61817 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
61818 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
61819 prot |= PROT_EXEC;
61820
61821 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
61822 /* Obtain the address to map to. we verify (or select) it and ensure
61823 * that it represents a valid section of the address space.
61824 */
61825 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
61826 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
61827 if (addr & ~PAGE_MASK)
61828 return addr;
61829
61830 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
61831 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
61832 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
61833
61834 +#ifdef CONFIG_PAX_MPROTECT
61835 + if (mm->pax_flags & MF_PAX_MPROTECT) {
61836 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
61837 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
61838 + gr_log_rwxmmap(file);
61839 +
61840 +#ifdef CONFIG_PAX_EMUPLT
61841 + vm_flags &= ~VM_EXEC;
61842 +#else
61843 + return -EPERM;
61844 +#endif
61845 +
61846 + }
61847 +
61848 + if (!(vm_flags & VM_EXEC))
61849 + vm_flags &= ~VM_MAYEXEC;
61850 +#else
61851 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
61852 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61853 +#endif
61854 + else
61855 + vm_flags &= ~VM_MAYWRITE;
61856 + }
61857 +#endif
61858 +
61859 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61860 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
61861 + vm_flags &= ~VM_PAGEEXEC;
61862 +#endif
61863 +
61864 if (flags & MAP_LOCKED)
61865 if (!can_do_mlock())
61866 return -EPERM;
61867 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
61868 locked += mm->locked_vm;
61869 lock_limit = rlimit(RLIMIT_MEMLOCK);
61870 lock_limit >>= PAGE_SHIFT;
61871 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
61872 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61873 return -EAGAIN;
61874 }
61875 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
61876 if (error)
61877 return error;
61878
61879 + if (!gr_acl_handle_mmap(file, prot))
61880 + return -EACCES;
61881 +
61882 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
61883 }
61884 EXPORT_SYMBOL(do_mmap_pgoff);
61885 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
61886 vm_flags_t vm_flags = vma->vm_flags;
61887
61888 /* If it was private or non-writable, the write bit is already clear */
61889 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
61890 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
61891 return 0;
61892
61893 /* The backer wishes to know when pages are first written to? */
61894 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
61895 unsigned long charged = 0;
61896 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
61897
61898 +#ifdef CONFIG_PAX_SEGMEXEC
61899 + struct vm_area_struct *vma_m = NULL;
61900 +#endif
61901 +
61902 + /*
61903 + * mm->mmap_sem is required to protect against another thread
61904 + * changing the mappings in case we sleep.
61905 + */
61906 + verify_mm_writelocked(mm);
61907 +
61908 /* Clear old maps */
61909 error = -ENOMEM;
61910 -munmap_back:
61911 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61912 if (vma && vma->vm_start < addr + len) {
61913 if (do_munmap(mm, addr, len))
61914 return -ENOMEM;
61915 - goto munmap_back;
61916 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
61917 + BUG_ON(vma && vma->vm_start < addr + len);
61918 }
61919
61920 /* Check against address space limit. */
61921 @@ -1266,6 +1387,16 @@ munmap_back:
61922 goto unacct_error;
61923 }
61924
61925 +#ifdef CONFIG_PAX_SEGMEXEC
61926 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
61927 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
61928 + if (!vma_m) {
61929 + error = -ENOMEM;
61930 + goto free_vma;
61931 + }
61932 + }
61933 +#endif
61934 +
61935 vma->vm_mm = mm;
61936 vma->vm_start = addr;
61937 vma->vm_end = addr + len;
61938 @@ -1289,6 +1420,19 @@ munmap_back:
61939 error = file->f_op->mmap(file, vma);
61940 if (error)
61941 goto unmap_and_free_vma;
61942 +
61943 +#ifdef CONFIG_PAX_SEGMEXEC
61944 + if (vma_m && (vm_flags & VM_EXECUTABLE))
61945 + added_exe_file_vma(mm);
61946 +#endif
61947 +
61948 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61949 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
61950 + vma->vm_flags |= VM_PAGEEXEC;
61951 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61952 + }
61953 +#endif
61954 +
61955 if (vm_flags & VM_EXECUTABLE)
61956 added_exe_file_vma(mm);
61957
61958 @@ -1324,6 +1468,11 @@ munmap_back:
61959 vma_link(mm, vma, prev, rb_link, rb_parent);
61960 file = vma->vm_file;
61961
61962 +#ifdef CONFIG_PAX_SEGMEXEC
61963 + if (vma_m)
61964 + BUG_ON(pax_mirror_vma(vma_m, vma));
61965 +#endif
61966 +
61967 /* Once vma denies write, undo our temporary denial count */
61968 if (correct_wcount)
61969 atomic_inc(&inode->i_writecount);
61970 @@ -1332,6 +1481,7 @@ out:
61971
61972 mm->total_vm += len >> PAGE_SHIFT;
61973 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
61974 + track_exec_limit(mm, addr, addr + len, vm_flags);
61975 if (vm_flags & VM_LOCKED) {
61976 if (!mlock_vma_pages_range(vma, addr, addr + len))
61977 mm->locked_vm += (len >> PAGE_SHIFT);
61978 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
61979 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
61980 charged = 0;
61981 free_vma:
61982 +
61983 +#ifdef CONFIG_PAX_SEGMEXEC
61984 + if (vma_m)
61985 + kmem_cache_free(vm_area_cachep, vma_m);
61986 +#endif
61987 +
61988 kmem_cache_free(vm_area_cachep, vma);
61989 unacct_error:
61990 if (charged)
61991 @@ -1356,6 +1512,44 @@ unacct_error:
61992 return error;
61993 }
61994
61995 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
61996 +{
61997 + if (!vma) {
61998 +#ifdef CONFIG_STACK_GROWSUP
61999 + if (addr > sysctl_heap_stack_gap)
62000 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62001 + else
62002 + vma = find_vma(current->mm, 0);
62003 + if (vma && (vma->vm_flags & VM_GROWSUP))
62004 + return false;
62005 +#endif
62006 + return true;
62007 + }
62008 +
62009 + if (addr + len > vma->vm_start)
62010 + return false;
62011 +
62012 + if (vma->vm_flags & VM_GROWSDOWN)
62013 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62014 +#ifdef CONFIG_STACK_GROWSUP
62015 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62016 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62017 +#endif
62018 +
62019 + return true;
62020 +}
62021 +
62022 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62023 +{
62024 + if (vma->vm_start < len)
62025 + return -ENOMEM;
62026 + if (!(vma->vm_flags & VM_GROWSDOWN))
62027 + return vma->vm_start - len;
62028 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62029 + return vma->vm_start - len - sysctl_heap_stack_gap;
62030 + return -ENOMEM;
62031 +}
62032 +
62033 /* Get an address range which is currently unmapped.
62034 * For shmat() with addr=0.
62035 *
62036 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
62037 if (flags & MAP_FIXED)
62038 return addr;
62039
62040 +#ifdef CONFIG_PAX_RANDMMAP
62041 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62042 +#endif
62043 +
62044 if (addr) {
62045 addr = PAGE_ALIGN(addr);
62046 - vma = find_vma(mm, addr);
62047 - if (TASK_SIZE - len >= addr &&
62048 - (!vma || addr + len <= vma->vm_start))
62049 - return addr;
62050 + if (TASK_SIZE - len >= addr) {
62051 + vma = find_vma(mm, addr);
62052 + if (check_heap_stack_gap(vma, addr, len))
62053 + return addr;
62054 + }
62055 }
62056 if (len > mm->cached_hole_size) {
62057 - start_addr = addr = mm->free_area_cache;
62058 + start_addr = addr = mm->free_area_cache;
62059 } else {
62060 - start_addr = addr = TASK_UNMAPPED_BASE;
62061 - mm->cached_hole_size = 0;
62062 + start_addr = addr = mm->mmap_base;
62063 + mm->cached_hole_size = 0;
62064 }
62065
62066 full_search:
62067 @@ -1404,34 +1603,40 @@ full_search:
62068 * Start a new search - just in case we missed
62069 * some holes.
62070 */
62071 - if (start_addr != TASK_UNMAPPED_BASE) {
62072 - addr = TASK_UNMAPPED_BASE;
62073 - start_addr = addr;
62074 + if (start_addr != mm->mmap_base) {
62075 + start_addr = addr = mm->mmap_base;
62076 mm->cached_hole_size = 0;
62077 goto full_search;
62078 }
62079 return -ENOMEM;
62080 }
62081 - if (!vma || addr + len <= vma->vm_start) {
62082 - /*
62083 - * Remember the place where we stopped the search:
62084 - */
62085 - mm->free_area_cache = addr + len;
62086 - return addr;
62087 - }
62088 + if (check_heap_stack_gap(vma, addr, len))
62089 + break;
62090 if (addr + mm->cached_hole_size < vma->vm_start)
62091 mm->cached_hole_size = vma->vm_start - addr;
62092 addr = vma->vm_end;
62093 }
62094 +
62095 + /*
62096 + * Remember the place where we stopped the search:
62097 + */
62098 + mm->free_area_cache = addr + len;
62099 + return addr;
62100 }
62101 #endif
62102
62103 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62104 {
62105 +
62106 +#ifdef CONFIG_PAX_SEGMEXEC
62107 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62108 + return;
62109 +#endif
62110 +
62111 /*
62112 * Is this a new hole at the lowest possible address?
62113 */
62114 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62115 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62116 mm->free_area_cache = addr;
62117 mm->cached_hole_size = ~0UL;
62118 }
62119 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
62120 {
62121 struct vm_area_struct *vma;
62122 struct mm_struct *mm = current->mm;
62123 - unsigned long addr = addr0;
62124 + unsigned long base = mm->mmap_base, addr = addr0;
62125
62126 /* requested length too big for entire address space */
62127 if (len > TASK_SIZE)
62128 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
62129 if (flags & MAP_FIXED)
62130 return addr;
62131
62132 +#ifdef CONFIG_PAX_RANDMMAP
62133 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62134 +#endif
62135 +
62136 /* requesting a specific address */
62137 if (addr) {
62138 addr = PAGE_ALIGN(addr);
62139 - vma = find_vma(mm, addr);
62140 - if (TASK_SIZE - len >= addr &&
62141 - (!vma || addr + len <= vma->vm_start))
62142 - return addr;
62143 + if (TASK_SIZE - len >= addr) {
62144 + vma = find_vma(mm, addr);
62145 + if (check_heap_stack_gap(vma, addr, len))
62146 + return addr;
62147 + }
62148 }
62149
62150 /* check if free_area_cache is useful for us */
62151 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
62152 /* make sure it can fit in the remaining address space */
62153 if (addr > len) {
62154 vma = find_vma(mm, addr-len);
62155 - if (!vma || addr <= vma->vm_start)
62156 + if (check_heap_stack_gap(vma, addr - len, len))
62157 /* remember the address as a hint for next time */
62158 return (mm->free_area_cache = addr-len);
62159 }
62160 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
62161 * return with success:
62162 */
62163 vma = find_vma(mm, addr);
62164 - if (!vma || addr+len <= vma->vm_start)
62165 + if (check_heap_stack_gap(vma, addr, len))
62166 /* remember the address as a hint for next time */
62167 return (mm->free_area_cache = addr);
62168
62169 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
62170 mm->cached_hole_size = vma->vm_start - addr;
62171
62172 /* try just below the current vma->vm_start */
62173 - addr = vma->vm_start-len;
62174 - } while (len < vma->vm_start);
62175 + addr = skip_heap_stack_gap(vma, len);
62176 + } while (!IS_ERR_VALUE(addr));
62177
62178 bottomup:
62179 /*
62180 @@ -1515,13 +1725,21 @@ bottomup:
62181 * can happen with large stack limits and large mmap()
62182 * allocations.
62183 */
62184 + mm->mmap_base = TASK_UNMAPPED_BASE;
62185 +
62186 +#ifdef CONFIG_PAX_RANDMMAP
62187 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62188 + mm->mmap_base += mm->delta_mmap;
62189 +#endif
62190 +
62191 + mm->free_area_cache = mm->mmap_base;
62192 mm->cached_hole_size = ~0UL;
62193 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62194 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62195 /*
62196 * Restore the topdown base:
62197 */
62198 - mm->free_area_cache = mm->mmap_base;
62199 + mm->mmap_base = base;
62200 + mm->free_area_cache = base;
62201 mm->cached_hole_size = ~0UL;
62202
62203 return addr;
62204 @@ -1530,6 +1748,12 @@ bottomup:
62205
62206 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62207 {
62208 +
62209 +#ifdef CONFIG_PAX_SEGMEXEC
62210 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62211 + return;
62212 +#endif
62213 +
62214 /*
62215 * Is this a new hole at the highest possible address?
62216 */
62217 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
62218 mm->free_area_cache = addr;
62219
62220 /* dont allow allocations above current base */
62221 - if (mm->free_area_cache > mm->mmap_base)
62222 + if (mm->free_area_cache > mm->mmap_base) {
62223 mm->free_area_cache = mm->mmap_base;
62224 + mm->cached_hole_size = ~0UL;
62225 + }
62226 }
62227
62228 unsigned long
62229 @@ -1646,6 +1872,28 @@ out:
62230 return prev ? prev->vm_next : vma;
62231 }
62232
62233 +#ifdef CONFIG_PAX_SEGMEXEC
62234 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62235 +{
62236 + struct vm_area_struct *vma_m;
62237 +
62238 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62239 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62240 + BUG_ON(vma->vm_mirror);
62241 + return NULL;
62242 + }
62243 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62244 + vma_m = vma->vm_mirror;
62245 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62246 + BUG_ON(vma->vm_file != vma_m->vm_file);
62247 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62248 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62249 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62250 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62251 + return vma_m;
62252 +}
62253 +#endif
62254 +
62255 /*
62256 * Verify that the stack growth is acceptable and
62257 * update accounting. This is shared with both the
62258 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
62259 return -ENOMEM;
62260
62261 /* Stack limit test */
62262 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62263 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62264 return -ENOMEM;
62265
62266 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
62267 locked = mm->locked_vm + grow;
62268 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62269 limit >>= PAGE_SHIFT;
62270 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62271 if (locked > limit && !capable(CAP_IPC_LOCK))
62272 return -ENOMEM;
62273 }
62274 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
62275 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62276 * vma is the last one with address > vma->vm_end. Have to extend vma.
62277 */
62278 +#ifndef CONFIG_IA64
62279 +static
62280 +#endif
62281 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62282 {
62283 int error;
62284 + bool locknext;
62285
62286 if (!(vma->vm_flags & VM_GROWSUP))
62287 return -EFAULT;
62288
62289 + /* Also guard against wrapping around to address 0. */
62290 + if (address < PAGE_ALIGN(address+1))
62291 + address = PAGE_ALIGN(address+1);
62292 + else
62293 + return -ENOMEM;
62294 +
62295 /*
62296 * We must make sure the anon_vma is allocated
62297 * so that the anon_vma locking is not a noop.
62298 */
62299 if (unlikely(anon_vma_prepare(vma)))
62300 return -ENOMEM;
62301 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62302 + if (locknext && anon_vma_prepare(vma->vm_next))
62303 + return -ENOMEM;
62304 vma_lock_anon_vma(vma);
62305 + if (locknext)
62306 + vma_lock_anon_vma(vma->vm_next);
62307
62308 /*
62309 * vma->vm_start/vm_end cannot change under us because the caller
62310 * is required to hold the mmap_sem in read mode. We need the
62311 - * anon_vma lock to serialize against concurrent expand_stacks.
62312 - * Also guard against wrapping around to address 0.
62313 + * anon_vma locks to serialize against concurrent expand_stacks
62314 + * and expand_upwards.
62315 */
62316 - if (address < PAGE_ALIGN(address+4))
62317 - address = PAGE_ALIGN(address+4);
62318 - else {
62319 - vma_unlock_anon_vma(vma);
62320 - return -ENOMEM;
62321 - }
62322 error = 0;
62323
62324 /* Somebody else might have raced and expanded it already */
62325 - if (address > vma->vm_end) {
62326 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62327 + error = -ENOMEM;
62328 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62329 unsigned long size, grow;
62330
62331 size = address - vma->vm_start;
62332 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
62333 }
62334 }
62335 }
62336 + if (locknext)
62337 + vma_unlock_anon_vma(vma->vm_next);
62338 vma_unlock_anon_vma(vma);
62339 khugepaged_enter_vma_merge(vma);
62340 return error;
62341 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
62342 unsigned long address)
62343 {
62344 int error;
62345 + bool lockprev = false;
62346 + struct vm_area_struct *prev;
62347
62348 /*
62349 * We must make sure the anon_vma is allocated
62350 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
62351 if (error)
62352 return error;
62353
62354 + prev = vma->vm_prev;
62355 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62356 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62357 +#endif
62358 + if (lockprev && anon_vma_prepare(prev))
62359 + return -ENOMEM;
62360 + if (lockprev)
62361 + vma_lock_anon_vma(prev);
62362 +
62363 vma_lock_anon_vma(vma);
62364
62365 /*
62366 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
62367 */
62368
62369 /* Somebody else might have raced and expanded it already */
62370 - if (address < vma->vm_start) {
62371 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62372 + error = -ENOMEM;
62373 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62374 unsigned long size, grow;
62375
62376 +#ifdef CONFIG_PAX_SEGMEXEC
62377 + struct vm_area_struct *vma_m;
62378 +
62379 + vma_m = pax_find_mirror_vma(vma);
62380 +#endif
62381 +
62382 size = vma->vm_end - address;
62383 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62384
62385 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
62386 if (!error) {
62387 vma->vm_start = address;
62388 vma->vm_pgoff -= grow;
62389 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62390 +
62391 +#ifdef CONFIG_PAX_SEGMEXEC
62392 + if (vma_m) {
62393 + vma_m->vm_start -= grow << PAGE_SHIFT;
62394 + vma_m->vm_pgoff -= grow;
62395 + }
62396 +#endif
62397 +
62398 perf_event_mmap(vma);
62399 }
62400 }
62401 }
62402 vma_unlock_anon_vma(vma);
62403 + if (lockprev)
62404 + vma_unlock_anon_vma(prev);
62405 khugepaged_enter_vma_merge(vma);
62406 return error;
62407 }
62408 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
62409 do {
62410 long nrpages = vma_pages(vma);
62411
62412 +#ifdef CONFIG_PAX_SEGMEXEC
62413 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62414 + vma = remove_vma(vma);
62415 + continue;
62416 + }
62417 +#endif
62418 +
62419 mm->total_vm -= nrpages;
62420 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62421 vma = remove_vma(vma);
62422 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62423 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62424 vma->vm_prev = NULL;
62425 do {
62426 +
62427 +#ifdef CONFIG_PAX_SEGMEXEC
62428 + if (vma->vm_mirror) {
62429 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62430 + vma->vm_mirror->vm_mirror = NULL;
62431 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62432 + vma->vm_mirror = NULL;
62433 + }
62434 +#endif
62435 +
62436 rb_erase(&vma->vm_rb, &mm->mm_rb);
62437 mm->map_count--;
62438 tail_vma = vma;
62439 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
62440 struct vm_area_struct *new;
62441 int err = -ENOMEM;
62442
62443 +#ifdef CONFIG_PAX_SEGMEXEC
62444 + struct vm_area_struct *vma_m, *new_m = NULL;
62445 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62446 +#endif
62447 +
62448 if (is_vm_hugetlb_page(vma) && (addr &
62449 ~(huge_page_mask(hstate_vma(vma)))))
62450 return -EINVAL;
62451
62452 +#ifdef CONFIG_PAX_SEGMEXEC
62453 + vma_m = pax_find_mirror_vma(vma);
62454 +#endif
62455 +
62456 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62457 if (!new)
62458 goto out_err;
62459
62460 +#ifdef CONFIG_PAX_SEGMEXEC
62461 + if (vma_m) {
62462 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62463 + if (!new_m) {
62464 + kmem_cache_free(vm_area_cachep, new);
62465 + goto out_err;
62466 + }
62467 + }
62468 +#endif
62469 +
62470 /* most fields are the same, copy all, and then fixup */
62471 *new = *vma;
62472
62473 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
62474 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62475 }
62476
62477 +#ifdef CONFIG_PAX_SEGMEXEC
62478 + if (vma_m) {
62479 + *new_m = *vma_m;
62480 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62481 + new_m->vm_mirror = new;
62482 + new->vm_mirror = new_m;
62483 +
62484 + if (new_below)
62485 + new_m->vm_end = addr_m;
62486 + else {
62487 + new_m->vm_start = addr_m;
62488 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62489 + }
62490 + }
62491 +#endif
62492 +
62493 pol = mpol_dup(vma_policy(vma));
62494 if (IS_ERR(pol)) {
62495 err = PTR_ERR(pol);
62496 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
62497 else
62498 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62499
62500 +#ifdef CONFIG_PAX_SEGMEXEC
62501 + if (!err && vma_m) {
62502 + if (anon_vma_clone(new_m, vma_m))
62503 + goto out_free_mpol;
62504 +
62505 + mpol_get(pol);
62506 + vma_set_policy(new_m, pol);
62507 +
62508 + if (new_m->vm_file) {
62509 + get_file(new_m->vm_file);
62510 + if (vma_m->vm_flags & VM_EXECUTABLE)
62511 + added_exe_file_vma(mm);
62512 + }
62513 +
62514 + if (new_m->vm_ops && new_m->vm_ops->open)
62515 + new_m->vm_ops->open(new_m);
62516 +
62517 + if (new_below)
62518 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62519 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62520 + else
62521 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62522 +
62523 + if (err) {
62524 + if (new_m->vm_ops && new_m->vm_ops->close)
62525 + new_m->vm_ops->close(new_m);
62526 + if (new_m->vm_file) {
62527 + if (vma_m->vm_flags & VM_EXECUTABLE)
62528 + removed_exe_file_vma(mm);
62529 + fput(new_m->vm_file);
62530 + }
62531 + mpol_put(pol);
62532 + }
62533 + }
62534 +#endif
62535 +
62536 /* Success. */
62537 if (!err)
62538 return 0;
62539 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
62540 removed_exe_file_vma(mm);
62541 fput(new->vm_file);
62542 }
62543 - unlink_anon_vmas(new);
62544 out_free_mpol:
62545 mpol_put(pol);
62546 out_free_vma:
62547 +
62548 +#ifdef CONFIG_PAX_SEGMEXEC
62549 + if (new_m) {
62550 + unlink_anon_vmas(new_m);
62551 + kmem_cache_free(vm_area_cachep, new_m);
62552 + }
62553 +#endif
62554 +
62555 + unlink_anon_vmas(new);
62556 kmem_cache_free(vm_area_cachep, new);
62557 out_err:
62558 return err;
62559 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
62560 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62561 unsigned long addr, int new_below)
62562 {
62563 +
62564 +#ifdef CONFIG_PAX_SEGMEXEC
62565 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62566 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62567 + if (mm->map_count >= sysctl_max_map_count-1)
62568 + return -ENOMEM;
62569 + } else
62570 +#endif
62571 +
62572 if (mm->map_count >= sysctl_max_map_count)
62573 return -ENOMEM;
62574
62575 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
62576 * work. This now handles partial unmappings.
62577 * Jeremy Fitzhardinge <jeremy@goop.org>
62578 */
62579 +#ifdef CONFIG_PAX_SEGMEXEC
62580 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62581 {
62582 + int ret = __do_munmap(mm, start, len);
62583 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62584 + return ret;
62585 +
62586 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62587 +}
62588 +
62589 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62590 +#else
62591 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62592 +#endif
62593 +{
62594 unsigned long end;
62595 struct vm_area_struct *vma, *prev, *last;
62596
62597 + /*
62598 + * mm->mmap_sem is required to protect against another thread
62599 + * changing the mappings in case we sleep.
62600 + */
62601 + verify_mm_writelocked(mm);
62602 +
62603 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62604 return -EINVAL;
62605
62606 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
62607 /* Fix up all other VM information */
62608 remove_vma_list(mm, vma);
62609
62610 + track_exec_limit(mm, start, end, 0UL);
62611 +
62612 return 0;
62613 }
62614
62615 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62616
62617 profile_munmap(addr);
62618
62619 +#ifdef CONFIG_PAX_SEGMEXEC
62620 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62621 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62622 + return -EINVAL;
62623 +#endif
62624 +
62625 down_write(&mm->mmap_sem);
62626 ret = do_munmap(mm, addr, len);
62627 up_write(&mm->mmap_sem);
62628 return ret;
62629 }
62630
62631 -static inline void verify_mm_writelocked(struct mm_struct *mm)
62632 -{
62633 -#ifdef CONFIG_DEBUG_VM
62634 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62635 - WARN_ON(1);
62636 - up_read(&mm->mmap_sem);
62637 - }
62638 -#endif
62639 -}
62640 -
62641 /*
62642 * this is really a simplified "do_mmap". it only handles
62643 * anonymous maps. eventually we may be able to do some
62644 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
62645 struct rb_node ** rb_link, * rb_parent;
62646 pgoff_t pgoff = addr >> PAGE_SHIFT;
62647 int error;
62648 + unsigned long charged;
62649
62650 len = PAGE_ALIGN(len);
62651 if (!len)
62652 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
62653
62654 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62655
62656 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62657 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62658 + flags &= ~VM_EXEC;
62659 +
62660 +#ifdef CONFIG_PAX_MPROTECT
62661 + if (mm->pax_flags & MF_PAX_MPROTECT)
62662 + flags &= ~VM_MAYEXEC;
62663 +#endif
62664 +
62665 + }
62666 +#endif
62667 +
62668 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62669 if (error & ~PAGE_MASK)
62670 return error;
62671
62672 + charged = len >> PAGE_SHIFT;
62673 +
62674 /*
62675 * mlock MCL_FUTURE?
62676 */
62677 if (mm->def_flags & VM_LOCKED) {
62678 unsigned long locked, lock_limit;
62679 - locked = len >> PAGE_SHIFT;
62680 + locked = charged;
62681 locked += mm->locked_vm;
62682 lock_limit = rlimit(RLIMIT_MEMLOCK);
62683 lock_limit >>= PAGE_SHIFT;
62684 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
62685 /*
62686 * Clear old maps. this also does some error checking for us
62687 */
62688 - munmap_back:
62689 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62690 if (vma && vma->vm_start < addr + len) {
62691 if (do_munmap(mm, addr, len))
62692 return -ENOMEM;
62693 - goto munmap_back;
62694 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62695 + BUG_ON(vma && vma->vm_start < addr + len);
62696 }
62697
62698 /* Check against address space limits *after* clearing old maps... */
62699 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
62700 + if (!may_expand_vm(mm, charged))
62701 return -ENOMEM;
62702
62703 if (mm->map_count > sysctl_max_map_count)
62704 return -ENOMEM;
62705
62706 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
62707 + if (security_vm_enough_memory(charged))
62708 return -ENOMEM;
62709
62710 /* Can we just expand an old private anonymous mapping? */
62711 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
62712 */
62713 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62714 if (!vma) {
62715 - vm_unacct_memory(len >> PAGE_SHIFT);
62716 + vm_unacct_memory(charged);
62717 return -ENOMEM;
62718 }
62719
62720 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
62721 vma_link(mm, vma, prev, rb_link, rb_parent);
62722 out:
62723 perf_event_mmap(vma);
62724 - mm->total_vm += len >> PAGE_SHIFT;
62725 + mm->total_vm += charged;
62726 if (flags & VM_LOCKED) {
62727 if (!mlock_vma_pages_range(vma, addr, addr + len))
62728 - mm->locked_vm += (len >> PAGE_SHIFT);
62729 + mm->locked_vm += charged;
62730 }
62731 + track_exec_limit(mm, addr, addr + len, flags);
62732 return addr;
62733 }
62734
62735 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
62736 * Walk the list again, actually closing and freeing it,
62737 * with preemption enabled, without holding any MM locks.
62738 */
62739 - while (vma)
62740 + while (vma) {
62741 + vma->vm_mirror = NULL;
62742 vma = remove_vma(vma);
62743 + }
62744
62745 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
62746 }
62747 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
62748 struct vm_area_struct * __vma, * prev;
62749 struct rb_node ** rb_link, * rb_parent;
62750
62751 +#ifdef CONFIG_PAX_SEGMEXEC
62752 + struct vm_area_struct *vma_m = NULL;
62753 +#endif
62754 +
62755 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
62756 + return -EPERM;
62757 +
62758 /*
62759 * The vm_pgoff of a purely anonymous vma should be irrelevant
62760 * until its first write fault, when page's anon_vma and index
62761 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
62762 if ((vma->vm_flags & VM_ACCOUNT) &&
62763 security_vm_enough_memory_mm(mm, vma_pages(vma)))
62764 return -ENOMEM;
62765 +
62766 +#ifdef CONFIG_PAX_SEGMEXEC
62767 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
62768 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62769 + if (!vma_m)
62770 + return -ENOMEM;
62771 + }
62772 +#endif
62773 +
62774 vma_link(mm, vma, prev, rb_link, rb_parent);
62775 +
62776 +#ifdef CONFIG_PAX_SEGMEXEC
62777 + if (vma_m)
62778 + BUG_ON(pax_mirror_vma(vma_m, vma));
62779 +#endif
62780 +
62781 return 0;
62782 }
62783
62784 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
62785 struct rb_node **rb_link, *rb_parent;
62786 struct mempolicy *pol;
62787
62788 + BUG_ON(vma->vm_mirror);
62789 +
62790 /*
62791 * If anonymous vma has not yet been faulted, update new pgoff
62792 * to match new location, to increase its chance of merging.
62793 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
62794 return NULL;
62795 }
62796
62797 +#ifdef CONFIG_PAX_SEGMEXEC
62798 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
62799 +{
62800 + struct vm_area_struct *prev_m;
62801 + struct rb_node **rb_link_m, *rb_parent_m;
62802 + struct mempolicy *pol_m;
62803 +
62804 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
62805 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
62806 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
62807 + *vma_m = *vma;
62808 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
62809 + if (anon_vma_clone(vma_m, vma))
62810 + return -ENOMEM;
62811 + pol_m = vma_policy(vma_m);
62812 + mpol_get(pol_m);
62813 + vma_set_policy(vma_m, pol_m);
62814 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
62815 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
62816 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
62817 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
62818 + if (vma_m->vm_file)
62819 + get_file(vma_m->vm_file);
62820 + if (vma_m->vm_ops && vma_m->vm_ops->open)
62821 + vma_m->vm_ops->open(vma_m);
62822 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
62823 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
62824 + vma_m->vm_mirror = vma;
62825 + vma->vm_mirror = vma_m;
62826 + return 0;
62827 +}
62828 +#endif
62829 +
62830 /*
62831 * Return true if the calling process may expand its vm space by the passed
62832 * number of pages
62833 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
62834 unsigned long lim;
62835
62836 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
62837 -
62838 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
62839 if (cur + npages > lim)
62840 return 0;
62841 return 1;
62842 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
62843 vma->vm_start = addr;
62844 vma->vm_end = addr + len;
62845
62846 +#ifdef CONFIG_PAX_MPROTECT
62847 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62848 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62849 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
62850 + return -EPERM;
62851 + if (!(vm_flags & VM_EXEC))
62852 + vm_flags &= ~VM_MAYEXEC;
62853 +#else
62854 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62855 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62856 +#endif
62857 + else
62858 + vm_flags &= ~VM_MAYWRITE;
62859 + }
62860 +#endif
62861 +
62862 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
62863 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62864
62865 diff -urNp linux-3.0.3/mm/mprotect.c linux-3.0.3/mm/mprotect.c
62866 --- linux-3.0.3/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
62867 +++ linux-3.0.3/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
62868 @@ -23,10 +23,16 @@
62869 #include <linux/mmu_notifier.h>
62870 #include <linux/migrate.h>
62871 #include <linux/perf_event.h>
62872 +
62873 +#ifdef CONFIG_PAX_MPROTECT
62874 +#include <linux/elf.h>
62875 +#endif
62876 +
62877 #include <asm/uaccess.h>
62878 #include <asm/pgtable.h>
62879 #include <asm/cacheflush.h>
62880 #include <asm/tlbflush.h>
62881 +#include <asm/mmu_context.h>
62882
62883 #ifndef pgprot_modify
62884 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
62885 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
62886 flush_tlb_range(vma, start, end);
62887 }
62888
62889 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62890 +/* called while holding the mmap semaphor for writing except stack expansion */
62891 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
62892 +{
62893 + unsigned long oldlimit, newlimit = 0UL;
62894 +
62895 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
62896 + return;
62897 +
62898 + spin_lock(&mm->page_table_lock);
62899 + oldlimit = mm->context.user_cs_limit;
62900 + if ((prot & VM_EXEC) && oldlimit < end)
62901 + /* USER_CS limit moved up */
62902 + newlimit = end;
62903 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
62904 + /* USER_CS limit moved down */
62905 + newlimit = start;
62906 +
62907 + if (newlimit) {
62908 + mm->context.user_cs_limit = newlimit;
62909 +
62910 +#ifdef CONFIG_SMP
62911 + wmb();
62912 + cpus_clear(mm->context.cpu_user_cs_mask);
62913 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
62914 +#endif
62915 +
62916 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
62917 + }
62918 + spin_unlock(&mm->page_table_lock);
62919 + if (newlimit == end) {
62920 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
62921 +
62922 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
62923 + if (is_vm_hugetlb_page(vma))
62924 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
62925 + else
62926 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
62927 + }
62928 +}
62929 +#endif
62930 +
62931 int
62932 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
62933 unsigned long start, unsigned long end, unsigned long newflags)
62934 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
62935 int error;
62936 int dirty_accountable = 0;
62937
62938 +#ifdef CONFIG_PAX_SEGMEXEC
62939 + struct vm_area_struct *vma_m = NULL;
62940 + unsigned long start_m, end_m;
62941 +
62942 + start_m = start + SEGMEXEC_TASK_SIZE;
62943 + end_m = end + SEGMEXEC_TASK_SIZE;
62944 +#endif
62945 +
62946 if (newflags == oldflags) {
62947 *pprev = vma;
62948 return 0;
62949 }
62950
62951 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
62952 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
62953 +
62954 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
62955 + return -ENOMEM;
62956 +
62957 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
62958 + return -ENOMEM;
62959 + }
62960 +
62961 /*
62962 * If we make a private mapping writable we increase our commit;
62963 * but (without finer accounting) cannot reduce our commit if we
62964 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
62965 }
62966 }
62967
62968 +#ifdef CONFIG_PAX_SEGMEXEC
62969 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
62970 + if (start != vma->vm_start) {
62971 + error = split_vma(mm, vma, start, 1);
62972 + if (error)
62973 + goto fail;
62974 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
62975 + *pprev = (*pprev)->vm_next;
62976 + }
62977 +
62978 + if (end != vma->vm_end) {
62979 + error = split_vma(mm, vma, end, 0);
62980 + if (error)
62981 + goto fail;
62982 + }
62983 +
62984 + if (pax_find_mirror_vma(vma)) {
62985 + error = __do_munmap(mm, start_m, end_m - start_m);
62986 + if (error)
62987 + goto fail;
62988 + } else {
62989 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62990 + if (!vma_m) {
62991 + error = -ENOMEM;
62992 + goto fail;
62993 + }
62994 + vma->vm_flags = newflags;
62995 + error = pax_mirror_vma(vma_m, vma);
62996 + if (error) {
62997 + vma->vm_flags = oldflags;
62998 + goto fail;
62999 + }
63000 + }
63001 + }
63002 +#endif
63003 +
63004 /*
63005 * First try to merge with previous and/or next vma.
63006 */
63007 @@ -204,9 +306,21 @@ success:
63008 * vm_flags and vm_page_prot are protected by the mmap_sem
63009 * held in write mode.
63010 */
63011 +
63012 +#ifdef CONFIG_PAX_SEGMEXEC
63013 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63014 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63015 +#endif
63016 +
63017 vma->vm_flags = newflags;
63018 +
63019 +#ifdef CONFIG_PAX_MPROTECT
63020 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63021 + mm->binfmt->handle_mprotect(vma, newflags);
63022 +#endif
63023 +
63024 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63025 - vm_get_page_prot(newflags));
63026 + vm_get_page_prot(vma->vm_flags));
63027
63028 if (vma_wants_writenotify(vma)) {
63029 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63030 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63031 end = start + len;
63032 if (end <= start)
63033 return -ENOMEM;
63034 +
63035 +#ifdef CONFIG_PAX_SEGMEXEC
63036 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63037 + if (end > SEGMEXEC_TASK_SIZE)
63038 + return -EINVAL;
63039 + } else
63040 +#endif
63041 +
63042 + if (end > TASK_SIZE)
63043 + return -EINVAL;
63044 +
63045 if (!arch_validate_prot(prot))
63046 return -EINVAL;
63047
63048 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63049 /*
63050 * Does the application expect PROT_READ to imply PROT_EXEC:
63051 */
63052 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63053 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63054 prot |= PROT_EXEC;
63055
63056 vm_flags = calc_vm_prot_bits(prot);
63057 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63058 if (start > vma->vm_start)
63059 prev = vma;
63060
63061 +#ifdef CONFIG_PAX_MPROTECT
63062 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63063 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63064 +#endif
63065 +
63066 for (nstart = start ; ; ) {
63067 unsigned long newflags;
63068
63069 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63070
63071 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63072 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63073 + if (prot & (PROT_WRITE | PROT_EXEC))
63074 + gr_log_rwxmprotect(vma->vm_file);
63075 +
63076 + error = -EACCES;
63077 + goto out;
63078 + }
63079 +
63080 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63081 error = -EACCES;
63082 goto out;
63083 }
63084 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63085 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63086 if (error)
63087 goto out;
63088 +
63089 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63090 +
63091 nstart = tmp;
63092
63093 if (nstart < prev->vm_end)
63094 diff -urNp linux-3.0.3/mm/mremap.c linux-3.0.3/mm/mremap.c
63095 --- linux-3.0.3/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
63096 +++ linux-3.0.3/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
63097 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
63098 continue;
63099 pte = ptep_clear_flush(vma, old_addr, old_pte);
63100 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63101 +
63102 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63103 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63104 + pte = pte_exprotect(pte);
63105 +#endif
63106 +
63107 set_pte_at(mm, new_addr, new_pte, pte);
63108 }
63109
63110 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
63111 if (is_vm_hugetlb_page(vma))
63112 goto Einval;
63113
63114 +#ifdef CONFIG_PAX_SEGMEXEC
63115 + if (pax_find_mirror_vma(vma))
63116 + goto Einval;
63117 +#endif
63118 +
63119 /* We can't remap across vm area boundaries */
63120 if (old_len > vma->vm_end - addr)
63121 goto Efault;
63122 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
63123 unsigned long ret = -EINVAL;
63124 unsigned long charged = 0;
63125 unsigned long map_flags;
63126 + unsigned long pax_task_size = TASK_SIZE;
63127
63128 if (new_addr & ~PAGE_MASK)
63129 goto out;
63130
63131 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63132 +#ifdef CONFIG_PAX_SEGMEXEC
63133 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63134 + pax_task_size = SEGMEXEC_TASK_SIZE;
63135 +#endif
63136 +
63137 + pax_task_size -= PAGE_SIZE;
63138 +
63139 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63140 goto out;
63141
63142 /* Check if the location we're moving into overlaps the
63143 * old location at all, and fail if it does.
63144 */
63145 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63146 - goto out;
63147 -
63148 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63149 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63150 goto out;
63151
63152 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63153 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
63154 struct vm_area_struct *vma;
63155 unsigned long ret = -EINVAL;
63156 unsigned long charged = 0;
63157 + unsigned long pax_task_size = TASK_SIZE;
63158
63159 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63160 goto out;
63161 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
63162 if (!new_len)
63163 goto out;
63164
63165 +#ifdef CONFIG_PAX_SEGMEXEC
63166 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63167 + pax_task_size = SEGMEXEC_TASK_SIZE;
63168 +#endif
63169 +
63170 + pax_task_size -= PAGE_SIZE;
63171 +
63172 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63173 + old_len > pax_task_size || addr > pax_task_size-old_len)
63174 + goto out;
63175 +
63176 if (flags & MREMAP_FIXED) {
63177 if (flags & MREMAP_MAYMOVE)
63178 ret = mremap_to(addr, old_len, new_addr, new_len);
63179 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
63180 addr + new_len);
63181 }
63182 ret = addr;
63183 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63184 goto out;
63185 }
63186 }
63187 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
63188 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63189 if (ret)
63190 goto out;
63191 +
63192 + map_flags = vma->vm_flags;
63193 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63194 + if (!(ret & ~PAGE_MASK)) {
63195 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63196 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63197 + }
63198 }
63199 out:
63200 if (ret & ~PAGE_MASK)
63201 diff -urNp linux-3.0.3/mm/nobootmem.c linux-3.0.3/mm/nobootmem.c
63202 --- linux-3.0.3/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
63203 +++ linux-3.0.3/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
63204 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63205 unsigned long __init free_all_memory_core_early(int nodeid)
63206 {
63207 int i;
63208 - u64 start, end;
63209 + u64 start, end, startrange, endrange;
63210 unsigned long count = 0;
63211 - struct range *range = NULL;
63212 + struct range *range = NULL, rangerange = { 0, 0 };
63213 int nr_range;
63214
63215 nr_range = get_free_all_memory_range(&range, nodeid);
63216 + startrange = __pa(range) >> PAGE_SHIFT;
63217 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63218
63219 for (i = 0; i < nr_range; i++) {
63220 start = range[i].start;
63221 end = range[i].end;
63222 + if (start <= endrange && startrange < end) {
63223 + BUG_ON(rangerange.start | rangerange.end);
63224 + rangerange = range[i];
63225 + continue;
63226 + }
63227 count += end - start;
63228 __free_pages_memory(start, end);
63229 }
63230 + start = rangerange.start;
63231 + end = rangerange.end;
63232 + count += end - start;
63233 + __free_pages_memory(start, end);
63234
63235 return count;
63236 }
63237 diff -urNp linux-3.0.3/mm/nommu.c linux-3.0.3/mm/nommu.c
63238 --- linux-3.0.3/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
63239 +++ linux-3.0.3/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
63240 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63241 int sysctl_overcommit_ratio = 50; /* default is 50% */
63242 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63243 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63244 -int heap_stack_gap = 0;
63245
63246 atomic_long_t mmap_pages_allocated;
63247
63248 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
63249 EXPORT_SYMBOL(find_vma);
63250
63251 /*
63252 - * find a VMA
63253 - * - we don't extend stack VMAs under NOMMU conditions
63254 - */
63255 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63256 -{
63257 - return find_vma(mm, addr);
63258 -}
63259 -
63260 -/*
63261 * expand a stack to a given address
63262 * - not supported under NOMMU conditions
63263 */
63264 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
63265
63266 /* most fields are the same, copy all, and then fixup */
63267 *new = *vma;
63268 + INIT_LIST_HEAD(&new->anon_vma_chain);
63269 *region = *vma->vm_region;
63270 new->vm_region = region;
63271
63272 diff -urNp linux-3.0.3/mm/page_alloc.c linux-3.0.3/mm/page_alloc.c
63273 --- linux-3.0.3/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
63274 +++ linux-3.0.3/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
63275 @@ -340,7 +340,7 @@ out:
63276 * This usage means that zero-order pages may not be compound.
63277 */
63278
63279 -static void free_compound_page(struct page *page)
63280 +void free_compound_page(struct page *page)
63281 {
63282 __free_pages_ok(page, compound_order(page));
63283 }
63284 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
63285 int i;
63286 int bad = 0;
63287
63288 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63289 + unsigned long index = 1UL << order;
63290 +#endif
63291 +
63292 trace_mm_page_free_direct(page, order);
63293 kmemcheck_free_shadow(page, order);
63294
63295 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
63296 debug_check_no_obj_freed(page_address(page),
63297 PAGE_SIZE << order);
63298 }
63299 +
63300 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63301 + for (; index; --index)
63302 + sanitize_highpage(page + index - 1);
63303 +#endif
63304 +
63305 arch_free_page(page, order);
63306 kernel_map_pages(page, 1 << order, 0);
63307
63308 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
63309 arch_alloc_page(page, order);
63310 kernel_map_pages(page, 1 << order, 1);
63311
63312 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63313 if (gfp_flags & __GFP_ZERO)
63314 prep_zero_page(page, order, gfp_flags);
63315 +#endif
63316
63317 if (order && (gfp_flags & __GFP_COMP))
63318 prep_compound_page(page, order);
63319 @@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
63320 int cpu;
63321 struct zone *zone;
63322
63323 + pax_track_stack();
63324 +
63325 for_each_populated_zone(zone) {
63326 if (skip_free_areas_node(filter, zone_to_nid(zone)))
63327 continue;
63328 diff -urNp linux-3.0.3/mm/percpu.c linux-3.0.3/mm/percpu.c
63329 --- linux-3.0.3/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
63330 +++ linux-3.0.3/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
63331 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63332 static unsigned int pcpu_last_unit_cpu __read_mostly;
63333
63334 /* the address of the first chunk which starts with the kernel static area */
63335 -void *pcpu_base_addr __read_mostly;
63336 +void *pcpu_base_addr __read_only;
63337 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63338
63339 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63340 diff -urNp linux-3.0.3/mm/rmap.c linux-3.0.3/mm/rmap.c
63341 --- linux-3.0.3/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
63342 +++ linux-3.0.3/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
63343 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
63344 struct anon_vma *anon_vma = vma->anon_vma;
63345 struct anon_vma_chain *avc;
63346
63347 +#ifdef CONFIG_PAX_SEGMEXEC
63348 + struct anon_vma_chain *avc_m = NULL;
63349 +#endif
63350 +
63351 might_sleep();
63352 if (unlikely(!anon_vma)) {
63353 struct mm_struct *mm = vma->vm_mm;
63354 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
63355 if (!avc)
63356 goto out_enomem;
63357
63358 +#ifdef CONFIG_PAX_SEGMEXEC
63359 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
63360 + if (!avc_m)
63361 + goto out_enomem_free_avc;
63362 +#endif
63363 +
63364 anon_vma = find_mergeable_anon_vma(vma);
63365 allocated = NULL;
63366 if (!anon_vma) {
63367 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
63368 /* page_table_lock to protect against threads */
63369 spin_lock(&mm->page_table_lock);
63370 if (likely(!vma->anon_vma)) {
63371 +
63372 +#ifdef CONFIG_PAX_SEGMEXEC
63373 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63374 +
63375 + if (vma_m) {
63376 + BUG_ON(vma_m->anon_vma);
63377 + vma_m->anon_vma = anon_vma;
63378 + avc_m->anon_vma = anon_vma;
63379 + avc_m->vma = vma;
63380 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63381 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63382 + avc_m = NULL;
63383 + }
63384 +#endif
63385 +
63386 vma->anon_vma = anon_vma;
63387 avc->anon_vma = anon_vma;
63388 avc->vma = vma;
63389 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
63390
63391 if (unlikely(allocated))
63392 put_anon_vma(allocated);
63393 +
63394 +#ifdef CONFIG_PAX_SEGMEXEC
63395 + if (unlikely(avc_m))
63396 + anon_vma_chain_free(avc_m);
63397 +#endif
63398 +
63399 if (unlikely(avc))
63400 anon_vma_chain_free(avc);
63401 }
63402 return 0;
63403
63404 out_enomem_free_avc:
63405 +
63406 +#ifdef CONFIG_PAX_SEGMEXEC
63407 + if (avc_m)
63408 + anon_vma_chain_free(avc_m);
63409 +#endif
63410 +
63411 anon_vma_chain_free(avc);
63412 out_enomem:
63413 return -ENOMEM;
63414 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
63415 * Attach the anon_vmas from src to dst.
63416 * Returns 0 on success, -ENOMEM on failure.
63417 */
63418 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63419 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63420 {
63421 struct anon_vma_chain *avc, *pavc;
63422 struct anon_vma *root = NULL;
63423 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
63424 * the corresponding VMA in the parent process is attached to.
63425 * Returns 0 on success, non-zero on failure.
63426 */
63427 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63428 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63429 {
63430 struct anon_vma_chain *avc;
63431 struct anon_vma *anon_vma;
63432 diff -urNp linux-3.0.3/mm/shmem.c linux-3.0.3/mm/shmem.c
63433 --- linux-3.0.3/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
63434 +++ linux-3.0.3/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
63435 @@ -31,7 +31,7 @@
63436 #include <linux/percpu_counter.h>
63437 #include <linux/swap.h>
63438
63439 -static struct vfsmount *shm_mnt;
63440 +struct vfsmount *shm_mnt;
63441
63442 #ifdef CONFIG_SHMEM
63443 /*
63444 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
63445 goto unlock;
63446 }
63447 entry = shmem_swp_entry(info, index, NULL);
63448 + if (!entry)
63449 + goto unlock;
63450 if (entry->val) {
63451 /*
63452 * The more uptodate page coming down from a stacked
63453 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
63454 struct vm_area_struct pvma;
63455 struct page *page;
63456
63457 + pax_track_stack();
63458 +
63459 spol = mpol_cond_copy(&mpol,
63460 mpol_shared_policy_lookup(&info->policy, idx));
63461
63462 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
63463 int err = -ENOMEM;
63464
63465 /* Round up to L1_CACHE_BYTES to resist false sharing */
63466 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63467 - L1_CACHE_BYTES), GFP_KERNEL);
63468 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63469 if (!sbinfo)
63470 return -ENOMEM;
63471
63472 diff -urNp linux-3.0.3/mm/slab.c linux-3.0.3/mm/slab.c
63473 --- linux-3.0.3/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
63474 +++ linux-3.0.3/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
63475 @@ -151,7 +151,7 @@
63476
63477 /* Legal flag mask for kmem_cache_create(). */
63478 #if DEBUG
63479 -# define CREATE_MASK (SLAB_RED_ZONE | \
63480 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63481 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63482 SLAB_CACHE_DMA | \
63483 SLAB_STORE_USER | \
63484 @@ -159,7 +159,7 @@
63485 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63486 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63487 #else
63488 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63489 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63490 SLAB_CACHE_DMA | \
63491 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63492 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63493 @@ -288,7 +288,7 @@ struct kmem_list3 {
63494 * Need this for bootstrapping a per node allocator.
63495 */
63496 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63497 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63498 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63499 #define CACHE_CACHE 0
63500 #define SIZE_AC MAX_NUMNODES
63501 #define SIZE_L3 (2 * MAX_NUMNODES)
63502 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
63503 if ((x)->max_freeable < i) \
63504 (x)->max_freeable = i; \
63505 } while (0)
63506 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63507 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63508 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63509 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63510 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63511 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63512 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63513 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63514 #else
63515 #define STATS_INC_ACTIVE(x) do { } while (0)
63516 #define STATS_DEC_ACTIVE(x) do { } while (0)
63517 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
63518 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63519 */
63520 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63521 - const struct slab *slab, void *obj)
63522 + const struct slab *slab, const void *obj)
63523 {
63524 u32 offset = (obj - slab->s_mem);
63525 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63526 @@ -564,7 +564,7 @@ struct cache_names {
63527 static struct cache_names __initdata cache_names[] = {
63528 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63529 #include <linux/kmalloc_sizes.h>
63530 - {NULL,}
63531 + {NULL}
63532 #undef CACHE
63533 };
63534
63535 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
63536 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63537 sizes[INDEX_AC].cs_size,
63538 ARCH_KMALLOC_MINALIGN,
63539 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63540 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63541 NULL);
63542
63543 if (INDEX_AC != INDEX_L3) {
63544 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
63545 kmem_cache_create(names[INDEX_L3].name,
63546 sizes[INDEX_L3].cs_size,
63547 ARCH_KMALLOC_MINALIGN,
63548 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63549 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63550 NULL);
63551 }
63552
63553 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
63554 sizes->cs_cachep = kmem_cache_create(names->name,
63555 sizes->cs_size,
63556 ARCH_KMALLOC_MINALIGN,
63557 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63558 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63559 NULL);
63560 }
63561 #ifdef CONFIG_ZONE_DMA
63562 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
63563 }
63564 /* cpu stats */
63565 {
63566 - unsigned long allochit = atomic_read(&cachep->allochit);
63567 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63568 - unsigned long freehit = atomic_read(&cachep->freehit);
63569 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63570 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63571 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63572 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63573 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63574
63575 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63576 allochit, allocmiss, freehit, freemiss);
63577 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
63578
63579 static int __init slab_proc_init(void)
63580 {
63581 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63582 + mode_t gr_mode = S_IRUGO;
63583 +
63584 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63585 + gr_mode = S_IRUSR;
63586 +#endif
63587 +
63588 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63589 #ifdef CONFIG_DEBUG_SLAB_LEAK
63590 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63591 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63592 #endif
63593 return 0;
63594 }
63595 module_init(slab_proc_init);
63596 #endif
63597
63598 +void check_object_size(const void *ptr, unsigned long n, bool to)
63599 +{
63600 +
63601 +#ifdef CONFIG_PAX_USERCOPY
63602 + struct page *page;
63603 + struct kmem_cache *cachep = NULL;
63604 + struct slab *slabp;
63605 + unsigned int objnr;
63606 + unsigned long offset;
63607 +
63608 + if (!n)
63609 + return;
63610 +
63611 + if (ZERO_OR_NULL_PTR(ptr))
63612 + goto report;
63613 +
63614 + if (!virt_addr_valid(ptr))
63615 + return;
63616 +
63617 + page = virt_to_head_page(ptr);
63618 +
63619 + if (!PageSlab(page)) {
63620 + if (object_is_on_stack(ptr, n) == -1)
63621 + goto report;
63622 + return;
63623 + }
63624 +
63625 + cachep = page_get_cache(page);
63626 + if (!(cachep->flags & SLAB_USERCOPY))
63627 + goto report;
63628 +
63629 + slabp = page_get_slab(page);
63630 + objnr = obj_to_index(cachep, slabp, ptr);
63631 + BUG_ON(objnr >= cachep->num);
63632 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63633 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63634 + return;
63635 +
63636 +report:
63637 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63638 +#endif
63639 +
63640 +}
63641 +EXPORT_SYMBOL(check_object_size);
63642 +
63643 /**
63644 * ksize - get the actual amount of memory allocated for a given object
63645 * @objp: Pointer to the object
63646 diff -urNp linux-3.0.3/mm/slob.c linux-3.0.3/mm/slob.c
63647 --- linux-3.0.3/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
63648 +++ linux-3.0.3/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
63649 @@ -29,7 +29,7 @@
63650 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63651 * alloc_pages() directly, allocating compound pages so the page order
63652 * does not have to be separately tracked, and also stores the exact
63653 - * allocation size in page->private so that it can be used to accurately
63654 + * allocation size in slob_page->size so that it can be used to accurately
63655 * provide ksize(). These objects are detected in kfree() because slob_page()
63656 * is false for them.
63657 *
63658 @@ -58,6 +58,7 @@
63659 */
63660
63661 #include <linux/kernel.h>
63662 +#include <linux/sched.h>
63663 #include <linux/slab.h>
63664 #include <linux/mm.h>
63665 #include <linux/swap.h> /* struct reclaim_state */
63666 @@ -102,7 +103,8 @@ struct slob_page {
63667 unsigned long flags; /* mandatory */
63668 atomic_t _count; /* mandatory */
63669 slobidx_t units; /* free units left in page */
63670 - unsigned long pad[2];
63671 + unsigned long pad[1];
63672 + unsigned long size; /* size when >=PAGE_SIZE */
63673 slob_t *free; /* first free slob_t in page */
63674 struct list_head list; /* linked list of free pages */
63675 };
63676 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63677 */
63678 static inline int is_slob_page(struct slob_page *sp)
63679 {
63680 - return PageSlab((struct page *)sp);
63681 + return PageSlab((struct page *)sp) && !sp->size;
63682 }
63683
63684 static inline void set_slob_page(struct slob_page *sp)
63685 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
63686
63687 static inline struct slob_page *slob_page(const void *addr)
63688 {
63689 - return (struct slob_page *)virt_to_page(addr);
63690 + return (struct slob_page *)virt_to_head_page(addr);
63691 }
63692
63693 /*
63694 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
63695 /*
63696 * Return the size of a slob block.
63697 */
63698 -static slobidx_t slob_units(slob_t *s)
63699 +static slobidx_t slob_units(const slob_t *s)
63700 {
63701 if (s->units > 0)
63702 return s->units;
63703 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
63704 /*
63705 * Return the next free slob block pointer after this one.
63706 */
63707 -static slob_t *slob_next(slob_t *s)
63708 +static slob_t *slob_next(const slob_t *s)
63709 {
63710 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
63711 slobidx_t next;
63712 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
63713 /*
63714 * Returns true if s is the last free block in its page.
63715 */
63716 -static int slob_last(slob_t *s)
63717 +static int slob_last(const slob_t *s)
63718 {
63719 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
63720 }
63721 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
63722 if (!page)
63723 return NULL;
63724
63725 + set_slob_page(page);
63726 return page_address(page);
63727 }
63728
63729 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
63730 if (!b)
63731 return NULL;
63732 sp = slob_page(b);
63733 - set_slob_page(sp);
63734
63735 spin_lock_irqsave(&slob_lock, flags);
63736 sp->units = SLOB_UNITS(PAGE_SIZE);
63737 sp->free = b;
63738 + sp->size = 0;
63739 INIT_LIST_HEAD(&sp->list);
63740 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
63741 set_slob_page_free(sp, slob_list);
63742 @@ -476,10 +479,9 @@ out:
63743 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
63744 */
63745
63746 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63747 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
63748 {
63749 - unsigned int *m;
63750 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63751 + slob_t *m;
63752 void *ret;
63753
63754 lockdep_trace_alloc(gfp);
63755 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
63756
63757 if (!m)
63758 return NULL;
63759 - *m = size;
63760 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
63761 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
63762 + m[0].units = size;
63763 + m[1].units = align;
63764 ret = (void *)m + align;
63765
63766 trace_kmalloc_node(_RET_IP_, ret,
63767 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
63768 gfp |= __GFP_COMP;
63769 ret = slob_new_pages(gfp, order, node);
63770 if (ret) {
63771 - struct page *page;
63772 - page = virt_to_page(ret);
63773 - page->private = size;
63774 + struct slob_page *sp;
63775 + sp = slob_page(ret);
63776 + sp->size = size;
63777 }
63778
63779 trace_kmalloc_node(_RET_IP_, ret,
63780 size, PAGE_SIZE << order, gfp, node);
63781 }
63782
63783 - kmemleak_alloc(ret, size, 1, gfp);
63784 + return ret;
63785 +}
63786 +
63787 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
63788 +{
63789 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63790 + void *ret = __kmalloc_node_align(size, gfp, node, align);
63791 +
63792 + if (!ZERO_OR_NULL_PTR(ret))
63793 + kmemleak_alloc(ret, size, 1, gfp);
63794 return ret;
63795 }
63796 EXPORT_SYMBOL(__kmalloc_node);
63797 @@ -531,13 +545,88 @@ void kfree(const void *block)
63798 sp = slob_page(block);
63799 if (is_slob_page(sp)) {
63800 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63801 - unsigned int *m = (unsigned int *)(block - align);
63802 - slob_free(m, *m + align);
63803 - } else
63804 + slob_t *m = (slob_t *)(block - align);
63805 + slob_free(m, m[0].units + align);
63806 + } else {
63807 + clear_slob_page(sp);
63808 + free_slob_page(sp);
63809 + sp->size = 0;
63810 put_page(&sp->page);
63811 + }
63812 }
63813 EXPORT_SYMBOL(kfree);
63814
63815 +void check_object_size(const void *ptr, unsigned long n, bool to)
63816 +{
63817 +
63818 +#ifdef CONFIG_PAX_USERCOPY
63819 + struct slob_page *sp;
63820 + const slob_t *free;
63821 + const void *base;
63822 + unsigned long flags;
63823 +
63824 + if (!n)
63825 + return;
63826 +
63827 + if (ZERO_OR_NULL_PTR(ptr))
63828 + goto report;
63829 +
63830 + if (!virt_addr_valid(ptr))
63831 + return;
63832 +
63833 + sp = slob_page(ptr);
63834 + if (!PageSlab((struct page*)sp)) {
63835 + if (object_is_on_stack(ptr, n) == -1)
63836 + goto report;
63837 + return;
63838 + }
63839 +
63840 + if (sp->size) {
63841 + base = page_address(&sp->page);
63842 + if (base <= ptr && n <= sp->size - (ptr - base))
63843 + return;
63844 + goto report;
63845 + }
63846 +
63847 + /* some tricky double walking to find the chunk */
63848 + spin_lock_irqsave(&slob_lock, flags);
63849 + base = (void *)((unsigned long)ptr & PAGE_MASK);
63850 + free = sp->free;
63851 +
63852 + while (!slob_last(free) && (void *)free <= ptr) {
63853 + base = free + slob_units(free);
63854 + free = slob_next(free);
63855 + }
63856 +
63857 + while (base < (void *)free) {
63858 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
63859 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
63860 + int offset;
63861 +
63862 + if (ptr < base + align)
63863 + break;
63864 +
63865 + offset = ptr - base - align;
63866 + if (offset >= m) {
63867 + base += size;
63868 + continue;
63869 + }
63870 +
63871 + if (n > m - offset)
63872 + break;
63873 +
63874 + spin_unlock_irqrestore(&slob_lock, flags);
63875 + return;
63876 + }
63877 +
63878 + spin_unlock_irqrestore(&slob_lock, flags);
63879 +report:
63880 + pax_report_usercopy(ptr, n, to, NULL);
63881 +#endif
63882 +
63883 +}
63884 +EXPORT_SYMBOL(check_object_size);
63885 +
63886 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
63887 size_t ksize(const void *block)
63888 {
63889 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
63890 sp = slob_page(block);
63891 if (is_slob_page(sp)) {
63892 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
63893 - unsigned int *m = (unsigned int *)(block - align);
63894 - return SLOB_UNITS(*m) * SLOB_UNIT;
63895 + slob_t *m = (slob_t *)(block - align);
63896 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
63897 } else
63898 - return sp->page.private;
63899 + return sp->size;
63900 }
63901 EXPORT_SYMBOL(ksize);
63902
63903 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
63904 {
63905 struct kmem_cache *c;
63906
63907 +#ifdef CONFIG_PAX_USERCOPY
63908 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
63909 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
63910 +#else
63911 c = slob_alloc(sizeof(struct kmem_cache),
63912 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
63913 +#endif
63914
63915 if (c) {
63916 c->name = name;
63917 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
63918 {
63919 void *b;
63920
63921 +#ifdef CONFIG_PAX_USERCOPY
63922 + b = __kmalloc_node_align(c->size, flags, node, c->align);
63923 +#else
63924 if (c->size < PAGE_SIZE) {
63925 b = slob_alloc(c->size, flags, c->align, node);
63926 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63927 SLOB_UNITS(c->size) * SLOB_UNIT,
63928 flags, node);
63929 } else {
63930 + struct slob_page *sp;
63931 +
63932 b = slob_new_pages(flags, get_order(c->size), node);
63933 + sp = slob_page(b);
63934 + sp->size = c->size;
63935 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
63936 PAGE_SIZE << get_order(c->size),
63937 flags, node);
63938 }
63939 +#endif
63940
63941 if (c->ctor)
63942 c->ctor(b);
63943 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
63944
63945 static void __kmem_cache_free(void *b, int size)
63946 {
63947 - if (size < PAGE_SIZE)
63948 + struct slob_page *sp = slob_page(b);
63949 +
63950 + if (is_slob_page(sp))
63951 slob_free(b, size);
63952 - else
63953 + else {
63954 + clear_slob_page(sp);
63955 + free_slob_page(sp);
63956 + sp->size = 0;
63957 slob_free_pages(b, get_order(size));
63958 + }
63959 }
63960
63961 static void kmem_rcu_free(struct rcu_head *head)
63962 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
63963
63964 void kmem_cache_free(struct kmem_cache *c, void *b)
63965 {
63966 + int size = c->size;
63967 +
63968 +#ifdef CONFIG_PAX_USERCOPY
63969 + if (size + c->align < PAGE_SIZE) {
63970 + size += c->align;
63971 + b -= c->align;
63972 + }
63973 +#endif
63974 +
63975 kmemleak_free_recursive(b, c->flags);
63976 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
63977 struct slob_rcu *slob_rcu;
63978 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
63979 - slob_rcu->size = c->size;
63980 + slob_rcu = b + (size - sizeof(struct slob_rcu));
63981 + slob_rcu->size = size;
63982 call_rcu(&slob_rcu->head, kmem_rcu_free);
63983 } else {
63984 - __kmem_cache_free(b, c->size);
63985 + __kmem_cache_free(b, size);
63986 }
63987
63988 +#ifdef CONFIG_PAX_USERCOPY
63989 + trace_kfree(_RET_IP_, b);
63990 +#else
63991 trace_kmem_cache_free(_RET_IP_, b);
63992 +#endif
63993 +
63994 }
63995 EXPORT_SYMBOL(kmem_cache_free);
63996
63997 diff -urNp linux-3.0.3/mm/slub.c linux-3.0.3/mm/slub.c
63998 --- linux-3.0.3/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
63999 +++ linux-3.0.3/mm/slub.c 2011-08-23 21:48:14.000000000 -0400
64000 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
64001 if (!t->addr)
64002 return;
64003
64004 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64005 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64006 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64007 }
64008
64009 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
64010
64011 page = virt_to_head_page(x);
64012
64013 + BUG_ON(!PageSlab(page));
64014 +
64015 slab_free(s, page, x, _RET_IP_);
64016
64017 trace_kmem_cache_free(_RET_IP_, x);
64018 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
64019 * Merge control. If this is set then no merging of slab caches will occur.
64020 * (Could be removed. This was introduced to pacify the merge skeptics.)
64021 */
64022 -static int slub_nomerge;
64023 +static int slub_nomerge = 1;
64024
64025 /*
64026 * Calculate the order of allocation given an slab object size.
64027 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
64028 * list to avoid pounding the page allocator excessively.
64029 */
64030 set_min_partial(s, ilog2(s->size));
64031 - s->refcount = 1;
64032 + atomic_set(&s->refcount, 1);
64033 #ifdef CONFIG_NUMA
64034 s->remote_node_defrag_ratio = 1000;
64035 #endif
64036 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
64037 void kmem_cache_destroy(struct kmem_cache *s)
64038 {
64039 down_write(&slub_lock);
64040 - s->refcount--;
64041 - if (!s->refcount) {
64042 + if (atomic_dec_and_test(&s->refcount)) {
64043 list_del(&s->list);
64044 if (kmem_cache_close(s)) {
64045 printk(KERN_ERR "SLUB %s: %s called for cache that "
64046 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
64047 EXPORT_SYMBOL(__kmalloc_node);
64048 #endif
64049
64050 +void check_object_size(const void *ptr, unsigned long n, bool to)
64051 +{
64052 +
64053 +#ifdef CONFIG_PAX_USERCOPY
64054 + struct page *page;
64055 + struct kmem_cache *s = NULL;
64056 + unsigned long offset;
64057 +
64058 + if (!n)
64059 + return;
64060 +
64061 + if (ZERO_OR_NULL_PTR(ptr))
64062 + goto report;
64063 +
64064 + if (!virt_addr_valid(ptr))
64065 + return;
64066 +
64067 + page = virt_to_head_page(ptr);
64068 +
64069 + if (!PageSlab(page)) {
64070 + if (object_is_on_stack(ptr, n) == -1)
64071 + goto report;
64072 + return;
64073 + }
64074 +
64075 + s = page->slab;
64076 + if (!(s->flags & SLAB_USERCOPY))
64077 + goto report;
64078 +
64079 + offset = (ptr - page_address(page)) % s->size;
64080 + if (offset <= s->objsize && n <= s->objsize - offset)
64081 + return;
64082 +
64083 +report:
64084 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64085 +#endif
64086 +
64087 +}
64088 +EXPORT_SYMBOL(check_object_size);
64089 +
64090 size_t ksize(const void *object)
64091 {
64092 struct page *page;
64093 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
64094 int node;
64095
64096 list_add(&s->list, &slab_caches);
64097 - s->refcount = -1;
64098 + atomic_set(&s->refcount, -1);
64099
64100 for_each_node_state(node, N_NORMAL_MEMORY) {
64101 struct kmem_cache_node *n = get_node(s, node);
64102 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
64103
64104 /* Caches that are not of the two-to-the-power-of size */
64105 if (KMALLOC_MIN_SIZE <= 32) {
64106 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64107 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64108 caches++;
64109 }
64110
64111 if (KMALLOC_MIN_SIZE <= 64) {
64112 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64113 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64114 caches++;
64115 }
64116
64117 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64118 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64119 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64120 caches++;
64121 }
64122
64123 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
64124 /*
64125 * We may have set a slab to be unmergeable during bootstrap.
64126 */
64127 - if (s->refcount < 0)
64128 + if (atomic_read(&s->refcount) < 0)
64129 return 1;
64130
64131 return 0;
64132 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
64133 down_write(&slub_lock);
64134 s = find_mergeable(size, align, flags, name, ctor);
64135 if (s) {
64136 - s->refcount++;
64137 + atomic_inc(&s->refcount);
64138 /*
64139 * Adjust the object sizes so that we clear
64140 * the complete object on kzalloc.
64141 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
64142 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64143
64144 if (sysfs_slab_alias(s, name)) {
64145 - s->refcount--;
64146 + atomic_dec(&s->refcount);
64147 goto err;
64148 }
64149 up_write(&slub_lock);
64150 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
64151
64152 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64153 {
64154 - return sprintf(buf, "%d\n", s->refcount - 1);
64155 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64156 }
64157 SLAB_ATTR_RO(aliases);
64158
64159 @@ -4894,7 +4935,13 @@ static const struct file_operations proc
64160
64161 static int __init slab_proc_init(void)
64162 {
64163 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64164 + mode_t gr_mode = S_IRUGO;
64165 +
64166 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64167 + gr_mode = S_IRUSR;
64168 +#endif
64169 +
64170 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64171 return 0;
64172 }
64173 module_init(slab_proc_init);
64174 diff -urNp linux-3.0.3/mm/swap.c linux-3.0.3/mm/swap.c
64175 --- linux-3.0.3/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
64176 +++ linux-3.0.3/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
64177 @@ -31,6 +31,7 @@
64178 #include <linux/backing-dev.h>
64179 #include <linux/memcontrol.h>
64180 #include <linux/gfp.h>
64181 +#include <linux/hugetlb.h>
64182
64183 #include "internal.h"
64184
64185 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64186
64187 __page_cache_release(page);
64188 dtor = get_compound_page_dtor(page);
64189 + if (!PageHuge(page))
64190 + BUG_ON(dtor != free_compound_page);
64191 (*dtor)(page);
64192 }
64193
64194 diff -urNp linux-3.0.3/mm/swapfile.c linux-3.0.3/mm/swapfile.c
64195 --- linux-3.0.3/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
64196 +++ linux-3.0.3/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
64197 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
64198
64199 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64200 /* Activity counter to indicate that a swapon or swapoff has occurred */
64201 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64202 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64203
64204 static inline unsigned char swap_count(unsigned char ent)
64205 {
64206 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64207 }
64208 filp_close(swap_file, NULL);
64209 err = 0;
64210 - atomic_inc(&proc_poll_event);
64211 + atomic_inc_unchecked(&proc_poll_event);
64212 wake_up_interruptible(&proc_poll_wait);
64213
64214 out_dput:
64215 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
64216
64217 poll_wait(file, &proc_poll_wait, wait);
64218
64219 - if (s->event != atomic_read(&proc_poll_event)) {
64220 - s->event = atomic_read(&proc_poll_event);
64221 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64222 + s->event = atomic_read_unchecked(&proc_poll_event);
64223 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64224 }
64225
64226 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
64227 }
64228
64229 s->seq.private = s;
64230 - s->event = atomic_read(&proc_poll_event);
64231 + s->event = atomic_read_unchecked(&proc_poll_event);
64232 return ret;
64233 }
64234
64235 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64236 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64237
64238 mutex_unlock(&swapon_mutex);
64239 - atomic_inc(&proc_poll_event);
64240 + atomic_inc_unchecked(&proc_poll_event);
64241 wake_up_interruptible(&proc_poll_wait);
64242
64243 if (S_ISREG(inode->i_mode))
64244 diff -urNp linux-3.0.3/mm/util.c linux-3.0.3/mm/util.c
64245 --- linux-3.0.3/mm/util.c 2011-07-21 22:17:23.000000000 -0400
64246 +++ linux-3.0.3/mm/util.c 2011-08-23 21:47:56.000000000 -0400
64247 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
64248 * allocated buffer. Use this if you don't want to free the buffer immediately
64249 * like, for example, with RCU.
64250 */
64251 +#undef __krealloc
64252 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64253 {
64254 void *ret;
64255 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
64256 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64257 * %NULL pointer, the object pointed to is freed.
64258 */
64259 +#undef krealloc
64260 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64261 {
64262 void *ret;
64263 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
64264 void arch_pick_mmap_layout(struct mm_struct *mm)
64265 {
64266 mm->mmap_base = TASK_UNMAPPED_BASE;
64267 +
64268 +#ifdef CONFIG_PAX_RANDMMAP
64269 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64270 + mm->mmap_base += mm->delta_mmap;
64271 +#endif
64272 +
64273 mm->get_unmapped_area = arch_get_unmapped_area;
64274 mm->unmap_area = arch_unmap_area;
64275 }
64276 diff -urNp linux-3.0.3/mm/vmalloc.c linux-3.0.3/mm/vmalloc.c
64277 --- linux-3.0.3/mm/vmalloc.c 2011-08-23 21:44:40.000000000 -0400
64278 +++ linux-3.0.3/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
64279 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64280
64281 pte = pte_offset_kernel(pmd, addr);
64282 do {
64283 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64284 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64285 +
64286 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64287 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64288 + BUG_ON(!pte_exec(*pte));
64289 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64290 + continue;
64291 + }
64292 +#endif
64293 +
64294 + {
64295 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64296 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64297 + }
64298 } while (pte++, addr += PAGE_SIZE, addr != end);
64299 }
64300
64301 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64302 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64303 {
64304 pte_t *pte;
64305 + int ret = -ENOMEM;
64306
64307 /*
64308 * nr is a running index into the array which helps higher level
64309 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64310 pte = pte_alloc_kernel(pmd, addr);
64311 if (!pte)
64312 return -ENOMEM;
64313 +
64314 + pax_open_kernel();
64315 do {
64316 struct page *page = pages[*nr];
64317
64318 - if (WARN_ON(!pte_none(*pte)))
64319 - return -EBUSY;
64320 - if (WARN_ON(!page))
64321 - return -ENOMEM;
64322 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64323 + if (pgprot_val(prot) & _PAGE_NX)
64324 +#endif
64325 +
64326 + if (WARN_ON(!pte_none(*pte))) {
64327 + ret = -EBUSY;
64328 + goto out;
64329 + }
64330 + if (WARN_ON(!page)) {
64331 + ret = -ENOMEM;
64332 + goto out;
64333 + }
64334 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64335 (*nr)++;
64336 } while (pte++, addr += PAGE_SIZE, addr != end);
64337 - return 0;
64338 + ret = 0;
64339 +out:
64340 + pax_close_kernel();
64341 + return ret;
64342 }
64343
64344 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64345 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64346 * and fall back on vmalloc() if that fails. Others
64347 * just put it in the vmalloc space.
64348 */
64349 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64350 +#ifdef CONFIG_MODULES
64351 +#ifdef MODULES_VADDR
64352 unsigned long addr = (unsigned long)x;
64353 if (addr >= MODULES_VADDR && addr < MODULES_END)
64354 return 1;
64355 #endif
64356 +
64357 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64358 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64359 + return 1;
64360 +#endif
64361 +
64362 +#endif
64363 +
64364 return is_vmalloc_addr(x);
64365 }
64366
64367 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64368
64369 if (!pgd_none(*pgd)) {
64370 pud_t *pud = pud_offset(pgd, addr);
64371 +#ifdef CONFIG_X86
64372 + if (!pud_large(*pud))
64373 +#endif
64374 if (!pud_none(*pud)) {
64375 pmd_t *pmd = pmd_offset(pud, addr);
64376 +#ifdef CONFIG_X86
64377 + if (!pmd_large(*pmd))
64378 +#endif
64379 if (!pmd_none(*pmd)) {
64380 pte_t *ptep, pte;
64381
64382 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
64383 struct vm_struct *area;
64384
64385 BUG_ON(in_interrupt());
64386 +
64387 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64388 + if (flags & VM_KERNEXEC) {
64389 + if (start != VMALLOC_START || end != VMALLOC_END)
64390 + return NULL;
64391 + start = (unsigned long)MODULES_EXEC_VADDR;
64392 + end = (unsigned long)MODULES_EXEC_END;
64393 + }
64394 +#endif
64395 +
64396 if (flags & VM_IOREMAP) {
64397 int bit = fls(size);
64398
64399 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
64400 if (count > totalram_pages)
64401 return NULL;
64402
64403 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64404 + if (!(pgprot_val(prot) & _PAGE_NX))
64405 + flags |= VM_KERNEXEC;
64406 +#endif
64407 +
64408 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64409 __builtin_return_address(0));
64410 if (!area)
64411 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
64412 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64413 return NULL;
64414
64415 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64416 + if (!(pgprot_val(prot) & _PAGE_NX))
64417 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64418 + node, gfp_mask, caller);
64419 + else
64420 +#endif
64421 +
64422 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64423 gfp_mask, caller);
64424
64425 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
64426 gfp_mask, prot, node, caller);
64427 }
64428
64429 +#undef __vmalloc
64430 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64431 {
64432 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64433 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
64434 * For tight control over page level allocator and protection flags
64435 * use __vmalloc() instead.
64436 */
64437 +#undef vmalloc
64438 void *vmalloc(unsigned long size)
64439 {
64440 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64441 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
64442 * For tight control over page level allocator and protection flags
64443 * use __vmalloc() instead.
64444 */
64445 +#undef vzalloc
64446 void *vzalloc(unsigned long size)
64447 {
64448 return __vmalloc_node_flags(size, -1,
64449 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
64450 * The resulting memory area is zeroed so it can be mapped to userspace
64451 * without leaking data.
64452 */
64453 +#undef vmalloc_user
64454 void *vmalloc_user(unsigned long size)
64455 {
64456 struct vm_struct *area;
64457 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
64458 * For tight control over page level allocator and protection flags
64459 * use __vmalloc() instead.
64460 */
64461 +#undef vmalloc_node
64462 void *vmalloc_node(unsigned long size, int node)
64463 {
64464 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64465 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
64466 * For tight control over page level allocator and protection flags
64467 * use __vmalloc_node() instead.
64468 */
64469 +#undef vzalloc_node
64470 void *vzalloc_node(unsigned long size, int node)
64471 {
64472 return __vmalloc_node_flags(size, node,
64473 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
64474 * For tight control over page level allocator and protection flags
64475 * use __vmalloc() instead.
64476 */
64477 -
64478 +#undef vmalloc_exec
64479 void *vmalloc_exec(unsigned long size)
64480 {
64481 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64482 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64483 -1, __builtin_return_address(0));
64484 }
64485
64486 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
64487 * Allocate enough 32bit PA addressable pages to cover @size from the
64488 * page level allocator and map them into contiguous kernel virtual space.
64489 */
64490 +#undef vmalloc_32
64491 void *vmalloc_32(unsigned long size)
64492 {
64493 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64494 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
64495 * The resulting memory area is 32bit addressable and zeroed so it can be
64496 * mapped to userspace without leaking data.
64497 */
64498 +#undef vmalloc_32_user
64499 void *vmalloc_32_user(unsigned long size)
64500 {
64501 struct vm_struct *area;
64502 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
64503 unsigned long uaddr = vma->vm_start;
64504 unsigned long usize = vma->vm_end - vma->vm_start;
64505
64506 + BUG_ON(vma->vm_mirror);
64507 +
64508 if ((PAGE_SIZE-1) & (unsigned long)addr)
64509 return -EINVAL;
64510
64511 diff -urNp linux-3.0.3/mm/vmstat.c linux-3.0.3/mm/vmstat.c
64512 --- linux-3.0.3/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
64513 +++ linux-3.0.3/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
64514 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64515 *
64516 * vm_stat contains the global counters
64517 */
64518 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64519 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64520 EXPORT_SYMBOL(vm_stat);
64521
64522 #ifdef CONFIG_SMP
64523 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64524 v = p->vm_stat_diff[i];
64525 p->vm_stat_diff[i] = 0;
64526 local_irq_restore(flags);
64527 - atomic_long_add(v, &zone->vm_stat[i]);
64528 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64529 global_diff[i] += v;
64530 #ifdef CONFIG_NUMA
64531 /* 3 seconds idle till flush */
64532 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64533
64534 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64535 if (global_diff[i])
64536 - atomic_long_add(global_diff[i], &vm_stat[i]);
64537 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64538 }
64539
64540 #endif
64541 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
64542 start_cpu_timer(cpu);
64543 #endif
64544 #ifdef CONFIG_PROC_FS
64545 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64546 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64547 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64548 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64549 + {
64550 + mode_t gr_mode = S_IRUGO;
64551 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64552 + gr_mode = S_IRUSR;
64553 +#endif
64554 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64555 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64556 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64557 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64558 +#else
64559 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64560 +#endif
64561 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64562 + }
64563 #endif
64564 return 0;
64565 }
64566 diff -urNp linux-3.0.3/net/8021q/vlan.c linux-3.0.3/net/8021q/vlan.c
64567 --- linux-3.0.3/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
64568 +++ linux-3.0.3/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
64569 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
64570 err = -EPERM;
64571 if (!capable(CAP_NET_ADMIN))
64572 break;
64573 - if ((args.u.name_type >= 0) &&
64574 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64575 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64576 struct vlan_net *vn;
64577
64578 vn = net_generic(net, vlan_net_id);
64579 diff -urNp linux-3.0.3/net/atm/atm_misc.c linux-3.0.3/net/atm/atm_misc.c
64580 --- linux-3.0.3/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
64581 +++ linux-3.0.3/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
64582 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64583 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64584 return 1;
64585 atm_return(vcc, truesize);
64586 - atomic_inc(&vcc->stats->rx_drop);
64587 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64588 return 0;
64589 }
64590 EXPORT_SYMBOL(atm_charge);
64591 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64592 }
64593 }
64594 atm_return(vcc, guess);
64595 - atomic_inc(&vcc->stats->rx_drop);
64596 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64597 return NULL;
64598 }
64599 EXPORT_SYMBOL(atm_alloc_charge);
64600 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64601
64602 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64603 {
64604 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64605 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64606 __SONET_ITEMS
64607 #undef __HANDLE_ITEM
64608 }
64609 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64610
64611 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64612 {
64613 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64614 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64615 __SONET_ITEMS
64616 #undef __HANDLE_ITEM
64617 }
64618 diff -urNp linux-3.0.3/net/atm/lec.h linux-3.0.3/net/atm/lec.h
64619 --- linux-3.0.3/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
64620 +++ linux-3.0.3/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
64621 @@ -48,7 +48,7 @@ struct lane2_ops {
64622 const u8 *tlvs, u32 sizeoftlvs);
64623 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64624 const u8 *tlvs, u32 sizeoftlvs);
64625 -};
64626 +} __no_const;
64627
64628 /*
64629 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64630 diff -urNp linux-3.0.3/net/atm/mpc.h linux-3.0.3/net/atm/mpc.h
64631 --- linux-3.0.3/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
64632 +++ linux-3.0.3/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
64633 @@ -33,7 +33,7 @@ struct mpoa_client {
64634 struct mpc_parameters parameters; /* parameters for this client */
64635
64636 const struct net_device_ops *old_ops;
64637 - struct net_device_ops new_ops;
64638 + net_device_ops_no_const new_ops;
64639 };
64640
64641
64642 diff -urNp linux-3.0.3/net/atm/mpoa_caches.c linux-3.0.3/net/atm/mpoa_caches.c
64643 --- linux-3.0.3/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
64644 +++ linux-3.0.3/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
64645 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64646 struct timeval now;
64647 struct k_message msg;
64648
64649 + pax_track_stack();
64650 +
64651 do_gettimeofday(&now);
64652
64653 read_lock_bh(&client->ingress_lock);
64654 diff -urNp linux-3.0.3/net/atm/proc.c linux-3.0.3/net/atm/proc.c
64655 --- linux-3.0.3/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
64656 +++ linux-3.0.3/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
64657 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64658 const struct k_atm_aal_stats *stats)
64659 {
64660 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64661 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64662 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64663 - atomic_read(&stats->rx_drop));
64664 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64665 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64666 + atomic_read_unchecked(&stats->rx_drop));
64667 }
64668
64669 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64670 diff -urNp linux-3.0.3/net/atm/resources.c linux-3.0.3/net/atm/resources.c
64671 --- linux-3.0.3/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
64672 +++ linux-3.0.3/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
64673 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
64674 static void copy_aal_stats(struct k_atm_aal_stats *from,
64675 struct atm_aal_stats *to)
64676 {
64677 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64678 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64679 __AAL_STAT_ITEMS
64680 #undef __HANDLE_ITEM
64681 }
64682 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
64683 static void subtract_aal_stats(struct k_atm_aal_stats *from,
64684 struct atm_aal_stats *to)
64685 {
64686 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64687 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
64688 __AAL_STAT_ITEMS
64689 #undef __HANDLE_ITEM
64690 }
64691 diff -urNp linux-3.0.3/net/batman-adv/hard-interface.c linux-3.0.3/net/batman-adv/hard-interface.c
64692 --- linux-3.0.3/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
64693 +++ linux-3.0.3/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
64694 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
64695 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
64696 dev_add_pack(&hard_iface->batman_adv_ptype);
64697
64698 - atomic_set(&hard_iface->seqno, 1);
64699 - atomic_set(&hard_iface->frag_seqno, 1);
64700 + atomic_set_unchecked(&hard_iface->seqno, 1);
64701 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
64702 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
64703 hard_iface->net_dev->name);
64704
64705 diff -urNp linux-3.0.3/net/batman-adv/routing.c linux-3.0.3/net/batman-adv/routing.c
64706 --- linux-3.0.3/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
64707 +++ linux-3.0.3/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
64708 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
64709 return;
64710
64711 /* could be changed by schedule_own_packet() */
64712 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
64713 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
64714
64715 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
64716
64717 diff -urNp linux-3.0.3/net/batman-adv/send.c linux-3.0.3/net/batman-adv/send.c
64718 --- linux-3.0.3/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
64719 +++ linux-3.0.3/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
64720 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
64721
64722 /* change sequence number to network order */
64723 batman_packet->seqno =
64724 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
64725 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
64726
64727 if (vis_server == VIS_TYPE_SERVER_SYNC)
64728 batman_packet->flags |= VIS_SERVER;
64729 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
64730 else
64731 batman_packet->gw_flags = 0;
64732
64733 - atomic_inc(&hard_iface->seqno);
64734 + atomic_inc_unchecked(&hard_iface->seqno);
64735
64736 slide_own_bcast_window(hard_iface);
64737 send_time = own_send_time(bat_priv);
64738 diff -urNp linux-3.0.3/net/batman-adv/soft-interface.c linux-3.0.3/net/batman-adv/soft-interface.c
64739 --- linux-3.0.3/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
64740 +++ linux-3.0.3/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
64741 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
64742
64743 /* set broadcast sequence number */
64744 bcast_packet->seqno =
64745 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
64746 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
64747
64748 add_bcast_packet_to_list(bat_priv, skb);
64749
64750 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
64751 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
64752
64753 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
64754 - atomic_set(&bat_priv->bcast_seqno, 1);
64755 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
64756 atomic_set(&bat_priv->tt_local_changed, 0);
64757
64758 bat_priv->primary_if = NULL;
64759 diff -urNp linux-3.0.3/net/batman-adv/types.h linux-3.0.3/net/batman-adv/types.h
64760 --- linux-3.0.3/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
64761 +++ linux-3.0.3/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
64762 @@ -38,8 +38,8 @@ struct hard_iface {
64763 int16_t if_num;
64764 char if_status;
64765 struct net_device *net_dev;
64766 - atomic_t seqno;
64767 - atomic_t frag_seqno;
64768 + atomic_unchecked_t seqno;
64769 + atomic_unchecked_t frag_seqno;
64770 unsigned char *packet_buff;
64771 int packet_len;
64772 struct kobject *hardif_obj;
64773 @@ -142,7 +142,7 @@ struct bat_priv {
64774 atomic_t orig_interval; /* uint */
64775 atomic_t hop_penalty; /* uint */
64776 atomic_t log_level; /* uint */
64777 - atomic_t bcast_seqno;
64778 + atomic_unchecked_t bcast_seqno;
64779 atomic_t bcast_queue_left;
64780 atomic_t batman_queue_left;
64781 char num_ifaces;
64782 diff -urNp linux-3.0.3/net/batman-adv/unicast.c linux-3.0.3/net/batman-adv/unicast.c
64783 --- linux-3.0.3/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
64784 +++ linux-3.0.3/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
64785 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
64786 frag1->flags = UNI_FRAG_HEAD | large_tail;
64787 frag2->flags = large_tail;
64788
64789 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
64790 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
64791 frag1->seqno = htons(seqno - 1);
64792 frag2->seqno = htons(seqno);
64793
64794 diff -urNp linux-3.0.3/net/bridge/br_multicast.c linux-3.0.3/net/bridge/br_multicast.c
64795 --- linux-3.0.3/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
64796 +++ linux-3.0.3/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
64797 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
64798 nexthdr = ip6h->nexthdr;
64799 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
64800
64801 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
64802 + if (nexthdr != IPPROTO_ICMPV6)
64803 return 0;
64804
64805 /* Okay, we found ICMPv6 header */
64806 diff -urNp linux-3.0.3/net/bridge/netfilter/ebtables.c linux-3.0.3/net/bridge/netfilter/ebtables.c
64807 --- linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
64808 +++ linux-3.0.3/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
64809 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
64810 tmp.valid_hooks = t->table->valid_hooks;
64811 }
64812 mutex_unlock(&ebt_mutex);
64813 - if (copy_to_user(user, &tmp, *len) != 0){
64814 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
64815 BUGPRINT("c2u Didn't work\n");
64816 ret = -EFAULT;
64817 break;
64818 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
64819 int ret;
64820 void __user *pos;
64821
64822 + pax_track_stack();
64823 +
64824 memset(&tinfo, 0, sizeof(tinfo));
64825
64826 if (cmd == EBT_SO_GET_ENTRIES) {
64827 diff -urNp linux-3.0.3/net/caif/caif_socket.c linux-3.0.3/net/caif/caif_socket.c
64828 --- linux-3.0.3/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
64829 +++ linux-3.0.3/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
64830 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
64831 #ifdef CONFIG_DEBUG_FS
64832 struct debug_fs_counter {
64833 atomic_t caif_nr_socks;
64834 - atomic_t caif_sock_create;
64835 - atomic_t num_connect_req;
64836 - atomic_t num_connect_resp;
64837 - atomic_t num_connect_fail_resp;
64838 - atomic_t num_disconnect;
64839 - atomic_t num_remote_shutdown_ind;
64840 - atomic_t num_tx_flow_off_ind;
64841 - atomic_t num_tx_flow_on_ind;
64842 - atomic_t num_rx_flow_off;
64843 - atomic_t num_rx_flow_on;
64844 + atomic_unchecked_t caif_sock_create;
64845 + atomic_unchecked_t num_connect_req;
64846 + atomic_unchecked_t num_connect_resp;
64847 + atomic_unchecked_t num_connect_fail_resp;
64848 + atomic_unchecked_t num_disconnect;
64849 + atomic_unchecked_t num_remote_shutdown_ind;
64850 + atomic_unchecked_t num_tx_flow_off_ind;
64851 + atomic_unchecked_t num_tx_flow_on_ind;
64852 + atomic_unchecked_t num_rx_flow_off;
64853 + atomic_unchecked_t num_rx_flow_on;
64854 };
64855 static struct debug_fs_counter cnt;
64856 #define dbfs_atomic_inc(v) atomic_inc_return(v)
64857 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
64858 #define dbfs_atomic_dec(v) atomic_dec_return(v)
64859 #else
64860 #define dbfs_atomic_inc(v) 0
64861 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
64862 atomic_read(&cf_sk->sk.sk_rmem_alloc),
64863 sk_rcvbuf_lowwater(cf_sk));
64864 set_rx_flow_off(cf_sk);
64865 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64866 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64867 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64868 }
64869
64870 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
64871 set_rx_flow_off(cf_sk);
64872 if (net_ratelimit())
64873 pr_debug("sending flow OFF due to rmem_schedule\n");
64874 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
64875 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
64876 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
64877 }
64878 skb->dev = NULL;
64879 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
64880 switch (flow) {
64881 case CAIF_CTRLCMD_FLOW_ON_IND:
64882 /* OK from modem to start sending again */
64883 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
64884 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
64885 set_tx_flow_on(cf_sk);
64886 cf_sk->sk.sk_state_change(&cf_sk->sk);
64887 break;
64888
64889 case CAIF_CTRLCMD_FLOW_OFF_IND:
64890 /* Modem asks us to shut up */
64891 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
64892 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
64893 set_tx_flow_off(cf_sk);
64894 cf_sk->sk.sk_state_change(&cf_sk->sk);
64895 break;
64896 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
64897 /* We're now connected */
64898 caif_client_register_refcnt(&cf_sk->layer,
64899 cfsk_hold, cfsk_put);
64900 - dbfs_atomic_inc(&cnt.num_connect_resp);
64901 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
64902 cf_sk->sk.sk_state = CAIF_CONNECTED;
64903 set_tx_flow_on(cf_sk);
64904 cf_sk->sk.sk_state_change(&cf_sk->sk);
64905 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
64906
64907 case CAIF_CTRLCMD_INIT_FAIL_RSP:
64908 /* Connect request failed */
64909 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
64910 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
64911 cf_sk->sk.sk_err = ECONNREFUSED;
64912 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
64913 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64914 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
64915
64916 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
64917 /* Modem has closed this connection, or device is down. */
64918 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
64919 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
64920 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
64921 cf_sk->sk.sk_err = ECONNRESET;
64922 set_rx_flow_on(cf_sk);
64923 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
64924 return;
64925
64926 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
64927 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
64928 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
64929 set_rx_flow_on(cf_sk);
64930 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
64931 }
64932 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
64933 /*ifindex = id of the interface.*/
64934 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
64935
64936 - dbfs_atomic_inc(&cnt.num_connect_req);
64937 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
64938 cf_sk->layer.receive = caif_sktrecv_cb;
64939
64940 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
64941 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
64942 spin_unlock_bh(&sk->sk_receive_queue.lock);
64943 sock->sk = NULL;
64944
64945 - dbfs_atomic_inc(&cnt.num_disconnect);
64946 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
64947
64948 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
64949 if (cf_sk->debugfs_socket_dir != NULL)
64950 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
64951 cf_sk->conn_req.protocol = protocol;
64952 /* Increase the number of sockets created. */
64953 dbfs_atomic_inc(&cnt.caif_nr_socks);
64954 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
64955 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
64956 #ifdef CONFIG_DEBUG_FS
64957 if (!IS_ERR(debugfsdir)) {
64958
64959 diff -urNp linux-3.0.3/net/caif/cfctrl.c linux-3.0.3/net/caif/cfctrl.c
64960 --- linux-3.0.3/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
64961 +++ linux-3.0.3/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
64962 @@ -9,6 +9,7 @@
64963 #include <linux/stddef.h>
64964 #include <linux/spinlock.h>
64965 #include <linux/slab.h>
64966 +#include <linux/sched.h>
64967 #include <net/caif/caif_layer.h>
64968 #include <net/caif/cfpkt.h>
64969 #include <net/caif/cfctrl.h>
64970 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
64971 dev_info.id = 0xff;
64972 memset(this, 0, sizeof(*this));
64973 cfsrvl_init(&this->serv, 0, &dev_info, false);
64974 - atomic_set(&this->req_seq_no, 1);
64975 - atomic_set(&this->rsp_seq_no, 1);
64976 + atomic_set_unchecked(&this->req_seq_no, 1);
64977 + atomic_set_unchecked(&this->rsp_seq_no, 1);
64978 this->serv.layer.receive = cfctrl_recv;
64979 sprintf(this->serv.layer.name, "ctrl");
64980 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
64981 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
64982 struct cfctrl_request_info *req)
64983 {
64984 spin_lock_bh(&ctrl->info_list_lock);
64985 - atomic_inc(&ctrl->req_seq_no);
64986 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
64987 + atomic_inc_unchecked(&ctrl->req_seq_no);
64988 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
64989 list_add_tail(&req->list, &ctrl->list);
64990 spin_unlock_bh(&ctrl->info_list_lock);
64991 }
64992 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
64993 if (p != first)
64994 pr_warn("Requests are not received in order\n");
64995
64996 - atomic_set(&ctrl->rsp_seq_no,
64997 + atomic_set_unchecked(&ctrl->rsp_seq_no,
64998 p->sequence_no);
64999 list_del(&p->list);
65000 goto out;
65001 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
65002 struct cfctrl *cfctrl = container_obj(layer);
65003 struct cfctrl_request_info rsp, *req;
65004
65005 + pax_track_stack();
65006
65007 cfpkt_extr_head(pkt, &cmdrsp, 1);
65008 cmd = cmdrsp & CFCTRL_CMD_MASK;
65009 diff -urNp linux-3.0.3/net/core/datagram.c linux-3.0.3/net/core/datagram.c
65010 --- linux-3.0.3/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
65011 +++ linux-3.0.3/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
65012 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65013 }
65014
65015 kfree_skb(skb);
65016 - atomic_inc(&sk->sk_drops);
65017 + atomic_inc_unchecked(&sk->sk_drops);
65018 sk_mem_reclaim_partial(sk);
65019
65020 return err;
65021 diff -urNp linux-3.0.3/net/core/dev.c linux-3.0.3/net/core/dev.c
65022 --- linux-3.0.3/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
65023 +++ linux-3.0.3/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
65024 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65025 if (no_module && capable(CAP_NET_ADMIN))
65026 no_module = request_module("netdev-%s", name);
65027 if (no_module && capable(CAP_SYS_MODULE)) {
65028 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65029 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65030 +#else
65031 if (!request_module("%s", name))
65032 pr_err("Loading kernel module for a network device "
65033 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65034 "instead\n", name);
65035 +#endif
65036 }
65037 }
65038 EXPORT_SYMBOL(dev_load);
65039 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
65040
65041 struct dev_gso_cb {
65042 void (*destructor)(struct sk_buff *skb);
65043 -};
65044 +} __no_const;
65045
65046 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65047
65048 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
65049 }
65050 EXPORT_SYMBOL(netif_rx_ni);
65051
65052 -static void net_tx_action(struct softirq_action *h)
65053 +static void net_tx_action(void)
65054 {
65055 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65056
65057 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
65058 }
65059 EXPORT_SYMBOL(netif_napi_del);
65060
65061 -static void net_rx_action(struct softirq_action *h)
65062 +static void net_rx_action(void)
65063 {
65064 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65065 unsigned long time_limit = jiffies + 2;
65066 diff -urNp linux-3.0.3/net/core/flow.c linux-3.0.3/net/core/flow.c
65067 --- linux-3.0.3/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
65068 +++ linux-3.0.3/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
65069 @@ -60,7 +60,7 @@ struct flow_cache {
65070 struct timer_list rnd_timer;
65071 };
65072
65073 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65074 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65075 EXPORT_SYMBOL(flow_cache_genid);
65076 static struct flow_cache flow_cache_global;
65077 static struct kmem_cache *flow_cachep __read_mostly;
65078 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65079
65080 static int flow_entry_valid(struct flow_cache_entry *fle)
65081 {
65082 - if (atomic_read(&flow_cache_genid) != fle->genid)
65083 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65084 return 0;
65085 if (fle->object && !fle->object->ops->check(fle->object))
65086 return 0;
65087 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65088 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65089 fcp->hash_count++;
65090 }
65091 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65092 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65093 flo = fle->object;
65094 if (!flo)
65095 goto ret_object;
65096 @@ -274,7 +274,7 @@ nocache:
65097 }
65098 flo = resolver(net, key, family, dir, flo, ctx);
65099 if (fle) {
65100 - fle->genid = atomic_read(&flow_cache_genid);
65101 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65102 if (!IS_ERR(flo))
65103 fle->object = flo;
65104 else
65105 diff -urNp linux-3.0.3/net/core/rtnetlink.c linux-3.0.3/net/core/rtnetlink.c
65106 --- linux-3.0.3/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
65107 +++ linux-3.0.3/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
65108 @@ -56,7 +56,7 @@
65109 struct rtnl_link {
65110 rtnl_doit_func doit;
65111 rtnl_dumpit_func dumpit;
65112 -};
65113 +} __no_const;
65114
65115 static DEFINE_MUTEX(rtnl_mutex);
65116
65117 diff -urNp linux-3.0.3/net/core/skbuff.c linux-3.0.3/net/core/skbuff.c
65118 --- linux-3.0.3/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
65119 +++ linux-3.0.3/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
65120 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
65121 struct sock *sk = skb->sk;
65122 int ret = 0;
65123
65124 + pax_track_stack();
65125 +
65126 if (splice_grow_spd(pipe, &spd))
65127 return -ENOMEM;
65128
65129 diff -urNp linux-3.0.3/net/core/sock.c linux-3.0.3/net/core/sock.c
65130 --- linux-3.0.3/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
65131 +++ linux-3.0.3/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
65132 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65133 */
65134 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65135 (unsigned)sk->sk_rcvbuf) {
65136 - atomic_inc(&sk->sk_drops);
65137 + atomic_inc_unchecked(&sk->sk_drops);
65138 return -ENOMEM;
65139 }
65140
65141 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65142 return err;
65143
65144 if (!sk_rmem_schedule(sk, skb->truesize)) {
65145 - atomic_inc(&sk->sk_drops);
65146 + atomic_inc_unchecked(&sk->sk_drops);
65147 return -ENOBUFS;
65148 }
65149
65150 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65151 skb_dst_force(skb);
65152
65153 spin_lock_irqsave(&list->lock, flags);
65154 - skb->dropcount = atomic_read(&sk->sk_drops);
65155 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65156 __skb_queue_tail(list, skb);
65157 spin_unlock_irqrestore(&list->lock, flags);
65158
65159 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65160 skb->dev = NULL;
65161
65162 if (sk_rcvqueues_full(sk, skb)) {
65163 - atomic_inc(&sk->sk_drops);
65164 + atomic_inc_unchecked(&sk->sk_drops);
65165 goto discard_and_relse;
65166 }
65167 if (nested)
65168 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65169 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65170 } else if (sk_add_backlog(sk, skb)) {
65171 bh_unlock_sock(sk);
65172 - atomic_inc(&sk->sk_drops);
65173 + atomic_inc_unchecked(&sk->sk_drops);
65174 goto discard_and_relse;
65175 }
65176
65177 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
65178 if (len > sizeof(peercred))
65179 len = sizeof(peercred);
65180 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
65181 - if (copy_to_user(optval, &peercred, len))
65182 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
65183 return -EFAULT;
65184 goto lenout;
65185 }
65186 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65187 return -ENOTCONN;
65188 if (lv < len)
65189 return -EINVAL;
65190 - if (copy_to_user(optval, address, len))
65191 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65192 return -EFAULT;
65193 goto lenout;
65194 }
65195 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65196
65197 if (len > lv)
65198 len = lv;
65199 - if (copy_to_user(optval, &v, len))
65200 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65201 return -EFAULT;
65202 lenout:
65203 if (put_user(len, optlen))
65204 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65205 */
65206 smp_wmb();
65207 atomic_set(&sk->sk_refcnt, 1);
65208 - atomic_set(&sk->sk_drops, 0);
65209 + atomic_set_unchecked(&sk->sk_drops, 0);
65210 }
65211 EXPORT_SYMBOL(sock_init_data);
65212
65213 diff -urNp linux-3.0.3/net/decnet/sysctl_net_decnet.c linux-3.0.3/net/decnet/sysctl_net_decnet.c
65214 --- linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
65215 +++ linux-3.0.3/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
65216 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65217
65218 if (len > *lenp) len = *lenp;
65219
65220 - if (copy_to_user(buffer, addr, len))
65221 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65222 return -EFAULT;
65223
65224 *lenp = len;
65225 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65226
65227 if (len > *lenp) len = *lenp;
65228
65229 - if (copy_to_user(buffer, devname, len))
65230 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65231 return -EFAULT;
65232
65233 *lenp = len;
65234 diff -urNp linux-3.0.3/net/econet/Kconfig linux-3.0.3/net/econet/Kconfig
65235 --- linux-3.0.3/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
65236 +++ linux-3.0.3/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
65237 @@ -4,7 +4,7 @@
65238
65239 config ECONET
65240 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65241 - depends on EXPERIMENTAL && INET
65242 + depends on EXPERIMENTAL && INET && BROKEN
65243 ---help---
65244 Econet is a fairly old and slow networking protocol mainly used by
65245 Acorn computers to access file and print servers. It uses native
65246 diff -urNp linux-3.0.3/net/ipv4/fib_frontend.c linux-3.0.3/net/ipv4/fib_frontend.c
65247 --- linux-3.0.3/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
65248 +++ linux-3.0.3/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
65249 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
65250 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65251 fib_sync_up(dev);
65252 #endif
65253 - atomic_inc(&net->ipv4.dev_addr_genid);
65254 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65255 rt_cache_flush(dev_net(dev), -1);
65256 break;
65257 case NETDEV_DOWN:
65258 fib_del_ifaddr(ifa, NULL);
65259 - atomic_inc(&net->ipv4.dev_addr_genid);
65260 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65261 if (ifa->ifa_dev->ifa_list == NULL) {
65262 /* Last address was deleted from this interface.
65263 * Disable IP.
65264 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
65265 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65266 fib_sync_up(dev);
65267 #endif
65268 - atomic_inc(&net->ipv4.dev_addr_genid);
65269 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65270 rt_cache_flush(dev_net(dev), -1);
65271 break;
65272 case NETDEV_DOWN:
65273 diff -urNp linux-3.0.3/net/ipv4/fib_semantics.c linux-3.0.3/net/ipv4/fib_semantics.c
65274 --- linux-3.0.3/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
65275 +++ linux-3.0.3/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
65276 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
65277 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65278 nh->nh_gw,
65279 nh->nh_parent->fib_scope);
65280 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65281 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65282
65283 return nh->nh_saddr;
65284 }
65285 diff -urNp linux-3.0.3/net/ipv4/inet_diag.c linux-3.0.3/net/ipv4/inet_diag.c
65286 --- linux-3.0.3/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
65287 +++ linux-3.0.3/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
65288 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65289 r->idiag_retrans = 0;
65290
65291 r->id.idiag_if = sk->sk_bound_dev_if;
65292 +
65293 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65294 + r->id.idiag_cookie[0] = 0;
65295 + r->id.idiag_cookie[1] = 0;
65296 +#else
65297 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65298 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65299 +#endif
65300
65301 r->id.idiag_sport = inet->inet_sport;
65302 r->id.idiag_dport = inet->inet_dport;
65303 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65304 r->idiag_family = tw->tw_family;
65305 r->idiag_retrans = 0;
65306 r->id.idiag_if = tw->tw_bound_dev_if;
65307 +
65308 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65309 + r->id.idiag_cookie[0] = 0;
65310 + r->id.idiag_cookie[1] = 0;
65311 +#else
65312 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65313 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65314 +#endif
65315 +
65316 r->id.idiag_sport = tw->tw_sport;
65317 r->id.idiag_dport = tw->tw_dport;
65318 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65319 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65320 if (sk == NULL)
65321 goto unlock;
65322
65323 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65324 err = -ESTALE;
65325 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65326 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65327 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65328 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65329 goto out;
65330 +#endif
65331
65332 err = -ENOMEM;
65333 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65334 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65335 r->idiag_retrans = req->retrans;
65336
65337 r->id.idiag_if = sk->sk_bound_dev_if;
65338 +
65339 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65340 + r->id.idiag_cookie[0] = 0;
65341 + r->id.idiag_cookie[1] = 0;
65342 +#else
65343 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65344 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65345 +#endif
65346
65347 tmo = req->expires - jiffies;
65348 if (tmo < 0)
65349 diff -urNp linux-3.0.3/net/ipv4/inet_hashtables.c linux-3.0.3/net/ipv4/inet_hashtables.c
65350 --- linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:44:40.000000000 -0400
65351 +++ linux-3.0.3/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
65352 @@ -18,12 +18,15 @@
65353 #include <linux/sched.h>
65354 #include <linux/slab.h>
65355 #include <linux/wait.h>
65356 +#include <linux/security.h>
65357
65358 #include <net/inet_connection_sock.h>
65359 #include <net/inet_hashtables.h>
65360 #include <net/secure_seq.h>
65361 #include <net/ip.h>
65362
65363 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65364 +
65365 /*
65366 * Allocate and initialize a new local port bind bucket.
65367 * The bindhash mutex for snum's hash chain must be held here.
65368 @@ -530,6 +533,8 @@ ok:
65369 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65370 spin_unlock(&head->lock);
65371
65372 + gr_update_task_in_ip_table(current, inet_sk(sk));
65373 +
65374 if (tw) {
65375 inet_twsk_deschedule(tw, death_row);
65376 while (twrefcnt) {
65377 diff -urNp linux-3.0.3/net/ipv4/inetpeer.c linux-3.0.3/net/ipv4/inetpeer.c
65378 --- linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:44:40.000000000 -0400
65379 +++ linux-3.0.3/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
65380 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
65381 unsigned int sequence;
65382 int invalidated, newrefcnt = 0;
65383
65384 + pax_track_stack();
65385 +
65386 /* Look up for the address quickly, lockless.
65387 * Because of a concurrent writer, we might not find an existing entry.
65388 */
65389 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
65390 if (p) {
65391 p->daddr = *daddr;
65392 atomic_set(&p->refcnt, 1);
65393 - atomic_set(&p->rid, 0);
65394 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65395 + atomic_set_unchecked(&p->rid, 0);
65396 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65397 p->tcp_ts_stamp = 0;
65398 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65399 p->rate_tokens = 0;
65400 diff -urNp linux-3.0.3/net/ipv4/ip_fragment.c linux-3.0.3/net/ipv4/ip_fragment.c
65401 --- linux-3.0.3/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
65402 +++ linux-3.0.3/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
65403 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
65404 return 0;
65405
65406 start = qp->rid;
65407 - end = atomic_inc_return(&peer->rid);
65408 + end = atomic_inc_return_unchecked(&peer->rid);
65409 qp->rid = end;
65410
65411 rc = qp->q.fragments && (end - start) > max;
65412 diff -urNp linux-3.0.3/net/ipv4/ip_sockglue.c linux-3.0.3/net/ipv4/ip_sockglue.c
65413 --- linux-3.0.3/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65414 +++ linux-3.0.3/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65415 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
65416 int val;
65417 int len;
65418
65419 + pax_track_stack();
65420 +
65421 if (level != SOL_IP)
65422 return -EOPNOTSUPP;
65423
65424 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
65425 len = min_t(unsigned int, len, opt->optlen);
65426 if (put_user(len, optlen))
65427 return -EFAULT;
65428 - if (copy_to_user(optval, opt->__data, len))
65429 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
65430 + copy_to_user(optval, opt->__data, len))
65431 return -EFAULT;
65432 return 0;
65433 }
65434 diff -urNp linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c
65435 --- linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
65436 +++ linux-3.0.3/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
65437 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65438
65439 *len = 0;
65440
65441 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65442 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65443 if (*octets == NULL) {
65444 if (net_ratelimit())
65445 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65446 diff -urNp linux-3.0.3/net/ipv4/ping.c linux-3.0.3/net/ipv4/ping.c
65447 --- linux-3.0.3/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
65448 +++ linux-3.0.3/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
65449 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
65450 sk_rmem_alloc_get(sp),
65451 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65452 atomic_read(&sp->sk_refcnt), sp,
65453 - atomic_read(&sp->sk_drops), len);
65454 + atomic_read_unchecked(&sp->sk_drops), len);
65455 }
65456
65457 static int ping_seq_show(struct seq_file *seq, void *v)
65458 diff -urNp linux-3.0.3/net/ipv4/raw.c linux-3.0.3/net/ipv4/raw.c
65459 --- linux-3.0.3/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
65460 +++ linux-3.0.3/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
65461 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65462 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65463 {
65464 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65465 - atomic_inc(&sk->sk_drops);
65466 + atomic_inc_unchecked(&sk->sk_drops);
65467 kfree_skb(skb);
65468 return NET_RX_DROP;
65469 }
65470 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
65471
65472 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65473 {
65474 + struct icmp_filter filter;
65475 +
65476 if (optlen > sizeof(struct icmp_filter))
65477 optlen = sizeof(struct icmp_filter);
65478 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65479 + if (copy_from_user(&filter, optval, optlen))
65480 return -EFAULT;
65481 + raw_sk(sk)->filter = filter;
65482 return 0;
65483 }
65484
65485 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65486 {
65487 int len, ret = -EFAULT;
65488 + struct icmp_filter filter;
65489
65490 if (get_user(len, optlen))
65491 goto out;
65492 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
65493 if (len > sizeof(struct icmp_filter))
65494 len = sizeof(struct icmp_filter);
65495 ret = -EFAULT;
65496 - if (put_user(len, optlen) ||
65497 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65498 + filter = raw_sk(sk)->filter;
65499 + if (put_user(len, optlen) || len > sizeof filter ||
65500 + copy_to_user(optval, &filter, len))
65501 goto out;
65502 ret = 0;
65503 out: return ret;
65504 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
65505 sk_wmem_alloc_get(sp),
65506 sk_rmem_alloc_get(sp),
65507 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65508 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65509 + atomic_read(&sp->sk_refcnt),
65510 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65511 + NULL,
65512 +#else
65513 + sp,
65514 +#endif
65515 + atomic_read_unchecked(&sp->sk_drops));
65516 }
65517
65518 static int raw_seq_show(struct seq_file *seq, void *v)
65519 diff -urNp linux-3.0.3/net/ipv4/route.c linux-3.0.3/net/ipv4/route.c
65520 --- linux-3.0.3/net/ipv4/route.c 2011-08-23 21:44:40.000000000 -0400
65521 +++ linux-3.0.3/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
65522 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
65523
65524 static inline int rt_genid(struct net *net)
65525 {
65526 - return atomic_read(&net->ipv4.rt_genid);
65527 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65528 }
65529
65530 #ifdef CONFIG_PROC_FS
65531 @@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
65532 unsigned char shuffle;
65533
65534 get_random_bytes(&shuffle, sizeof(shuffle));
65535 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65536 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65537 }
65538
65539 /*
65540 @@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
65541 error = rt->dst.error;
65542 if (peer) {
65543 inet_peer_refcheck(rt->peer);
65544 - id = atomic_read(&peer->ip_id_count) & 0xffff;
65545 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
65546 if (peer->tcp_ts_stamp) {
65547 ts = peer->tcp_ts;
65548 tsage = get_seconds() - peer->tcp_ts_stamp;
65549 diff -urNp linux-3.0.3/net/ipv4/tcp.c linux-3.0.3/net/ipv4/tcp.c
65550 --- linux-3.0.3/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
65551 +++ linux-3.0.3/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
65552 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
65553 int val;
65554 int err = 0;
65555
65556 + pax_track_stack();
65557 +
65558 /* These are data/string values, all the others are ints */
65559 switch (optname) {
65560 case TCP_CONGESTION: {
65561 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
65562 struct tcp_sock *tp = tcp_sk(sk);
65563 int val, len;
65564
65565 + pax_track_stack();
65566 +
65567 if (get_user(len, optlen))
65568 return -EFAULT;
65569
65570 diff -urNp linux-3.0.3/net/ipv4/tcp_ipv4.c linux-3.0.3/net/ipv4/tcp_ipv4.c
65571 --- linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:44:40.000000000 -0400
65572 +++ linux-3.0.3/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
65573 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65574 int sysctl_tcp_low_latency __read_mostly;
65575 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65576
65577 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65578 +extern int grsec_enable_blackhole;
65579 +#endif
65580
65581 #ifdef CONFIG_TCP_MD5SIG
65582 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65583 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65584 return 0;
65585
65586 reset:
65587 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65588 + if (!grsec_enable_blackhole)
65589 +#endif
65590 tcp_v4_send_reset(rsk, skb);
65591 discard:
65592 kfree_skb(skb);
65593 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65594 TCP_SKB_CB(skb)->sacked = 0;
65595
65596 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65597 - if (!sk)
65598 + if (!sk) {
65599 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65600 + ret = 1;
65601 +#endif
65602 goto no_tcp_socket;
65603 -
65604 + }
65605 process:
65606 - if (sk->sk_state == TCP_TIME_WAIT)
65607 + if (sk->sk_state == TCP_TIME_WAIT) {
65608 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65609 + ret = 2;
65610 +#endif
65611 goto do_time_wait;
65612 + }
65613
65614 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65615 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65616 @@ -1724,6 +1737,10 @@ no_tcp_socket:
65617 bad_packet:
65618 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65619 } else {
65620 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65621 + if (!grsec_enable_blackhole || (ret == 1 &&
65622 + (skb->dev->flags & IFF_LOOPBACK)))
65623 +#endif
65624 tcp_v4_send_reset(NULL, skb);
65625 }
65626
65627 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
65628 0, /* non standard timer */
65629 0, /* open_requests have no inode */
65630 atomic_read(&sk->sk_refcnt),
65631 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65632 + NULL,
65633 +#else
65634 req,
65635 +#endif
65636 len);
65637 }
65638
65639 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
65640 sock_i_uid(sk),
65641 icsk->icsk_probes_out,
65642 sock_i_ino(sk),
65643 - atomic_read(&sk->sk_refcnt), sk,
65644 + atomic_read(&sk->sk_refcnt),
65645 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65646 + NULL,
65647 +#else
65648 + sk,
65649 +#endif
65650 jiffies_to_clock_t(icsk->icsk_rto),
65651 jiffies_to_clock_t(icsk->icsk_ack.ato),
65652 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
65653 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
65654 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
65655 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
65656 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
65657 - atomic_read(&tw->tw_refcnt), tw, len);
65658 + atomic_read(&tw->tw_refcnt),
65659 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65660 + NULL,
65661 +#else
65662 + tw,
65663 +#endif
65664 + len);
65665 }
65666
65667 #define TMPSZ 150
65668 diff -urNp linux-3.0.3/net/ipv4/tcp_minisocks.c linux-3.0.3/net/ipv4/tcp_minisocks.c
65669 --- linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
65670 +++ linux-3.0.3/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
65671 @@ -27,6 +27,10 @@
65672 #include <net/inet_common.h>
65673 #include <net/xfrm.h>
65674
65675 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65676 +extern int grsec_enable_blackhole;
65677 +#endif
65678 +
65679 int sysctl_tcp_syncookies __read_mostly = 1;
65680 EXPORT_SYMBOL(sysctl_tcp_syncookies);
65681
65682 @@ -745,6 +749,10 @@ listen_overflow:
65683
65684 embryonic_reset:
65685 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
65686 +
65687 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65688 + if (!grsec_enable_blackhole)
65689 +#endif
65690 if (!(flg & TCP_FLAG_RST))
65691 req->rsk_ops->send_reset(sk, skb);
65692
65693 diff -urNp linux-3.0.3/net/ipv4/tcp_output.c linux-3.0.3/net/ipv4/tcp_output.c
65694 --- linux-3.0.3/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
65695 +++ linux-3.0.3/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
65696 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
65697 int mss;
65698 int s_data_desired = 0;
65699
65700 + pax_track_stack();
65701 +
65702 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
65703 s_data_desired = cvp->s_data_desired;
65704 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
65705 diff -urNp linux-3.0.3/net/ipv4/tcp_probe.c linux-3.0.3/net/ipv4/tcp_probe.c
65706 --- linux-3.0.3/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
65707 +++ linux-3.0.3/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
65708 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
65709 if (cnt + width >= len)
65710 break;
65711
65712 - if (copy_to_user(buf + cnt, tbuf, width))
65713 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
65714 return -EFAULT;
65715 cnt += width;
65716 }
65717 diff -urNp linux-3.0.3/net/ipv4/tcp_timer.c linux-3.0.3/net/ipv4/tcp_timer.c
65718 --- linux-3.0.3/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
65719 +++ linux-3.0.3/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
65720 @@ -22,6 +22,10 @@
65721 #include <linux/gfp.h>
65722 #include <net/tcp.h>
65723
65724 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65725 +extern int grsec_lastack_retries;
65726 +#endif
65727 +
65728 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
65729 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
65730 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
65731 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
65732 }
65733 }
65734
65735 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65736 + if ((sk->sk_state == TCP_LAST_ACK) &&
65737 + (grsec_lastack_retries > 0) &&
65738 + (grsec_lastack_retries < retry_until))
65739 + retry_until = grsec_lastack_retries;
65740 +#endif
65741 +
65742 if (retransmits_timed_out(sk, retry_until,
65743 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
65744 /* Has it gone just too far? */
65745 diff -urNp linux-3.0.3/net/ipv4/udp.c linux-3.0.3/net/ipv4/udp.c
65746 --- linux-3.0.3/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
65747 +++ linux-3.0.3/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
65748 @@ -86,6 +86,7 @@
65749 #include <linux/types.h>
65750 #include <linux/fcntl.h>
65751 #include <linux/module.h>
65752 +#include <linux/security.h>
65753 #include <linux/socket.h>
65754 #include <linux/sockios.h>
65755 #include <linux/igmp.h>
65756 @@ -107,6 +108,10 @@
65757 #include <net/xfrm.h>
65758 #include "udp_impl.h"
65759
65760 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65761 +extern int grsec_enable_blackhole;
65762 +#endif
65763 +
65764 struct udp_table udp_table __read_mostly;
65765 EXPORT_SYMBOL(udp_table);
65766
65767 @@ -564,6 +569,9 @@ found:
65768 return s;
65769 }
65770
65771 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
65772 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
65773 +
65774 /*
65775 * This routine is called by the ICMP module when it gets some
65776 * sort of error condition. If err < 0 then the socket should
65777 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
65778 dport = usin->sin_port;
65779 if (dport == 0)
65780 return -EINVAL;
65781 +
65782 + err = gr_search_udp_sendmsg(sk, usin);
65783 + if (err)
65784 + return err;
65785 } else {
65786 if (sk->sk_state != TCP_ESTABLISHED)
65787 return -EDESTADDRREQ;
65788 +
65789 + err = gr_search_udp_sendmsg(sk, NULL);
65790 + if (err)
65791 + return err;
65792 +
65793 daddr = inet->inet_daddr;
65794 dport = inet->inet_dport;
65795 /* Open fast path for connected socket.
65796 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
65797 udp_lib_checksum_complete(skb)) {
65798 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65799 IS_UDPLITE(sk));
65800 - atomic_inc(&sk->sk_drops);
65801 + atomic_inc_unchecked(&sk->sk_drops);
65802 __skb_unlink(skb, rcvq);
65803 __skb_queue_tail(&list_kill, skb);
65804 }
65805 @@ -1184,6 +1201,10 @@ try_again:
65806 if (!skb)
65807 goto out;
65808
65809 + err = gr_search_udp_recvmsg(sk, skb);
65810 + if (err)
65811 + goto out_free;
65812 +
65813 ulen = skb->len - sizeof(struct udphdr);
65814 if (len > ulen)
65815 len = ulen;
65816 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
65817
65818 drop:
65819 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
65820 - atomic_inc(&sk->sk_drops);
65821 + atomic_inc_unchecked(&sk->sk_drops);
65822 kfree_skb(skb);
65823 return -1;
65824 }
65825 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
65826 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
65827
65828 if (!skb1) {
65829 - atomic_inc(&sk->sk_drops);
65830 + atomic_inc_unchecked(&sk->sk_drops);
65831 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
65832 IS_UDPLITE(sk));
65833 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
65834 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
65835 goto csum_error;
65836
65837 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
65838 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65839 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
65840 +#endif
65841 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
65842
65843 /*
65844 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
65845 sk_wmem_alloc_get(sp),
65846 sk_rmem_alloc_get(sp),
65847 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65848 - atomic_read(&sp->sk_refcnt), sp,
65849 - atomic_read(&sp->sk_drops), len);
65850 + atomic_read(&sp->sk_refcnt),
65851 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65852 + NULL,
65853 +#else
65854 + sp,
65855 +#endif
65856 + atomic_read_unchecked(&sp->sk_drops), len);
65857 }
65858
65859 int udp4_seq_show(struct seq_file *seq, void *v)
65860 diff -urNp linux-3.0.3/net/ipv6/inet6_connection_sock.c linux-3.0.3/net/ipv6/inet6_connection_sock.c
65861 --- linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
65862 +++ linux-3.0.3/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
65863 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
65864 #ifdef CONFIG_XFRM
65865 {
65866 struct rt6_info *rt = (struct rt6_info *)dst;
65867 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
65868 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
65869 }
65870 #endif
65871 }
65872 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
65873 #ifdef CONFIG_XFRM
65874 if (dst) {
65875 struct rt6_info *rt = (struct rt6_info *)dst;
65876 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
65877 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
65878 __sk_dst_reset(sk);
65879 dst = NULL;
65880 }
65881 diff -urNp linux-3.0.3/net/ipv6/ipv6_sockglue.c linux-3.0.3/net/ipv6/ipv6_sockglue.c
65882 --- linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
65883 +++ linux-3.0.3/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
65884 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
65885 int val, valbool;
65886 int retv = -ENOPROTOOPT;
65887
65888 + pax_track_stack();
65889 +
65890 if (optval == NULL)
65891 val=0;
65892 else {
65893 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
65894 int len;
65895 int val;
65896
65897 + pax_track_stack();
65898 +
65899 if (ip6_mroute_opt(optname))
65900 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
65901
65902 diff -urNp linux-3.0.3/net/ipv6/raw.c linux-3.0.3/net/ipv6/raw.c
65903 --- linux-3.0.3/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
65904 +++ linux-3.0.3/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
65905 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
65906 {
65907 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
65908 skb_checksum_complete(skb)) {
65909 - atomic_inc(&sk->sk_drops);
65910 + atomic_inc_unchecked(&sk->sk_drops);
65911 kfree_skb(skb);
65912 return NET_RX_DROP;
65913 }
65914 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65915 struct raw6_sock *rp = raw6_sk(sk);
65916
65917 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
65918 - atomic_inc(&sk->sk_drops);
65919 + atomic_inc_unchecked(&sk->sk_drops);
65920 kfree_skb(skb);
65921 return NET_RX_DROP;
65922 }
65923 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
65924
65925 if (inet->hdrincl) {
65926 if (skb_checksum_complete(skb)) {
65927 - atomic_inc(&sk->sk_drops);
65928 + atomic_inc_unchecked(&sk->sk_drops);
65929 kfree_skb(skb);
65930 return NET_RX_DROP;
65931 }
65932 @@ -601,7 +601,7 @@ out:
65933 return err;
65934 }
65935
65936 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
65937 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
65938 struct flowi6 *fl6, struct dst_entry **dstp,
65939 unsigned int flags)
65940 {
65941 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
65942 u16 proto;
65943 int err;
65944
65945 + pax_track_stack();
65946 +
65947 /* Rough check on arithmetic overflow,
65948 better check is made in ip6_append_data().
65949 */
65950 @@ -909,12 +911,15 @@ do_confirm:
65951 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
65952 char __user *optval, int optlen)
65953 {
65954 + struct icmp6_filter filter;
65955 +
65956 switch (optname) {
65957 case ICMPV6_FILTER:
65958 if (optlen > sizeof(struct icmp6_filter))
65959 optlen = sizeof(struct icmp6_filter);
65960 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
65961 + if (copy_from_user(&filter, optval, optlen))
65962 return -EFAULT;
65963 + raw6_sk(sk)->filter = filter;
65964 return 0;
65965 default:
65966 return -ENOPROTOOPT;
65967 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
65968 char __user *optval, int __user *optlen)
65969 {
65970 int len;
65971 + struct icmp6_filter filter;
65972
65973 switch (optname) {
65974 case ICMPV6_FILTER:
65975 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
65976 len = sizeof(struct icmp6_filter);
65977 if (put_user(len, optlen))
65978 return -EFAULT;
65979 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
65980 + filter = raw6_sk(sk)->filter;
65981 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
65982 return -EFAULT;
65983 return 0;
65984 default:
65985 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
65986 0, 0L, 0,
65987 sock_i_uid(sp), 0,
65988 sock_i_ino(sp),
65989 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65990 + atomic_read(&sp->sk_refcnt),
65991 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65992 + NULL,
65993 +#else
65994 + sp,
65995 +#endif
65996 + atomic_read_unchecked(&sp->sk_drops));
65997 }
65998
65999 static int raw6_seq_show(struct seq_file *seq, void *v)
66000 diff -urNp linux-3.0.3/net/ipv6/tcp_ipv6.c linux-3.0.3/net/ipv6/tcp_ipv6.c
66001 --- linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:44:40.000000000 -0400
66002 +++ linux-3.0.3/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
66003 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66004 }
66005 #endif
66006
66007 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66008 +extern int grsec_enable_blackhole;
66009 +#endif
66010 +
66011 static void tcp_v6_hash(struct sock *sk)
66012 {
66013 if (sk->sk_state != TCP_CLOSE) {
66014 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66015 return 0;
66016
66017 reset:
66018 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66019 + if (!grsec_enable_blackhole)
66020 +#endif
66021 tcp_v6_send_reset(sk, skb);
66022 discard:
66023 if (opt_skb)
66024 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66025 TCP_SKB_CB(skb)->sacked = 0;
66026
66027 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66028 - if (!sk)
66029 + if (!sk) {
66030 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66031 + ret = 1;
66032 +#endif
66033 goto no_tcp_socket;
66034 + }
66035
66036 process:
66037 - if (sk->sk_state == TCP_TIME_WAIT)
66038 + if (sk->sk_state == TCP_TIME_WAIT) {
66039 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66040 + ret = 2;
66041 +#endif
66042 goto do_time_wait;
66043 + }
66044
66045 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66046 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66047 @@ -1794,6 +1809,10 @@ no_tcp_socket:
66048 bad_packet:
66049 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66050 } else {
66051 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66052 + if (!grsec_enable_blackhole || (ret == 1 &&
66053 + (skb->dev->flags & IFF_LOOPBACK)))
66054 +#endif
66055 tcp_v6_send_reset(NULL, skb);
66056 }
66057
66058 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
66059 uid,
66060 0, /* non standard timer */
66061 0, /* open_requests have no inode */
66062 - 0, req);
66063 + 0,
66064 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66065 + NULL
66066 +#else
66067 + req
66068 +#endif
66069 + );
66070 }
66071
66072 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66073 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
66074 sock_i_uid(sp),
66075 icsk->icsk_probes_out,
66076 sock_i_ino(sp),
66077 - atomic_read(&sp->sk_refcnt), sp,
66078 + atomic_read(&sp->sk_refcnt),
66079 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66080 + NULL,
66081 +#else
66082 + sp,
66083 +#endif
66084 jiffies_to_clock_t(icsk->icsk_rto),
66085 jiffies_to_clock_t(icsk->icsk_ack.ato),
66086 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66087 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
66088 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66089 tw->tw_substate, 0, 0,
66090 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66091 - atomic_read(&tw->tw_refcnt), tw);
66092 + atomic_read(&tw->tw_refcnt),
66093 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66094 + NULL
66095 +#else
66096 + tw
66097 +#endif
66098 + );
66099 }
66100
66101 static int tcp6_seq_show(struct seq_file *seq, void *v)
66102 diff -urNp linux-3.0.3/net/ipv6/udp.c linux-3.0.3/net/ipv6/udp.c
66103 --- linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:44:40.000000000 -0400
66104 +++ linux-3.0.3/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
66105 @@ -50,6 +50,10 @@
66106 #include <linux/seq_file.h>
66107 #include "udp_impl.h"
66108
66109 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66110 +extern int grsec_enable_blackhole;
66111 +#endif
66112 +
66113 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66114 {
66115 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66116 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66117
66118 return 0;
66119 drop:
66120 - atomic_inc(&sk->sk_drops);
66121 + atomic_inc_unchecked(&sk->sk_drops);
66122 drop_no_sk_drops_inc:
66123 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66124 kfree_skb(skb);
66125 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66126 continue;
66127 }
66128 drop:
66129 - atomic_inc(&sk->sk_drops);
66130 + atomic_inc_unchecked(&sk->sk_drops);
66131 UDP6_INC_STATS_BH(sock_net(sk),
66132 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66133 UDP6_INC_STATS_BH(sock_net(sk),
66134 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66135 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66136 proto == IPPROTO_UDPLITE);
66137
66138 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66139 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66140 +#endif
66141 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66142
66143 kfree_skb(skb);
66144 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66145 if (!sock_owned_by_user(sk))
66146 udpv6_queue_rcv_skb(sk, skb);
66147 else if (sk_add_backlog(sk, skb)) {
66148 - atomic_inc(&sk->sk_drops);
66149 + atomic_inc_unchecked(&sk->sk_drops);
66150 bh_unlock_sock(sk);
66151 sock_put(sk);
66152 goto discard;
66153 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66154 0, 0L, 0,
66155 sock_i_uid(sp), 0,
66156 sock_i_ino(sp),
66157 - atomic_read(&sp->sk_refcnt), sp,
66158 - atomic_read(&sp->sk_drops));
66159 + atomic_read(&sp->sk_refcnt),
66160 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66161 + NULL,
66162 +#else
66163 + sp,
66164 +#endif
66165 + atomic_read_unchecked(&sp->sk_drops));
66166 }
66167
66168 int udp6_seq_show(struct seq_file *seq, void *v)
66169 diff -urNp linux-3.0.3/net/irda/ircomm/ircomm_tty.c linux-3.0.3/net/irda/ircomm/ircomm_tty.c
66170 --- linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
66171 +++ linux-3.0.3/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
66172 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
66173 add_wait_queue(&self->open_wait, &wait);
66174
66175 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66176 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66177 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66178
66179 /* As far as I can see, we protect open_count - Jean II */
66180 spin_lock_irqsave(&self->spinlock, flags);
66181 if (!tty_hung_up_p(filp)) {
66182 extra_count = 1;
66183 - self->open_count--;
66184 + local_dec(&self->open_count);
66185 }
66186 spin_unlock_irqrestore(&self->spinlock, flags);
66187 - self->blocked_open++;
66188 + local_inc(&self->blocked_open);
66189
66190 while (1) {
66191 if (tty->termios->c_cflag & CBAUD) {
66192 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
66193 }
66194
66195 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66196 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66197 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66198
66199 schedule();
66200 }
66201 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
66202 if (extra_count) {
66203 /* ++ is not atomic, so this should be protected - Jean II */
66204 spin_lock_irqsave(&self->spinlock, flags);
66205 - self->open_count++;
66206 + local_inc(&self->open_count);
66207 spin_unlock_irqrestore(&self->spinlock, flags);
66208 }
66209 - self->blocked_open--;
66210 + local_dec(&self->blocked_open);
66211
66212 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66213 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66214 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66215
66216 if (!retval)
66217 self->flags |= ASYNC_NORMAL_ACTIVE;
66218 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
66219 }
66220 /* ++ is not atomic, so this should be protected - Jean II */
66221 spin_lock_irqsave(&self->spinlock, flags);
66222 - self->open_count++;
66223 + local_inc(&self->open_count);
66224
66225 tty->driver_data = self;
66226 self->tty = tty;
66227 spin_unlock_irqrestore(&self->spinlock, flags);
66228
66229 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66230 - self->line, self->open_count);
66231 + self->line, local_read(&self->open_count));
66232
66233 /* Not really used by us, but lets do it anyway */
66234 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66235 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
66236 return;
66237 }
66238
66239 - if ((tty->count == 1) && (self->open_count != 1)) {
66240 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66241 /*
66242 * Uh, oh. tty->count is 1, which means that the tty
66243 * structure will be freed. state->count should always
66244 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
66245 */
66246 IRDA_DEBUG(0, "%s(), bad serial port count; "
66247 "tty->count is 1, state->count is %d\n", __func__ ,
66248 - self->open_count);
66249 - self->open_count = 1;
66250 + local_read(&self->open_count));
66251 + local_set(&self->open_count, 1);
66252 }
66253
66254 - if (--self->open_count < 0) {
66255 + if (local_dec_return(&self->open_count) < 0) {
66256 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66257 - __func__, self->line, self->open_count);
66258 - self->open_count = 0;
66259 + __func__, self->line, local_read(&self->open_count));
66260 + local_set(&self->open_count, 0);
66261 }
66262 - if (self->open_count) {
66263 + if (local_read(&self->open_count)) {
66264 spin_unlock_irqrestore(&self->spinlock, flags);
66265
66266 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66267 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
66268 tty->closing = 0;
66269 self->tty = NULL;
66270
66271 - if (self->blocked_open) {
66272 + if (local_read(&self->blocked_open)) {
66273 if (self->close_delay)
66274 schedule_timeout_interruptible(self->close_delay);
66275 wake_up_interruptible(&self->open_wait);
66276 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
66277 spin_lock_irqsave(&self->spinlock, flags);
66278 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66279 self->tty = NULL;
66280 - self->open_count = 0;
66281 + local_set(&self->open_count, 0);
66282 spin_unlock_irqrestore(&self->spinlock, flags);
66283
66284 wake_up_interruptible(&self->open_wait);
66285 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
66286 seq_putc(m, '\n');
66287
66288 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66289 - seq_printf(m, "Open count: %d\n", self->open_count);
66290 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66291 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66292 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66293
66294 diff -urNp linux-3.0.3/net/iucv/af_iucv.c linux-3.0.3/net/iucv/af_iucv.c
66295 --- linux-3.0.3/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
66296 +++ linux-3.0.3/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
66297 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
66298
66299 write_lock_bh(&iucv_sk_list.lock);
66300
66301 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66302 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66303 while (__iucv_get_sock_by_name(name)) {
66304 sprintf(name, "%08x",
66305 - atomic_inc_return(&iucv_sk_list.autobind_name));
66306 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66307 }
66308
66309 write_unlock_bh(&iucv_sk_list.lock);
66310 diff -urNp linux-3.0.3/net/key/af_key.c linux-3.0.3/net/key/af_key.c
66311 --- linux-3.0.3/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
66312 +++ linux-3.0.3/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
66313 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66314 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66315 struct xfrm_kmaddress k;
66316
66317 + pax_track_stack();
66318 +
66319 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66320 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66321 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66322 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66323 static u32 get_acqseq(void)
66324 {
66325 u32 res;
66326 - static atomic_t acqseq;
66327 + static atomic_unchecked_t acqseq;
66328
66329 do {
66330 - res = atomic_inc_return(&acqseq);
66331 + res = atomic_inc_return_unchecked(&acqseq);
66332 } while (!res);
66333 return res;
66334 }
66335 diff -urNp linux-3.0.3/net/lapb/lapb_iface.c linux-3.0.3/net/lapb/lapb_iface.c
66336 --- linux-3.0.3/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
66337 +++ linux-3.0.3/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
66338 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66339 goto out;
66340
66341 lapb->dev = dev;
66342 - lapb->callbacks = *callbacks;
66343 + lapb->callbacks = callbacks;
66344
66345 __lapb_insert_cb(lapb);
66346
66347 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66348
66349 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66350 {
66351 - if (lapb->callbacks.connect_confirmation)
66352 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66353 + if (lapb->callbacks->connect_confirmation)
66354 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66355 }
66356
66357 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66358 {
66359 - if (lapb->callbacks.connect_indication)
66360 - lapb->callbacks.connect_indication(lapb->dev, reason);
66361 + if (lapb->callbacks->connect_indication)
66362 + lapb->callbacks->connect_indication(lapb->dev, reason);
66363 }
66364
66365 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66366 {
66367 - if (lapb->callbacks.disconnect_confirmation)
66368 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66369 + if (lapb->callbacks->disconnect_confirmation)
66370 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66371 }
66372
66373 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66374 {
66375 - if (lapb->callbacks.disconnect_indication)
66376 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66377 + if (lapb->callbacks->disconnect_indication)
66378 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66379 }
66380
66381 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66382 {
66383 - if (lapb->callbacks.data_indication)
66384 - return lapb->callbacks.data_indication(lapb->dev, skb);
66385 + if (lapb->callbacks->data_indication)
66386 + return lapb->callbacks->data_indication(lapb->dev, skb);
66387
66388 kfree_skb(skb);
66389 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66390 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66391 {
66392 int used = 0;
66393
66394 - if (lapb->callbacks.data_transmit) {
66395 - lapb->callbacks.data_transmit(lapb->dev, skb);
66396 + if (lapb->callbacks->data_transmit) {
66397 + lapb->callbacks->data_transmit(lapb->dev, skb);
66398 used = 1;
66399 }
66400
66401 diff -urNp linux-3.0.3/net/mac80211/debugfs_sta.c linux-3.0.3/net/mac80211/debugfs_sta.c
66402 --- linux-3.0.3/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
66403 +++ linux-3.0.3/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
66404 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
66405 struct tid_ampdu_rx *tid_rx;
66406 struct tid_ampdu_tx *tid_tx;
66407
66408 + pax_track_stack();
66409 +
66410 rcu_read_lock();
66411
66412 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66413 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
66414 struct sta_info *sta = file->private_data;
66415 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66416
66417 + pax_track_stack();
66418 +
66419 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66420 htc->ht_supported ? "" : "not ");
66421 if (htc->ht_supported) {
66422 diff -urNp linux-3.0.3/net/mac80211/ieee80211_i.h linux-3.0.3/net/mac80211/ieee80211_i.h
66423 --- linux-3.0.3/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
66424 +++ linux-3.0.3/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
66425 @@ -27,6 +27,7 @@
66426 #include <net/ieee80211_radiotap.h>
66427 #include <net/cfg80211.h>
66428 #include <net/mac80211.h>
66429 +#include <asm/local.h>
66430 #include "key.h"
66431 #include "sta_info.h"
66432
66433 @@ -721,7 +722,7 @@ struct ieee80211_local {
66434 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66435 spinlock_t queue_stop_reason_lock;
66436
66437 - int open_count;
66438 + local_t open_count;
66439 int monitors, cooked_mntrs;
66440 /* number of interfaces with corresponding FIF_ flags */
66441 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66442 diff -urNp linux-3.0.3/net/mac80211/iface.c linux-3.0.3/net/mac80211/iface.c
66443 --- linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:44:40.000000000 -0400
66444 +++ linux-3.0.3/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
66445 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66446 break;
66447 }
66448
66449 - if (local->open_count == 0) {
66450 + if (local_read(&local->open_count) == 0) {
66451 res = drv_start(local);
66452 if (res)
66453 goto err_del_bss;
66454 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66455 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66456
66457 if (!is_valid_ether_addr(dev->dev_addr)) {
66458 - if (!local->open_count)
66459 + if (!local_read(&local->open_count))
66460 drv_stop(local);
66461 return -EADDRNOTAVAIL;
66462 }
66463 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66464 mutex_unlock(&local->mtx);
66465
66466 if (coming_up)
66467 - local->open_count++;
66468 + local_inc(&local->open_count);
66469
66470 if (hw_reconf_flags) {
66471 ieee80211_hw_config(local, hw_reconf_flags);
66472 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66473 err_del_interface:
66474 drv_remove_interface(local, &sdata->vif);
66475 err_stop:
66476 - if (!local->open_count)
66477 + if (!local_read(&local->open_count))
66478 drv_stop(local);
66479 err_del_bss:
66480 sdata->bss = NULL;
66481 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
66482 }
66483
66484 if (going_down)
66485 - local->open_count--;
66486 + local_dec(&local->open_count);
66487
66488 switch (sdata->vif.type) {
66489 case NL80211_IFTYPE_AP_VLAN:
66490 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
66491
66492 ieee80211_recalc_ps(local, -1);
66493
66494 - if (local->open_count == 0) {
66495 + if (local_read(&local->open_count) == 0) {
66496 if (local->ops->napi_poll)
66497 napi_disable(&local->napi);
66498 ieee80211_clear_tx_pending(local);
66499 diff -urNp linux-3.0.3/net/mac80211/main.c linux-3.0.3/net/mac80211/main.c
66500 --- linux-3.0.3/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
66501 +++ linux-3.0.3/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
66502 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
66503 local->hw.conf.power_level = power;
66504 }
66505
66506 - if (changed && local->open_count) {
66507 + if (changed && local_read(&local->open_count)) {
66508 ret = drv_config(local, changed);
66509 /*
66510 * Goal:
66511 diff -urNp linux-3.0.3/net/mac80211/mlme.c linux-3.0.3/net/mac80211/mlme.c
66512 --- linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:44:40.000000000 -0400
66513 +++ linux-3.0.3/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
66514 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
66515 bool have_higher_than_11mbit = false;
66516 u16 ap_ht_cap_flags;
66517
66518 + pax_track_stack();
66519 +
66520 /* AssocResp and ReassocResp have identical structure */
66521
66522 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66523 diff -urNp linux-3.0.3/net/mac80211/pm.c linux-3.0.3/net/mac80211/pm.c
66524 --- linux-3.0.3/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
66525 +++ linux-3.0.3/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
66526 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
66527 cancel_work_sync(&local->dynamic_ps_enable_work);
66528 del_timer_sync(&local->dynamic_ps_timer);
66529
66530 - local->wowlan = wowlan && local->open_count;
66531 + local->wowlan = wowlan && local_read(&local->open_count);
66532 if (local->wowlan) {
66533 int err = drv_suspend(local, wowlan);
66534 if (err) {
66535 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
66536 }
66537
66538 /* stop hardware - this must stop RX */
66539 - if (local->open_count)
66540 + if (local_read(&local->open_count))
66541 ieee80211_stop_device(local);
66542
66543 suspend:
66544 diff -urNp linux-3.0.3/net/mac80211/rate.c linux-3.0.3/net/mac80211/rate.c
66545 --- linux-3.0.3/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
66546 +++ linux-3.0.3/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
66547 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66548
66549 ASSERT_RTNL();
66550
66551 - if (local->open_count)
66552 + if (local_read(&local->open_count))
66553 return -EBUSY;
66554
66555 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66556 diff -urNp linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c
66557 --- linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
66558 +++ linux-3.0.3/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
66559 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66560
66561 spin_unlock_irqrestore(&events->lock, status);
66562
66563 - if (copy_to_user(buf, pb, p))
66564 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66565 return -EFAULT;
66566
66567 return p;
66568 diff -urNp linux-3.0.3/net/mac80211/util.c linux-3.0.3/net/mac80211/util.c
66569 --- linux-3.0.3/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
66570 +++ linux-3.0.3/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
66571 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
66572 #endif
66573
66574 /* restart hardware */
66575 - if (local->open_count) {
66576 + if (local_read(&local->open_count)) {
66577 /*
66578 * Upon resume hardware can sometimes be goofy due to
66579 * various platform / driver / bus issues, so restarting
66580 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c
66581 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
66582 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
66583 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66584 /* Increase the refcnt counter of the dest */
66585 atomic_inc(&dest->refcnt);
66586
66587 - conn_flags = atomic_read(&dest->conn_flags);
66588 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
66589 if (cp->protocol != IPPROTO_UDP)
66590 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66591 /* Bind with the destination and its corresponding transmitter */
66592 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66593 atomic_set(&cp->refcnt, 1);
66594
66595 atomic_set(&cp->n_control, 0);
66596 - atomic_set(&cp->in_pkts, 0);
66597 + atomic_set_unchecked(&cp->in_pkts, 0);
66598
66599 atomic_inc(&ipvs->conn_count);
66600 if (flags & IP_VS_CONN_F_NO_CPORT)
66601 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66602
66603 /* Don't drop the entry if its number of incoming packets is not
66604 located in [0, 8] */
66605 - i = atomic_read(&cp->in_pkts);
66606 + i = atomic_read_unchecked(&cp->in_pkts);
66607 if (i > 8 || i < 0) return 0;
66608
66609 if (!todrop_rate[i]) return 0;
66610 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c
66611 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
66612 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
66613 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66614 ret = cp->packet_xmit(skb, cp, pd->pp);
66615 /* do not touch skb anymore */
66616
66617 - atomic_inc(&cp->in_pkts);
66618 + atomic_inc_unchecked(&cp->in_pkts);
66619 ip_vs_conn_put(cp);
66620 return ret;
66621 }
66622 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66623 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66624 pkts = sysctl_sync_threshold(ipvs);
66625 else
66626 - pkts = atomic_add_return(1, &cp->in_pkts);
66627 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66628
66629 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66630 cp->protocol == IPPROTO_SCTP) {
66631 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c
66632 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:44:40.000000000 -0400
66633 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
66634 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66635 ip_vs_rs_hash(ipvs, dest);
66636 write_unlock_bh(&ipvs->rs_lock);
66637 }
66638 - atomic_set(&dest->conn_flags, conn_flags);
66639 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
66640
66641 /* bind the service */
66642 if (!dest->svc) {
66643 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
66644 " %-7s %-6d %-10d %-10d\n",
66645 &dest->addr.in6,
66646 ntohs(dest->port),
66647 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66648 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66649 atomic_read(&dest->weight),
66650 atomic_read(&dest->activeconns),
66651 atomic_read(&dest->inactconns));
66652 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
66653 "%-7s %-6d %-10d %-10d\n",
66654 ntohl(dest->addr.ip),
66655 ntohs(dest->port),
66656 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
66657 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
66658 atomic_read(&dest->weight),
66659 atomic_read(&dest->activeconns),
66660 atomic_read(&dest->inactconns));
66661 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
66662 struct ip_vs_dest_user *udest_compat;
66663 struct ip_vs_dest_user_kern udest;
66664
66665 + pax_track_stack();
66666 +
66667 if (!capable(CAP_NET_ADMIN))
66668 return -EPERM;
66669
66670 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
66671
66672 entry.addr = dest->addr.ip;
66673 entry.port = dest->port;
66674 - entry.conn_flags = atomic_read(&dest->conn_flags);
66675 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
66676 entry.weight = atomic_read(&dest->weight);
66677 entry.u_threshold = dest->u_threshold;
66678 entry.l_threshold = dest->l_threshold;
66679 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
66680 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
66681
66682 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
66683 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66684 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
66685 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
66686 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
66687 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
66688 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c
66689 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
66690 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
66691 @@ -648,7 +648,7 @@ control:
66692 * i.e only increment in_pkts for Templates.
66693 */
66694 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
66695 - int pkts = atomic_add_return(1, &cp->in_pkts);
66696 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66697
66698 if (pkts % sysctl_sync_period(ipvs) != 1)
66699 return;
66700 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
66701
66702 if (opt)
66703 memcpy(&cp->in_seq, opt, sizeof(*opt));
66704 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66705 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
66706 cp->state = state;
66707 cp->old_state = cp->state;
66708 /*
66709 diff -urNp linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c
66710 --- linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
66711 +++ linux-3.0.3/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
66712 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
66713 else
66714 rc = NF_ACCEPT;
66715 /* do not touch skb anymore */
66716 - atomic_inc(&cp->in_pkts);
66717 + atomic_inc_unchecked(&cp->in_pkts);
66718 goto out;
66719 }
66720
66721 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
66722 else
66723 rc = NF_ACCEPT;
66724 /* do not touch skb anymore */
66725 - atomic_inc(&cp->in_pkts);
66726 + atomic_inc_unchecked(&cp->in_pkts);
66727 goto out;
66728 }
66729
66730 diff -urNp linux-3.0.3/net/netfilter/Kconfig linux-3.0.3/net/netfilter/Kconfig
66731 --- linux-3.0.3/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
66732 +++ linux-3.0.3/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
66733 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
66734
66735 To compile it as a module, choose M here. If unsure, say N.
66736
66737 +config NETFILTER_XT_MATCH_GRADM
66738 + tristate '"gradm" match support'
66739 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
66740 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
66741 + ---help---
66742 + The gradm match allows to match on grsecurity RBAC being enabled.
66743 + It is useful when iptables rules are applied early on bootup to
66744 + prevent connections to the machine (except from a trusted host)
66745 + while the RBAC system is disabled.
66746 +
66747 config NETFILTER_XT_MATCH_HASHLIMIT
66748 tristate '"hashlimit" match support'
66749 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
66750 diff -urNp linux-3.0.3/net/netfilter/Makefile linux-3.0.3/net/netfilter/Makefile
66751 --- linux-3.0.3/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
66752 +++ linux-3.0.3/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
66753 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
66754 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
66755 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
66756 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
66757 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
66758 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
66759 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
66760 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
66761 diff -urNp linux-3.0.3/net/netfilter/nfnetlink_log.c linux-3.0.3/net/netfilter/nfnetlink_log.c
66762 --- linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
66763 +++ linux-3.0.3/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
66764 @@ -70,7 +70,7 @@ struct nfulnl_instance {
66765 };
66766
66767 static DEFINE_SPINLOCK(instances_lock);
66768 -static atomic_t global_seq;
66769 +static atomic_unchecked_t global_seq;
66770
66771 #define INSTANCE_BUCKETS 16
66772 static struct hlist_head instance_table[INSTANCE_BUCKETS];
66773 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
66774 /* global sequence number */
66775 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
66776 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
66777 - htonl(atomic_inc_return(&global_seq)));
66778 + htonl(atomic_inc_return_unchecked(&global_seq)));
66779
66780 if (data_len) {
66781 struct nlattr *nla;
66782 diff -urNp linux-3.0.3/net/netfilter/nfnetlink_queue.c linux-3.0.3/net/netfilter/nfnetlink_queue.c
66783 --- linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
66784 +++ linux-3.0.3/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
66785 @@ -58,7 +58,7 @@ struct nfqnl_instance {
66786 */
66787 spinlock_t lock;
66788 unsigned int queue_total;
66789 - atomic_t id_sequence; /* 'sequence' of pkt ids */
66790 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
66791 struct list_head queue_list; /* packets in queue */
66792 };
66793
66794 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
66795 nfmsg->version = NFNETLINK_V0;
66796 nfmsg->res_id = htons(queue->queue_num);
66797
66798 - entry->id = atomic_inc_return(&queue->id_sequence);
66799 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
66800 pmsg.packet_id = htonl(entry->id);
66801 pmsg.hw_protocol = entskb->protocol;
66802 pmsg.hook = entry->hook;
66803 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
66804 inst->peer_pid, inst->queue_total,
66805 inst->copy_mode, inst->copy_range,
66806 inst->queue_dropped, inst->queue_user_dropped,
66807 - atomic_read(&inst->id_sequence), 1);
66808 + atomic_read_unchecked(&inst->id_sequence), 1);
66809 }
66810
66811 static const struct seq_operations nfqnl_seq_ops = {
66812 diff -urNp linux-3.0.3/net/netfilter/xt_gradm.c linux-3.0.3/net/netfilter/xt_gradm.c
66813 --- linux-3.0.3/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
66814 +++ linux-3.0.3/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
66815 @@ -0,0 +1,51 @@
66816 +/*
66817 + * gradm match for netfilter
66818 + * Copyright © Zbigniew Krzystolik, 2010
66819 + *
66820 + * This program is free software; you can redistribute it and/or modify
66821 + * it under the terms of the GNU General Public License; either version
66822 + * 2 or 3 as published by the Free Software Foundation.
66823 + */
66824 +#include <linux/module.h>
66825 +#include <linux/moduleparam.h>
66826 +#include <linux/skbuff.h>
66827 +#include <linux/netfilter/x_tables.h>
66828 +#include <linux/grsecurity.h>
66829 +#include <linux/netfilter/xt_gradm.h>
66830 +
66831 +static bool
66832 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
66833 +{
66834 + const struct xt_gradm_mtinfo *info = par->matchinfo;
66835 + bool retval = false;
66836 + if (gr_acl_is_enabled())
66837 + retval = true;
66838 + return retval ^ info->invflags;
66839 +}
66840 +
66841 +static struct xt_match gradm_mt_reg __read_mostly = {
66842 + .name = "gradm",
66843 + .revision = 0,
66844 + .family = NFPROTO_UNSPEC,
66845 + .match = gradm_mt,
66846 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
66847 + .me = THIS_MODULE,
66848 +};
66849 +
66850 +static int __init gradm_mt_init(void)
66851 +{
66852 + return xt_register_match(&gradm_mt_reg);
66853 +}
66854 +
66855 +static void __exit gradm_mt_exit(void)
66856 +{
66857 + xt_unregister_match(&gradm_mt_reg);
66858 +}
66859 +
66860 +module_init(gradm_mt_init);
66861 +module_exit(gradm_mt_exit);
66862 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
66863 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
66864 +MODULE_LICENSE("GPL");
66865 +MODULE_ALIAS("ipt_gradm");
66866 +MODULE_ALIAS("ip6t_gradm");
66867 diff -urNp linux-3.0.3/net/netfilter/xt_statistic.c linux-3.0.3/net/netfilter/xt_statistic.c
66868 --- linux-3.0.3/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
66869 +++ linux-3.0.3/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
66870 @@ -18,7 +18,7 @@
66871 #include <linux/netfilter/x_tables.h>
66872
66873 struct xt_statistic_priv {
66874 - atomic_t count;
66875 + atomic_unchecked_t count;
66876 } ____cacheline_aligned_in_smp;
66877
66878 MODULE_LICENSE("GPL");
66879 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
66880 break;
66881 case XT_STATISTIC_MODE_NTH:
66882 do {
66883 - oval = atomic_read(&info->master->count);
66884 + oval = atomic_read_unchecked(&info->master->count);
66885 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
66886 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
66887 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
66888 if (nval == 0)
66889 ret = !ret;
66890 break;
66891 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
66892 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
66893 if (info->master == NULL)
66894 return -ENOMEM;
66895 - atomic_set(&info->master->count, info->u.nth.count);
66896 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
66897
66898 return 0;
66899 }
66900 diff -urNp linux-3.0.3/net/netlink/af_netlink.c linux-3.0.3/net/netlink/af_netlink.c
66901 --- linux-3.0.3/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
66902 +++ linux-3.0.3/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
66903 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
66904 sk->sk_error_report(sk);
66905 }
66906 }
66907 - atomic_inc(&sk->sk_drops);
66908 + atomic_inc_unchecked(&sk->sk_drops);
66909 }
66910
66911 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
66912 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
66913 sk_wmem_alloc_get(s),
66914 nlk->cb,
66915 atomic_read(&s->sk_refcnt),
66916 - atomic_read(&s->sk_drops),
66917 + atomic_read_unchecked(&s->sk_drops),
66918 sock_i_ino(s)
66919 );
66920
66921 diff -urNp linux-3.0.3/net/netrom/af_netrom.c linux-3.0.3/net/netrom/af_netrom.c
66922 --- linux-3.0.3/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
66923 +++ linux-3.0.3/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
66924 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
66925 struct sock *sk = sock->sk;
66926 struct nr_sock *nr = nr_sk(sk);
66927
66928 + memset(sax, 0, sizeof(*sax));
66929 lock_sock(sk);
66930 if (peer != 0) {
66931 if (sk->sk_state != TCP_ESTABLISHED) {
66932 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
66933 *uaddr_len = sizeof(struct full_sockaddr_ax25);
66934 } else {
66935 sax->fsa_ax25.sax25_family = AF_NETROM;
66936 - sax->fsa_ax25.sax25_ndigis = 0;
66937 sax->fsa_ax25.sax25_call = nr->source_addr;
66938 *uaddr_len = sizeof(struct sockaddr_ax25);
66939 }
66940 diff -urNp linux-3.0.3/net/packet/af_packet.c linux-3.0.3/net/packet/af_packet.c
66941 --- linux-3.0.3/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
66942 +++ linux-3.0.3/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
66943 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
66944
66945 spin_lock(&sk->sk_receive_queue.lock);
66946 po->stats.tp_packets++;
66947 - skb->dropcount = atomic_read(&sk->sk_drops);
66948 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
66949 __skb_queue_tail(&sk->sk_receive_queue, skb);
66950 spin_unlock(&sk->sk_receive_queue.lock);
66951 sk->sk_data_ready(sk, skb->len);
66952 return 0;
66953
66954 drop_n_acct:
66955 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
66956 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
66957
66958 drop_n_restore:
66959 if (skb_head != skb->data && skb_shared(skb)) {
66960 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
66961 case PACKET_HDRLEN:
66962 if (len > sizeof(int))
66963 len = sizeof(int);
66964 - if (copy_from_user(&val, optval, len))
66965 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
66966 return -EFAULT;
66967 switch (val) {
66968 case TPACKET_V1:
66969 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
66970
66971 if (put_user(len, optlen))
66972 return -EFAULT;
66973 - if (copy_to_user(optval, data, len))
66974 + if (len > sizeof(st) || copy_to_user(optval, data, len))
66975 return -EFAULT;
66976 return 0;
66977 }
66978 diff -urNp linux-3.0.3/net/phonet/af_phonet.c linux-3.0.3/net/phonet/af_phonet.c
66979 --- linux-3.0.3/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
66980 +++ linux-3.0.3/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
66981 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
66982 {
66983 struct phonet_protocol *pp;
66984
66985 - if (protocol >= PHONET_NPROTO)
66986 + if (protocol < 0 || protocol >= PHONET_NPROTO)
66987 return NULL;
66988
66989 rcu_read_lock();
66990 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
66991 {
66992 int err = 0;
66993
66994 - if (protocol >= PHONET_NPROTO)
66995 + if (protocol < 0 || protocol >= PHONET_NPROTO)
66996 return -EINVAL;
66997
66998 err = proto_register(pp->prot, 1);
66999 diff -urNp linux-3.0.3/net/phonet/pep.c linux-3.0.3/net/phonet/pep.c
67000 --- linux-3.0.3/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
67001 +++ linux-3.0.3/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
67002 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67003
67004 case PNS_PEP_CTRL_REQ:
67005 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67006 - atomic_inc(&sk->sk_drops);
67007 + atomic_inc_unchecked(&sk->sk_drops);
67008 break;
67009 }
67010 __skb_pull(skb, 4);
67011 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67012 }
67013
67014 if (pn->rx_credits == 0) {
67015 - atomic_inc(&sk->sk_drops);
67016 + atomic_inc_unchecked(&sk->sk_drops);
67017 err = -ENOBUFS;
67018 break;
67019 }
67020 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67021 }
67022
67023 if (pn->rx_credits == 0) {
67024 - atomic_inc(&sk->sk_drops);
67025 + atomic_inc_unchecked(&sk->sk_drops);
67026 err = NET_RX_DROP;
67027 break;
67028 }
67029 diff -urNp linux-3.0.3/net/phonet/socket.c linux-3.0.3/net/phonet/socket.c
67030 --- linux-3.0.3/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
67031 +++ linux-3.0.3/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
67032 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
67033 pn->resource, sk->sk_state,
67034 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67035 sock_i_uid(sk), sock_i_ino(sk),
67036 - atomic_read(&sk->sk_refcnt), sk,
67037 - atomic_read(&sk->sk_drops), &len);
67038 + atomic_read(&sk->sk_refcnt),
67039 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67040 + NULL,
67041 +#else
67042 + sk,
67043 +#endif
67044 + atomic_read_unchecked(&sk->sk_drops), &len);
67045 }
67046 seq_printf(seq, "%*s\n", 127 - len, "");
67047 return 0;
67048 diff -urNp linux-3.0.3/net/rds/cong.c linux-3.0.3/net/rds/cong.c
67049 --- linux-3.0.3/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
67050 +++ linux-3.0.3/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
67051 @@ -77,7 +77,7 @@
67052 * finds that the saved generation number is smaller than the global generation
67053 * number, it wakes up the process.
67054 */
67055 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67056 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67057
67058 /*
67059 * Congestion monitoring
67060 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67061 rdsdebug("waking map %p for %pI4\n",
67062 map, &map->m_addr);
67063 rds_stats_inc(s_cong_update_received);
67064 - atomic_inc(&rds_cong_generation);
67065 + atomic_inc_unchecked(&rds_cong_generation);
67066 if (waitqueue_active(&map->m_waitq))
67067 wake_up(&map->m_waitq);
67068 if (waitqueue_active(&rds_poll_waitq))
67069 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67070
67071 int rds_cong_updated_since(unsigned long *recent)
67072 {
67073 - unsigned long gen = atomic_read(&rds_cong_generation);
67074 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67075
67076 if (likely(*recent == gen))
67077 return 0;
67078 diff -urNp linux-3.0.3/net/rds/ib_cm.c linux-3.0.3/net/rds/ib_cm.c
67079 --- linux-3.0.3/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
67080 +++ linux-3.0.3/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
67081 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67082 /* Clear the ACK state */
67083 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67084 #ifdef KERNEL_HAS_ATOMIC64
67085 - atomic64_set(&ic->i_ack_next, 0);
67086 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67087 #else
67088 ic->i_ack_next = 0;
67089 #endif
67090 diff -urNp linux-3.0.3/net/rds/ib.h linux-3.0.3/net/rds/ib.h
67091 --- linux-3.0.3/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
67092 +++ linux-3.0.3/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
67093 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67094 /* sending acks */
67095 unsigned long i_ack_flags;
67096 #ifdef KERNEL_HAS_ATOMIC64
67097 - atomic64_t i_ack_next; /* next ACK to send */
67098 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67099 #else
67100 spinlock_t i_ack_lock; /* protect i_ack_next */
67101 u64 i_ack_next; /* next ACK to send */
67102 diff -urNp linux-3.0.3/net/rds/ib_recv.c linux-3.0.3/net/rds/ib_recv.c
67103 --- linux-3.0.3/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
67104 +++ linux-3.0.3/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
67105 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67106 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67107 int ack_required)
67108 {
67109 - atomic64_set(&ic->i_ack_next, seq);
67110 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67111 if (ack_required) {
67112 smp_mb__before_clear_bit();
67113 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67114 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67115 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67116 smp_mb__after_clear_bit();
67117
67118 - return atomic64_read(&ic->i_ack_next);
67119 + return atomic64_read_unchecked(&ic->i_ack_next);
67120 }
67121 #endif
67122
67123 diff -urNp linux-3.0.3/net/rds/iw_cm.c linux-3.0.3/net/rds/iw_cm.c
67124 --- linux-3.0.3/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
67125 +++ linux-3.0.3/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
67126 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67127 /* Clear the ACK state */
67128 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67129 #ifdef KERNEL_HAS_ATOMIC64
67130 - atomic64_set(&ic->i_ack_next, 0);
67131 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67132 #else
67133 ic->i_ack_next = 0;
67134 #endif
67135 diff -urNp linux-3.0.3/net/rds/iw.h linux-3.0.3/net/rds/iw.h
67136 --- linux-3.0.3/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
67137 +++ linux-3.0.3/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
67138 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67139 /* sending acks */
67140 unsigned long i_ack_flags;
67141 #ifdef KERNEL_HAS_ATOMIC64
67142 - atomic64_t i_ack_next; /* next ACK to send */
67143 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67144 #else
67145 spinlock_t i_ack_lock; /* protect i_ack_next */
67146 u64 i_ack_next; /* next ACK to send */
67147 diff -urNp linux-3.0.3/net/rds/iw_rdma.c linux-3.0.3/net/rds/iw_rdma.c
67148 --- linux-3.0.3/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
67149 +++ linux-3.0.3/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
67150 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67151 struct rdma_cm_id *pcm_id;
67152 int rc;
67153
67154 + pax_track_stack();
67155 +
67156 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67157 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67158
67159 diff -urNp linux-3.0.3/net/rds/iw_recv.c linux-3.0.3/net/rds/iw_recv.c
67160 --- linux-3.0.3/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
67161 +++ linux-3.0.3/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
67162 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67163 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67164 int ack_required)
67165 {
67166 - atomic64_set(&ic->i_ack_next, seq);
67167 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67168 if (ack_required) {
67169 smp_mb__before_clear_bit();
67170 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67171 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67172 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67173 smp_mb__after_clear_bit();
67174
67175 - return atomic64_read(&ic->i_ack_next);
67176 + return atomic64_read_unchecked(&ic->i_ack_next);
67177 }
67178 #endif
67179
67180 diff -urNp linux-3.0.3/net/rxrpc/af_rxrpc.c linux-3.0.3/net/rxrpc/af_rxrpc.c
67181 --- linux-3.0.3/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
67182 +++ linux-3.0.3/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
67183 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67184 __be32 rxrpc_epoch;
67185
67186 /* current debugging ID */
67187 -atomic_t rxrpc_debug_id;
67188 +atomic_unchecked_t rxrpc_debug_id;
67189
67190 /* count of skbs currently in use */
67191 atomic_t rxrpc_n_skbs;
67192 diff -urNp linux-3.0.3/net/rxrpc/ar-ack.c linux-3.0.3/net/rxrpc/ar-ack.c
67193 --- linux-3.0.3/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
67194 +++ linux-3.0.3/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
67195 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67196
67197 _enter("{%d,%d,%d,%d},",
67198 call->acks_hard, call->acks_unacked,
67199 - atomic_read(&call->sequence),
67200 + atomic_read_unchecked(&call->sequence),
67201 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67202
67203 stop = 0;
67204 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67205
67206 /* each Tx packet has a new serial number */
67207 sp->hdr.serial =
67208 - htonl(atomic_inc_return(&call->conn->serial));
67209 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67210
67211 hdr = (struct rxrpc_header *) txb->head;
67212 hdr->serial = sp->hdr.serial;
67213 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
67214 */
67215 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67216 {
67217 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67218 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67219 }
67220
67221 /*
67222 @@ -629,7 +629,7 @@ process_further:
67223
67224 latest = ntohl(sp->hdr.serial);
67225 hard = ntohl(ack.firstPacket);
67226 - tx = atomic_read(&call->sequence);
67227 + tx = atomic_read_unchecked(&call->sequence);
67228
67229 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67230 latest,
67231 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
67232 u32 abort_code = RX_PROTOCOL_ERROR;
67233 u8 *acks = NULL;
67234
67235 + pax_track_stack();
67236 +
67237 //printk("\n--------------------\n");
67238 _enter("{%d,%s,%lx} [%lu]",
67239 call->debug_id, rxrpc_call_states[call->state], call->events,
67240 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
67241 goto maybe_reschedule;
67242
67243 send_ACK_with_skew:
67244 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67245 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67246 ntohl(ack.serial));
67247 send_ACK:
67248 mtu = call->conn->trans->peer->if_mtu;
67249 @@ -1173,7 +1175,7 @@ send_ACK:
67250 ackinfo.rxMTU = htonl(5692);
67251 ackinfo.jumbo_max = htonl(4);
67252
67253 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67254 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67255 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67256 ntohl(hdr.serial),
67257 ntohs(ack.maxSkew),
67258 @@ -1191,7 +1193,7 @@ send_ACK:
67259 send_message:
67260 _debug("send message");
67261
67262 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67263 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67264 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67265 send_message_2:
67266
67267 diff -urNp linux-3.0.3/net/rxrpc/ar-call.c linux-3.0.3/net/rxrpc/ar-call.c
67268 --- linux-3.0.3/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
67269 +++ linux-3.0.3/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
67270 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67271 spin_lock_init(&call->lock);
67272 rwlock_init(&call->state_lock);
67273 atomic_set(&call->usage, 1);
67274 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67275 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67276 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67277
67278 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67279 diff -urNp linux-3.0.3/net/rxrpc/ar-connection.c linux-3.0.3/net/rxrpc/ar-connection.c
67280 --- linux-3.0.3/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
67281 +++ linux-3.0.3/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
67282 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67283 rwlock_init(&conn->lock);
67284 spin_lock_init(&conn->state_lock);
67285 atomic_set(&conn->usage, 1);
67286 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67287 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67288 conn->avail_calls = RXRPC_MAXCALLS;
67289 conn->size_align = 4;
67290 conn->header_size = sizeof(struct rxrpc_header);
67291 diff -urNp linux-3.0.3/net/rxrpc/ar-connevent.c linux-3.0.3/net/rxrpc/ar-connevent.c
67292 --- linux-3.0.3/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
67293 +++ linux-3.0.3/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
67294 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67295
67296 len = iov[0].iov_len + iov[1].iov_len;
67297
67298 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67299 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67300 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67301
67302 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67303 diff -urNp linux-3.0.3/net/rxrpc/ar-input.c linux-3.0.3/net/rxrpc/ar-input.c
67304 --- linux-3.0.3/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
67305 +++ linux-3.0.3/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
67306 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67307 /* track the latest serial number on this connection for ACK packet
67308 * information */
67309 serial = ntohl(sp->hdr.serial);
67310 - hi_serial = atomic_read(&call->conn->hi_serial);
67311 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67312 while (serial > hi_serial)
67313 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67314 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67315 serial);
67316
67317 /* request ACK generation for any ACK or DATA packet that requests
67318 diff -urNp linux-3.0.3/net/rxrpc/ar-internal.h linux-3.0.3/net/rxrpc/ar-internal.h
67319 --- linux-3.0.3/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
67320 +++ linux-3.0.3/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
67321 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67322 int error; /* error code for local abort */
67323 int debug_id; /* debug ID for printks */
67324 unsigned call_counter; /* call ID counter */
67325 - atomic_t serial; /* packet serial number counter */
67326 - atomic_t hi_serial; /* highest serial number received */
67327 + atomic_unchecked_t serial; /* packet serial number counter */
67328 + atomic_unchecked_t hi_serial; /* highest serial number received */
67329 u8 avail_calls; /* number of calls available */
67330 u8 size_align; /* data size alignment (for security) */
67331 u8 header_size; /* rxrpc + security header size */
67332 @@ -346,7 +346,7 @@ struct rxrpc_call {
67333 spinlock_t lock;
67334 rwlock_t state_lock; /* lock for state transition */
67335 atomic_t usage;
67336 - atomic_t sequence; /* Tx data packet sequence counter */
67337 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67338 u32 abort_code; /* local/remote abort code */
67339 enum { /* current state of call */
67340 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67341 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67342 */
67343 extern atomic_t rxrpc_n_skbs;
67344 extern __be32 rxrpc_epoch;
67345 -extern atomic_t rxrpc_debug_id;
67346 +extern atomic_unchecked_t rxrpc_debug_id;
67347 extern struct workqueue_struct *rxrpc_workqueue;
67348
67349 /*
67350 diff -urNp linux-3.0.3/net/rxrpc/ar-local.c linux-3.0.3/net/rxrpc/ar-local.c
67351 --- linux-3.0.3/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
67352 +++ linux-3.0.3/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
67353 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67354 spin_lock_init(&local->lock);
67355 rwlock_init(&local->services_lock);
67356 atomic_set(&local->usage, 1);
67357 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67358 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67359 memcpy(&local->srx, srx, sizeof(*srx));
67360 }
67361
67362 diff -urNp linux-3.0.3/net/rxrpc/ar-output.c linux-3.0.3/net/rxrpc/ar-output.c
67363 --- linux-3.0.3/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
67364 +++ linux-3.0.3/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
67365 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67366 sp->hdr.cid = call->cid;
67367 sp->hdr.callNumber = call->call_id;
67368 sp->hdr.seq =
67369 - htonl(atomic_inc_return(&call->sequence));
67370 + htonl(atomic_inc_return_unchecked(&call->sequence));
67371 sp->hdr.serial =
67372 - htonl(atomic_inc_return(&conn->serial));
67373 + htonl(atomic_inc_return_unchecked(&conn->serial));
67374 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67375 sp->hdr.userStatus = 0;
67376 sp->hdr.securityIndex = conn->security_ix;
67377 diff -urNp linux-3.0.3/net/rxrpc/ar-peer.c linux-3.0.3/net/rxrpc/ar-peer.c
67378 --- linux-3.0.3/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
67379 +++ linux-3.0.3/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
67380 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67381 INIT_LIST_HEAD(&peer->error_targets);
67382 spin_lock_init(&peer->lock);
67383 atomic_set(&peer->usage, 1);
67384 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67385 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67386 memcpy(&peer->srx, srx, sizeof(*srx));
67387
67388 rxrpc_assess_MTU_size(peer);
67389 diff -urNp linux-3.0.3/net/rxrpc/ar-proc.c linux-3.0.3/net/rxrpc/ar-proc.c
67390 --- linux-3.0.3/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
67391 +++ linux-3.0.3/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
67392 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67393 atomic_read(&conn->usage),
67394 rxrpc_conn_states[conn->state],
67395 key_serial(conn->key),
67396 - atomic_read(&conn->serial),
67397 - atomic_read(&conn->hi_serial));
67398 + atomic_read_unchecked(&conn->serial),
67399 + atomic_read_unchecked(&conn->hi_serial));
67400
67401 return 0;
67402 }
67403 diff -urNp linux-3.0.3/net/rxrpc/ar-transport.c linux-3.0.3/net/rxrpc/ar-transport.c
67404 --- linux-3.0.3/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
67405 +++ linux-3.0.3/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
67406 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67407 spin_lock_init(&trans->client_lock);
67408 rwlock_init(&trans->conn_lock);
67409 atomic_set(&trans->usage, 1);
67410 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67411 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67412
67413 if (peer->srx.transport.family == AF_INET) {
67414 switch (peer->srx.transport_type) {
67415 diff -urNp linux-3.0.3/net/rxrpc/rxkad.c linux-3.0.3/net/rxrpc/rxkad.c
67416 --- linux-3.0.3/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
67417 +++ linux-3.0.3/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
67418 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67419 u16 check;
67420 int nsg;
67421
67422 + pax_track_stack();
67423 +
67424 sp = rxrpc_skb(skb);
67425
67426 _enter("");
67427 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67428 u16 check;
67429 int nsg;
67430
67431 + pax_track_stack();
67432 +
67433 _enter("");
67434
67435 sp = rxrpc_skb(skb);
67436 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67437
67438 len = iov[0].iov_len + iov[1].iov_len;
67439
67440 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67441 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67442 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67443
67444 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67445 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67446
67447 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67448
67449 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67450 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67451 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67452
67453 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67454 diff -urNp linux-3.0.3/net/sctp/proc.c linux-3.0.3/net/sctp/proc.c
67455 --- linux-3.0.3/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
67456 +++ linux-3.0.3/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
67457 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
67458 seq_printf(seq,
67459 "%8pK %8pK %-3d %-3d %-2d %-4d "
67460 "%4d %8d %8d %7d %5lu %-5d %5d ",
67461 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67462 + assoc, sk,
67463 + sctp_sk(sk)->type, sk->sk_state,
67464 assoc->state, hash,
67465 assoc->assoc_id,
67466 assoc->sndbuf_used,
67467 diff -urNp linux-3.0.3/net/sctp/socket.c linux-3.0.3/net/sctp/socket.c
67468 --- linux-3.0.3/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
67469 +++ linux-3.0.3/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
67470 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
67471 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67472 if (space_left < addrlen)
67473 return -ENOMEM;
67474 - if (copy_to_user(to, &temp, addrlen))
67475 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67476 return -EFAULT;
67477 to += addrlen;
67478 cnt++;
67479 diff -urNp linux-3.0.3/net/socket.c linux-3.0.3/net/socket.c
67480 --- linux-3.0.3/net/socket.c 2011-08-23 21:44:40.000000000 -0400
67481 +++ linux-3.0.3/net/socket.c 2011-08-23 21:48:14.000000000 -0400
67482 @@ -88,6 +88,7 @@
67483 #include <linux/nsproxy.h>
67484 #include <linux/magic.h>
67485 #include <linux/slab.h>
67486 +#include <linux/in.h>
67487
67488 #include <asm/uaccess.h>
67489 #include <asm/unistd.h>
67490 @@ -105,6 +106,8 @@
67491 #include <linux/sockios.h>
67492 #include <linux/atalk.h>
67493
67494 +#include <linux/grsock.h>
67495 +
67496 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67497 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67498 unsigned long nr_segs, loff_t pos);
67499 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
67500 &sockfs_dentry_operations, SOCKFS_MAGIC);
67501 }
67502
67503 -static struct vfsmount *sock_mnt __read_mostly;
67504 +struct vfsmount *sock_mnt __read_mostly;
67505
67506 static struct file_system_type sock_fs_type = {
67507 .name = "sockfs",
67508 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
67509 return -EAFNOSUPPORT;
67510 if (type < 0 || type >= SOCK_MAX)
67511 return -EINVAL;
67512 + if (protocol < 0)
67513 + return -EINVAL;
67514
67515 /* Compatibility.
67516
67517 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
67518 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
67519 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
67520
67521 + if(!gr_search_socket(family, type, protocol)) {
67522 + retval = -EACCES;
67523 + goto out;
67524 + }
67525 +
67526 + if (gr_handle_sock_all(family, type, protocol)) {
67527 + retval = -EACCES;
67528 + goto out;
67529 + }
67530 +
67531 retval = sock_create(family, type, protocol, &sock);
67532 if (retval < 0)
67533 goto out;
67534 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67535 if (sock) {
67536 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
67537 if (err >= 0) {
67538 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
67539 + err = -EACCES;
67540 + goto error;
67541 + }
67542 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
67543 + if (err)
67544 + goto error;
67545 +
67546 err = security_socket_bind(sock,
67547 (struct sockaddr *)&address,
67548 addrlen);
67549 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67550 (struct sockaddr *)
67551 &address, addrlen);
67552 }
67553 +error:
67554 fput_light(sock->file, fput_needed);
67555 }
67556 return err;
67557 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
67558 if ((unsigned)backlog > somaxconn)
67559 backlog = somaxconn;
67560
67561 + if (gr_handle_sock_server_other(sock->sk)) {
67562 + err = -EPERM;
67563 + goto error;
67564 + }
67565 +
67566 + err = gr_search_listen(sock);
67567 + if (err)
67568 + goto error;
67569 +
67570 err = security_socket_listen(sock, backlog);
67571 if (!err)
67572 err = sock->ops->listen(sock, backlog);
67573
67574 +error:
67575 fput_light(sock->file, fput_needed);
67576 }
67577 return err;
67578 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67579 newsock->type = sock->type;
67580 newsock->ops = sock->ops;
67581
67582 + if (gr_handle_sock_server_other(sock->sk)) {
67583 + err = -EPERM;
67584 + sock_release(newsock);
67585 + goto out_put;
67586 + }
67587 +
67588 + err = gr_search_accept(sock);
67589 + if (err) {
67590 + sock_release(newsock);
67591 + goto out_put;
67592 + }
67593 +
67594 /*
67595 * We don't need try_module_get here, as the listening socket (sock)
67596 * has the protocol module (sock->ops->owner) held.
67597 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67598 fd_install(newfd, newfile);
67599 err = newfd;
67600
67601 + gr_attach_curr_ip(newsock->sk);
67602 +
67603 out_put:
67604 fput_light(sock->file, fput_needed);
67605 out:
67606 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67607 int, addrlen)
67608 {
67609 struct socket *sock;
67610 + struct sockaddr *sck;
67611 struct sockaddr_storage address;
67612 int err, fput_needed;
67613
67614 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
67615 if (err < 0)
67616 goto out_put;
67617
67618 + sck = (struct sockaddr *)&address;
67619 +
67620 + if (gr_handle_sock_client(sck)) {
67621 + err = -EACCES;
67622 + goto out_put;
67623 + }
67624 +
67625 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
67626 + if (err)
67627 + goto out_put;
67628 +
67629 err =
67630 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
67631 if (err)
67632 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
67633 unsigned char *ctl_buf = ctl;
67634 int err, ctl_len, iov_size, total_len;
67635
67636 + pax_track_stack();
67637 +
67638 err = -EFAULT;
67639 if (MSG_CMSG_COMPAT & flags) {
67640 if (get_compat_msghdr(msg_sys, msg_compat))
67641 diff -urNp linux-3.0.3/net/sunrpc/sched.c linux-3.0.3/net/sunrpc/sched.c
67642 --- linux-3.0.3/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
67643 +++ linux-3.0.3/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
67644 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
67645 #ifdef RPC_DEBUG
67646 static void rpc_task_set_debuginfo(struct rpc_task *task)
67647 {
67648 - static atomic_t rpc_pid;
67649 + static atomic_unchecked_t rpc_pid;
67650
67651 - task->tk_pid = atomic_inc_return(&rpc_pid);
67652 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
67653 }
67654 #else
67655 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
67656 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c
67657 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
67658 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
67659 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
67660 static unsigned int min_max_inline = 4096;
67661 static unsigned int max_max_inline = 65536;
67662
67663 -atomic_t rdma_stat_recv;
67664 -atomic_t rdma_stat_read;
67665 -atomic_t rdma_stat_write;
67666 -atomic_t rdma_stat_sq_starve;
67667 -atomic_t rdma_stat_rq_starve;
67668 -atomic_t rdma_stat_rq_poll;
67669 -atomic_t rdma_stat_rq_prod;
67670 -atomic_t rdma_stat_sq_poll;
67671 -atomic_t rdma_stat_sq_prod;
67672 +atomic_unchecked_t rdma_stat_recv;
67673 +atomic_unchecked_t rdma_stat_read;
67674 +atomic_unchecked_t rdma_stat_write;
67675 +atomic_unchecked_t rdma_stat_sq_starve;
67676 +atomic_unchecked_t rdma_stat_rq_starve;
67677 +atomic_unchecked_t rdma_stat_rq_poll;
67678 +atomic_unchecked_t rdma_stat_rq_prod;
67679 +atomic_unchecked_t rdma_stat_sq_poll;
67680 +atomic_unchecked_t rdma_stat_sq_prod;
67681
67682 /* Temporary NFS request map and context caches */
67683 struct kmem_cache *svc_rdma_map_cachep;
67684 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
67685 len -= *ppos;
67686 if (len > *lenp)
67687 len = *lenp;
67688 - if (len && copy_to_user(buffer, str_buf, len))
67689 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
67690 return -EFAULT;
67691 *lenp = len;
67692 *ppos += len;
67693 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
67694 {
67695 .procname = "rdma_stat_read",
67696 .data = &rdma_stat_read,
67697 - .maxlen = sizeof(atomic_t),
67698 + .maxlen = sizeof(atomic_unchecked_t),
67699 .mode = 0644,
67700 .proc_handler = read_reset_stat,
67701 },
67702 {
67703 .procname = "rdma_stat_recv",
67704 .data = &rdma_stat_recv,
67705 - .maxlen = sizeof(atomic_t),
67706 + .maxlen = sizeof(atomic_unchecked_t),
67707 .mode = 0644,
67708 .proc_handler = read_reset_stat,
67709 },
67710 {
67711 .procname = "rdma_stat_write",
67712 .data = &rdma_stat_write,
67713 - .maxlen = sizeof(atomic_t),
67714 + .maxlen = sizeof(atomic_unchecked_t),
67715 .mode = 0644,
67716 .proc_handler = read_reset_stat,
67717 },
67718 {
67719 .procname = "rdma_stat_sq_starve",
67720 .data = &rdma_stat_sq_starve,
67721 - .maxlen = sizeof(atomic_t),
67722 + .maxlen = sizeof(atomic_unchecked_t),
67723 .mode = 0644,
67724 .proc_handler = read_reset_stat,
67725 },
67726 {
67727 .procname = "rdma_stat_rq_starve",
67728 .data = &rdma_stat_rq_starve,
67729 - .maxlen = sizeof(atomic_t),
67730 + .maxlen = sizeof(atomic_unchecked_t),
67731 .mode = 0644,
67732 .proc_handler = read_reset_stat,
67733 },
67734 {
67735 .procname = "rdma_stat_rq_poll",
67736 .data = &rdma_stat_rq_poll,
67737 - .maxlen = sizeof(atomic_t),
67738 + .maxlen = sizeof(atomic_unchecked_t),
67739 .mode = 0644,
67740 .proc_handler = read_reset_stat,
67741 },
67742 {
67743 .procname = "rdma_stat_rq_prod",
67744 .data = &rdma_stat_rq_prod,
67745 - .maxlen = sizeof(atomic_t),
67746 + .maxlen = sizeof(atomic_unchecked_t),
67747 .mode = 0644,
67748 .proc_handler = read_reset_stat,
67749 },
67750 {
67751 .procname = "rdma_stat_sq_poll",
67752 .data = &rdma_stat_sq_poll,
67753 - .maxlen = sizeof(atomic_t),
67754 + .maxlen = sizeof(atomic_unchecked_t),
67755 .mode = 0644,
67756 .proc_handler = read_reset_stat,
67757 },
67758 {
67759 .procname = "rdma_stat_sq_prod",
67760 .data = &rdma_stat_sq_prod,
67761 - .maxlen = sizeof(atomic_t),
67762 + .maxlen = sizeof(atomic_unchecked_t),
67763 .mode = 0644,
67764 .proc_handler = read_reset_stat,
67765 },
67766 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
67767 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
67768 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
67769 @@ -499,7 +499,7 @@ next_sge:
67770 svc_rdma_put_context(ctxt, 0);
67771 goto out;
67772 }
67773 - atomic_inc(&rdma_stat_read);
67774 + atomic_inc_unchecked(&rdma_stat_read);
67775
67776 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
67777 chl_map->ch[ch_no].count -= read_wr.num_sge;
67778 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67779 dto_q);
67780 list_del_init(&ctxt->dto_q);
67781 } else {
67782 - atomic_inc(&rdma_stat_rq_starve);
67783 + atomic_inc_unchecked(&rdma_stat_rq_starve);
67784 clear_bit(XPT_DATA, &xprt->xpt_flags);
67785 ctxt = NULL;
67786 }
67787 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
67788 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
67789 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
67790 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
67791 - atomic_inc(&rdma_stat_recv);
67792 + atomic_inc_unchecked(&rdma_stat_recv);
67793
67794 /* Build up the XDR from the receive buffers. */
67795 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
67796 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c
67797 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
67798 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
67799 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
67800 write_wr.wr.rdma.remote_addr = to;
67801
67802 /* Post It */
67803 - atomic_inc(&rdma_stat_write);
67804 + atomic_inc_unchecked(&rdma_stat_write);
67805 if (svc_rdma_send(xprt, &write_wr))
67806 goto err;
67807 return 0;
67808 diff -urNp linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c
67809 --- linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
67810 +++ linux-3.0.3/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
67811 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
67812 return;
67813
67814 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
67815 - atomic_inc(&rdma_stat_rq_poll);
67816 + atomic_inc_unchecked(&rdma_stat_rq_poll);
67817
67818 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
67819 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
67820 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
67821 }
67822
67823 if (ctxt)
67824 - atomic_inc(&rdma_stat_rq_prod);
67825 + atomic_inc_unchecked(&rdma_stat_rq_prod);
67826
67827 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
67828 /*
67829 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
67830 return;
67831
67832 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
67833 - atomic_inc(&rdma_stat_sq_poll);
67834 + atomic_inc_unchecked(&rdma_stat_sq_poll);
67835 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
67836 if (wc.status != IB_WC_SUCCESS)
67837 /* Close the transport */
67838 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
67839 }
67840
67841 if (ctxt)
67842 - atomic_inc(&rdma_stat_sq_prod);
67843 + atomic_inc_unchecked(&rdma_stat_sq_prod);
67844 }
67845
67846 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
67847 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
67848 spin_lock_bh(&xprt->sc_lock);
67849 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
67850 spin_unlock_bh(&xprt->sc_lock);
67851 - atomic_inc(&rdma_stat_sq_starve);
67852 + atomic_inc_unchecked(&rdma_stat_sq_starve);
67853
67854 /* See if we can opportunistically reap SQ WR to make room */
67855 sq_cq_reap(xprt);
67856 diff -urNp linux-3.0.3/net/sysctl_net.c linux-3.0.3/net/sysctl_net.c
67857 --- linux-3.0.3/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
67858 +++ linux-3.0.3/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
67859 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
67860 struct ctl_table *table)
67861 {
67862 /* Allow network administrator to have same access as root. */
67863 - if (capable(CAP_NET_ADMIN)) {
67864 + if (capable_nolog(CAP_NET_ADMIN)) {
67865 int mode = (table->mode >> 6) & 7;
67866 return (mode << 6) | (mode << 3) | mode;
67867 }
67868 diff -urNp linux-3.0.3/net/unix/af_unix.c linux-3.0.3/net/unix/af_unix.c
67869 --- linux-3.0.3/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
67870 +++ linux-3.0.3/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
67871 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
67872 err = -ECONNREFUSED;
67873 if (!S_ISSOCK(inode->i_mode))
67874 goto put_fail;
67875 +
67876 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
67877 + err = -EACCES;
67878 + goto put_fail;
67879 + }
67880 +
67881 u = unix_find_socket_byinode(inode);
67882 if (!u)
67883 goto put_fail;
67884 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
67885 if (u) {
67886 struct dentry *dentry;
67887 dentry = unix_sk(u)->dentry;
67888 +
67889 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
67890 + err = -EPERM;
67891 + sock_put(u);
67892 + goto fail;
67893 + }
67894 +
67895 if (dentry)
67896 touch_atime(unix_sk(u)->mnt, dentry);
67897 } else
67898 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
67899 err = security_path_mknod(&nd.path, dentry, mode, 0);
67900 if (err)
67901 goto out_mknod_drop_write;
67902 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
67903 + err = -EACCES;
67904 + goto out_mknod_drop_write;
67905 + }
67906 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
67907 out_mknod_drop_write:
67908 mnt_drop_write(nd.path.mnt);
67909 if (err)
67910 goto out_mknod_dput;
67911 +
67912 + gr_handle_create(dentry, nd.path.mnt);
67913 +
67914 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
67915 dput(nd.path.dentry);
67916 nd.path.dentry = dentry;
67917 diff -urNp linux-3.0.3/net/wireless/core.h linux-3.0.3/net/wireless/core.h
67918 --- linux-3.0.3/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
67919 +++ linux-3.0.3/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
67920 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
67921 struct mutex mtx;
67922
67923 /* rfkill support */
67924 - struct rfkill_ops rfkill_ops;
67925 + rfkill_ops_no_const rfkill_ops;
67926 struct rfkill *rfkill;
67927 struct work_struct rfkill_sync;
67928
67929 diff -urNp linux-3.0.3/net/wireless/wext-core.c linux-3.0.3/net/wireless/wext-core.c
67930 --- linux-3.0.3/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
67931 +++ linux-3.0.3/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
67932 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
67933 */
67934
67935 /* Support for very large requests */
67936 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
67937 - (user_length > descr->max_tokens)) {
67938 + if (user_length > descr->max_tokens) {
67939 /* Allow userspace to GET more than max so
67940 * we can support any size GET requests.
67941 * There is still a limit : -ENOMEM.
67942 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
67943 }
67944 }
67945
67946 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
67947 - /*
67948 - * If this is a GET, but not NOMAX, it means that the extra
67949 - * data is not bounded by userspace, but by max_tokens. Thus
67950 - * set the length to max_tokens. This matches the extra data
67951 - * allocation.
67952 - * The driver should fill it with the number of tokens it
67953 - * provided, and it may check iwp->length rather than having
67954 - * knowledge of max_tokens. If the driver doesn't change the
67955 - * iwp->length, this ioctl just copies back max_token tokens
67956 - * filled with zeroes. Hopefully the driver isn't claiming
67957 - * them to be valid data.
67958 - */
67959 - iwp->length = descr->max_tokens;
67960 - }
67961 -
67962 err = handler(dev, info, (union iwreq_data *) iwp, extra);
67963
67964 iwp->length += essid_compat;
67965 diff -urNp linux-3.0.3/net/xfrm/xfrm_policy.c linux-3.0.3/net/xfrm/xfrm_policy.c
67966 --- linux-3.0.3/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
67967 +++ linux-3.0.3/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
67968 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
67969 {
67970 policy->walk.dead = 1;
67971
67972 - atomic_inc(&policy->genid);
67973 + atomic_inc_unchecked(&policy->genid);
67974
67975 if (del_timer(&policy->timer))
67976 xfrm_pol_put(policy);
67977 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
67978 hlist_add_head(&policy->bydst, chain);
67979 xfrm_pol_hold(policy);
67980 net->xfrm.policy_count[dir]++;
67981 - atomic_inc(&flow_cache_genid);
67982 + atomic_inc_unchecked(&flow_cache_genid);
67983 if (delpol)
67984 __xfrm_policy_unlink(delpol, dir);
67985 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
67986 @@ -1528,7 +1528,7 @@ free_dst:
67987 goto out;
67988 }
67989
67990 -static int inline
67991 +static inline int
67992 xfrm_dst_alloc_copy(void **target, const void *src, int size)
67993 {
67994 if (!*target) {
67995 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
67996 return 0;
67997 }
67998
67999 -static int inline
68000 +static inline int
68001 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68002 {
68003 #ifdef CONFIG_XFRM_SUB_POLICY
68004 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
68005 #endif
68006 }
68007
68008 -static int inline
68009 +static inline int
68010 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68011 {
68012 #ifdef CONFIG_XFRM_SUB_POLICY
68013 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
68014
68015 xdst->num_pols = num_pols;
68016 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68017 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68018 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68019
68020 return xdst;
68021 }
68022 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68023 if (xdst->xfrm_genid != dst->xfrm->genid)
68024 return 0;
68025 if (xdst->num_pols > 0 &&
68026 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68027 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68028 return 0;
68029
68030 mtu = dst_mtu(dst->child);
68031 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
68032 sizeof(pol->xfrm_vec[i].saddr));
68033 pol->xfrm_vec[i].encap_family = mp->new_family;
68034 /* flush bundles */
68035 - atomic_inc(&pol->genid);
68036 + atomic_inc_unchecked(&pol->genid);
68037 }
68038 }
68039
68040 diff -urNp linux-3.0.3/net/xfrm/xfrm_user.c linux-3.0.3/net/xfrm/xfrm_user.c
68041 --- linux-3.0.3/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
68042 +++ linux-3.0.3/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
68043 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68044 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68045 int i;
68046
68047 + pax_track_stack();
68048 +
68049 if (xp->xfrm_nr == 0)
68050 return 0;
68051
68052 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68053 int err;
68054 int n = 0;
68055
68056 + pax_track_stack();
68057 +
68058 if (attrs[XFRMA_MIGRATE] == NULL)
68059 return -EINVAL;
68060
68061 diff -urNp linux-3.0.3/scripts/basic/fixdep.c linux-3.0.3/scripts/basic/fixdep.c
68062 --- linux-3.0.3/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
68063 +++ linux-3.0.3/scripts/basic/fixdep.c 2011-08-23 21:47:56.000000000 -0400
68064 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68065
68066 static void parse_config_file(const char *map, size_t len)
68067 {
68068 - const int *end = (const int *) (map + len);
68069 + const unsigned int *end = (const unsigned int *) (map + len);
68070 /* start at +1, so that p can never be < map */
68071 - const int *m = (const int *) map + 1;
68072 + const unsigned int *m = (const unsigned int *) map + 1;
68073 const char *p, *q;
68074
68075 for (; m < end; m++) {
68076 @@ -405,7 +405,7 @@ static void print_deps(void)
68077 static void traps(void)
68078 {
68079 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68080 - int *p = (int *)test;
68081 + unsigned int *p = (unsigned int *)test;
68082
68083 if (*p != INT_CONF) {
68084 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68085 diff -urNp linux-3.0.3/scripts/gcc-plugin.sh linux-3.0.3/scripts/gcc-plugin.sh
68086 --- linux-3.0.3/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68087 +++ linux-3.0.3/scripts/gcc-plugin.sh 2011-08-23 21:47:56.000000000 -0400
68088 @@ -0,0 +1,2 @@
68089 +#!/bin/sh
68090 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
68091 diff -urNp linux-3.0.3/scripts/Makefile.build linux-3.0.3/scripts/Makefile.build
68092 --- linux-3.0.3/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
68093 +++ linux-3.0.3/scripts/Makefile.build 2011-08-23 21:47:56.000000000 -0400
68094 @@ -109,7 +109,7 @@ endif
68095 endif
68096
68097 # Do not include host rules unless needed
68098 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68099 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68100 include scripts/Makefile.host
68101 endif
68102
68103 diff -urNp linux-3.0.3/scripts/Makefile.clean linux-3.0.3/scripts/Makefile.clean
68104 --- linux-3.0.3/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
68105 +++ linux-3.0.3/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
68106 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68107 __clean-files := $(extra-y) $(always) \
68108 $(targets) $(clean-files) \
68109 $(host-progs) \
68110 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68111 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68112 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68113
68114 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68115
68116 diff -urNp linux-3.0.3/scripts/Makefile.host linux-3.0.3/scripts/Makefile.host
68117 --- linux-3.0.3/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
68118 +++ linux-3.0.3/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
68119 @@ -31,6 +31,7 @@
68120 # Note: Shared libraries consisting of C++ files are not supported
68121
68122 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68123 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68124
68125 # C code
68126 # Executables compiled from a single .c file
68127 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68128 # Shared libaries (only .c supported)
68129 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68130 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68131 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68132 # Remove .so files from "xxx-objs"
68133 host-cobjs := $(filter-out %.so,$(host-cobjs))
68134
68135 diff -urNp linux-3.0.3/scripts/mod/file2alias.c linux-3.0.3/scripts/mod/file2alias.c
68136 --- linux-3.0.3/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
68137 +++ linux-3.0.3/scripts/mod/file2alias.c 2011-08-23 21:47:56.000000000 -0400
68138 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68139 unsigned long size, unsigned long id_size,
68140 void *symval)
68141 {
68142 - int i;
68143 + unsigned int i;
68144
68145 if (size % id_size || size < id_size) {
68146 if (cross_build != 0)
68147 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68148 /* USB is special because the bcdDevice can be matched against a numeric range */
68149 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68150 static void do_usb_entry(struct usb_device_id *id,
68151 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68152 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68153 unsigned char range_lo, unsigned char range_hi,
68154 unsigned char max, struct module *mod)
68155 {
68156 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68157 for (i = 0; i < count; i++) {
68158 const char *id = (char *)devs[i].id;
68159 char acpi_id[sizeof(devs[0].id)];
68160 - int j;
68161 + unsigned int j;
68162
68163 buf_printf(&mod->dev_table_buf,
68164 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68165 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68166
68167 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68168 const char *id = (char *)card->devs[j].id;
68169 - int i2, j2;
68170 + unsigned int i2, j2;
68171 int dup = 0;
68172
68173 if (!id[0])
68174 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68175 /* add an individual alias for every device entry */
68176 if (!dup) {
68177 char acpi_id[sizeof(card->devs[0].id)];
68178 - int k;
68179 + unsigned int k;
68180
68181 buf_printf(&mod->dev_table_buf,
68182 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68183 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
68184 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68185 char *alias)
68186 {
68187 - int i, j;
68188 + unsigned int i, j;
68189
68190 sprintf(alias, "dmi*");
68191
68192 diff -urNp linux-3.0.3/scripts/mod/modpost.c linux-3.0.3/scripts/mod/modpost.c
68193 --- linux-3.0.3/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
68194 +++ linux-3.0.3/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
68195 @@ -892,6 +892,7 @@ enum mismatch {
68196 ANY_INIT_TO_ANY_EXIT,
68197 ANY_EXIT_TO_ANY_INIT,
68198 EXPORT_TO_INIT_EXIT,
68199 + DATA_TO_TEXT
68200 };
68201
68202 struct sectioncheck {
68203 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
68204 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68205 .mismatch = EXPORT_TO_INIT_EXIT,
68206 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68207 +},
68208 +/* Do not reference code from writable data */
68209 +{
68210 + .fromsec = { DATA_SECTIONS, NULL },
68211 + .tosec = { TEXT_SECTIONS, NULL },
68212 + .mismatch = DATA_TO_TEXT
68213 }
68214 };
68215
68216 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
68217 continue;
68218 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68219 continue;
68220 - if (sym->st_value == addr)
68221 - return sym;
68222 /* Find a symbol nearby - addr are maybe negative */
68223 d = sym->st_value - addr;
68224 + if (d == 0)
68225 + return sym;
68226 if (d < 0)
68227 d = addr - sym->st_value;
68228 if (d < distance) {
68229 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
68230 tosym, prl_to, prl_to, tosym);
68231 free(prl_to);
68232 break;
68233 + case DATA_TO_TEXT:
68234 +/*
68235 + fprintf(stderr,
68236 + "The variable %s references\n"
68237 + "the %s %s%s%s\n",
68238 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68239 +*/
68240 + break;
68241 }
68242 fprintf(stderr, "\n");
68243 }
68244 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
68245 static void check_sec_ref(struct module *mod, const char *modname,
68246 struct elf_info *elf)
68247 {
68248 - int i;
68249 + unsigned int i;
68250 Elf_Shdr *sechdrs = elf->sechdrs;
68251
68252 /* Walk through all sections */
68253 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
68254 va_end(ap);
68255 }
68256
68257 -void buf_write(struct buffer *buf, const char *s, int len)
68258 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68259 {
68260 if (buf->size - buf->pos < len) {
68261 buf->size += len + SZ;
68262 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
68263 if (fstat(fileno(file), &st) < 0)
68264 goto close_write;
68265
68266 - if (st.st_size != b->pos)
68267 + if (st.st_size != (off_t)b->pos)
68268 goto close_write;
68269
68270 tmp = NOFAIL(malloc(b->pos));
68271 diff -urNp linux-3.0.3/scripts/mod/modpost.h linux-3.0.3/scripts/mod/modpost.h
68272 --- linux-3.0.3/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
68273 +++ linux-3.0.3/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
68274 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68275
68276 struct buffer {
68277 char *p;
68278 - int pos;
68279 - int size;
68280 + unsigned int pos;
68281 + unsigned int size;
68282 };
68283
68284 void __attribute__((format(printf, 2, 3)))
68285 buf_printf(struct buffer *buf, const char *fmt, ...);
68286
68287 void
68288 -buf_write(struct buffer *buf, const char *s, int len);
68289 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68290
68291 struct module {
68292 struct module *next;
68293 diff -urNp linux-3.0.3/scripts/mod/sumversion.c linux-3.0.3/scripts/mod/sumversion.c
68294 --- linux-3.0.3/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
68295 +++ linux-3.0.3/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
68296 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68297 goto out;
68298 }
68299
68300 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68301 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68302 warn("writing sum in %s failed: %s\n",
68303 filename, strerror(errno));
68304 goto out;
68305 diff -urNp linux-3.0.3/scripts/pnmtologo.c linux-3.0.3/scripts/pnmtologo.c
68306 --- linux-3.0.3/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
68307 +++ linux-3.0.3/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
68308 @@ -237,14 +237,14 @@ static void write_header(void)
68309 fprintf(out, " * Linux logo %s\n", logoname);
68310 fputs(" */\n\n", out);
68311 fputs("#include <linux/linux_logo.h>\n\n", out);
68312 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68313 + fprintf(out, "static unsigned char %s_data[] = {\n",
68314 logoname);
68315 }
68316
68317 static void write_footer(void)
68318 {
68319 fputs("\n};\n\n", out);
68320 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68321 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68322 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68323 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68324 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68325 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68326 fputs("\n};\n\n", out);
68327
68328 /* write logo clut */
68329 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68330 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68331 logoname);
68332 write_hex_cnt = 0;
68333 for (i = 0; i < logo_clutsize; i++) {
68334 diff -urNp linux-3.0.3/security/apparmor/lsm.c linux-3.0.3/security/apparmor/lsm.c
68335 --- linux-3.0.3/security/apparmor/lsm.c 2011-08-23 21:44:40.000000000 -0400
68336 +++ linux-3.0.3/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
68337 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68338 return error;
68339 }
68340
68341 -static struct security_operations apparmor_ops = {
68342 +static struct security_operations apparmor_ops __read_only = {
68343 .name = "apparmor",
68344
68345 .ptrace_access_check = apparmor_ptrace_access_check,
68346 diff -urNp linux-3.0.3/security/commoncap.c linux-3.0.3/security/commoncap.c
68347 --- linux-3.0.3/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
68348 +++ linux-3.0.3/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
68349 @@ -28,6 +28,7 @@
68350 #include <linux/prctl.h>
68351 #include <linux/securebits.h>
68352 #include <linux/user_namespace.h>
68353 +#include <net/sock.h>
68354
68355 /*
68356 * If a non-root user executes a setuid-root binary in
68357 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68358
68359 int cap_netlink_recv(struct sk_buff *skb, int cap)
68360 {
68361 - if (!cap_raised(current_cap(), cap))
68362 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68363 return -EPERM;
68364 return 0;
68365 }
68366 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
68367 {
68368 const struct cred *cred = current_cred();
68369
68370 + if (gr_acl_enable_at_secure())
68371 + return 1;
68372 +
68373 if (cred->uid != 0) {
68374 if (bprm->cap_effective)
68375 return 1;
68376 diff -urNp linux-3.0.3/security/integrity/ima/ima_api.c linux-3.0.3/security/integrity/ima/ima_api.c
68377 --- linux-3.0.3/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
68378 +++ linux-3.0.3/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
68379 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68380 int result;
68381
68382 /* can overflow, only indicator */
68383 - atomic_long_inc(&ima_htable.violations);
68384 + atomic_long_inc_unchecked(&ima_htable.violations);
68385
68386 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68387 if (!entry) {
68388 diff -urNp linux-3.0.3/security/integrity/ima/ima_fs.c linux-3.0.3/security/integrity/ima/ima_fs.c
68389 --- linux-3.0.3/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
68390 +++ linux-3.0.3/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
68391 @@ -28,12 +28,12 @@
68392 static int valid_policy = 1;
68393 #define TMPBUFLEN 12
68394 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68395 - loff_t *ppos, atomic_long_t *val)
68396 + loff_t *ppos, atomic_long_unchecked_t *val)
68397 {
68398 char tmpbuf[TMPBUFLEN];
68399 ssize_t len;
68400
68401 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68402 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68403 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68404 }
68405
68406 diff -urNp linux-3.0.3/security/integrity/ima/ima.h linux-3.0.3/security/integrity/ima/ima.h
68407 --- linux-3.0.3/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
68408 +++ linux-3.0.3/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
68409 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68410 extern spinlock_t ima_queue_lock;
68411
68412 struct ima_h_table {
68413 - atomic_long_t len; /* number of stored measurements in the list */
68414 - atomic_long_t violations;
68415 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68416 + atomic_long_unchecked_t violations;
68417 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68418 };
68419 extern struct ima_h_table ima_htable;
68420 diff -urNp linux-3.0.3/security/integrity/ima/ima_queue.c linux-3.0.3/security/integrity/ima/ima_queue.c
68421 --- linux-3.0.3/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
68422 +++ linux-3.0.3/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
68423 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68424 INIT_LIST_HEAD(&qe->later);
68425 list_add_tail_rcu(&qe->later, &ima_measurements);
68426
68427 - atomic_long_inc(&ima_htable.len);
68428 + atomic_long_inc_unchecked(&ima_htable.len);
68429 key = ima_hash_key(entry->digest);
68430 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68431 return 0;
68432 diff -urNp linux-3.0.3/security/Kconfig linux-3.0.3/security/Kconfig
68433 --- linux-3.0.3/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
68434 +++ linux-3.0.3/security/Kconfig 2011-08-23 21:48:14.000000000 -0400
68435 @@ -4,6 +4,554 @@
68436
68437 menu "Security options"
68438
68439 +source grsecurity/Kconfig
68440 +
68441 +menu "PaX"
68442 +
68443 + config ARCH_TRACK_EXEC_LIMIT
68444 + bool
68445 +
68446 + config PAX_PER_CPU_PGD
68447 + bool
68448 +
68449 + config TASK_SIZE_MAX_SHIFT
68450 + int
68451 + depends on X86_64
68452 + default 47 if !PAX_PER_CPU_PGD
68453 + default 42 if PAX_PER_CPU_PGD
68454 +
68455 + config PAX_ENABLE_PAE
68456 + bool
68457 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68458 +
68459 +config PAX
68460 + bool "Enable various PaX features"
68461 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68462 + help
68463 + This allows you to enable various PaX features. PaX adds
68464 + intrusion prevention mechanisms to the kernel that reduce
68465 + the risks posed by exploitable memory corruption bugs.
68466 +
68467 +menu "PaX Control"
68468 + depends on PAX
68469 +
68470 +config PAX_SOFTMODE
68471 + bool 'Support soft mode'
68472 + select PAX_PT_PAX_FLAGS
68473 + help
68474 + Enabling this option will allow you to run PaX in soft mode, that
68475 + is, PaX features will not be enforced by default, only on executables
68476 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68477 + is the only way to mark executables for soft mode use.
68478 +
68479 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68480 + line option on boot. Furthermore you can control various PaX features
68481 + at runtime via the entries in /proc/sys/kernel/pax.
68482 +
68483 +config PAX_EI_PAX
68484 + bool 'Use legacy ELF header marking'
68485 + help
68486 + Enabling this option will allow you to control PaX features on
68487 + a per executable basis via the 'chpax' utility available at
68488 + http://pax.grsecurity.net/. The control flags will be read from
68489 + an otherwise reserved part of the ELF header. This marking has
68490 + numerous drawbacks (no support for soft-mode, toolchain does not
68491 + know about the non-standard use of the ELF header) therefore it
68492 + has been deprecated in favour of PT_PAX_FLAGS support.
68493 +
68494 + Note that if you enable PT_PAX_FLAGS marking support as well,
68495 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
68496 +
68497 +config PAX_PT_PAX_FLAGS
68498 + bool 'Use ELF program header marking'
68499 + help
68500 + Enabling this option will allow you to control PaX features on
68501 + a per executable basis via the 'paxctl' utility available at
68502 + http://pax.grsecurity.net/. The control flags will be read from
68503 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
68504 + has the benefits of supporting both soft mode and being fully
68505 + integrated into the toolchain (the binutils patch is available
68506 + from http://pax.grsecurity.net).
68507 +
68508 + If your toolchain does not support PT_PAX_FLAGS markings,
68509 + you can create one in most cases with 'paxctl -C'.
68510 +
68511 + Note that if you enable the legacy EI_PAX marking support as well,
68512 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
68513 +
68514 +choice
68515 + prompt 'MAC system integration'
68516 + default PAX_HAVE_ACL_FLAGS
68517 + help
68518 + Mandatory Access Control systems have the option of controlling
68519 + PaX flags on a per executable basis, choose the method supported
68520 + by your particular system.
68521 +
68522 + - "none": if your MAC system does not interact with PaX,
68523 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
68524 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
68525 +
68526 + NOTE: this option is for developers/integrators only.
68527 +
68528 + config PAX_NO_ACL_FLAGS
68529 + bool 'none'
68530 +
68531 + config PAX_HAVE_ACL_FLAGS
68532 + bool 'direct'
68533 +
68534 + config PAX_HOOK_ACL_FLAGS
68535 + bool 'hook'
68536 +endchoice
68537 +
68538 +endmenu
68539 +
68540 +menu "Non-executable pages"
68541 + depends on PAX
68542 +
68543 +config PAX_NOEXEC
68544 + bool "Enforce non-executable pages"
68545 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
68546 + help
68547 + By design some architectures do not allow for protecting memory
68548 + pages against execution or even if they do, Linux does not make
68549 + use of this feature. In practice this means that if a page is
68550 + readable (such as the stack or heap) it is also executable.
68551 +
68552 + There is a well known exploit technique that makes use of this
68553 + fact and a common programming mistake where an attacker can
68554 + introduce code of his choice somewhere in the attacked program's
68555 + memory (typically the stack or the heap) and then execute it.
68556 +
68557 + If the attacked program was running with different (typically
68558 + higher) privileges than that of the attacker, then he can elevate
68559 + his own privilege level (e.g. get a root shell, write to files for
68560 + which he does not have write access to, etc).
68561 +
68562 + Enabling this option will let you choose from various features
68563 + that prevent the injection and execution of 'foreign' code in
68564 + a program.
68565 +
68566 + This will also break programs that rely on the old behaviour and
68567 + expect that dynamically allocated memory via the malloc() family
68568 + of functions is executable (which it is not). Notable examples
68569 + are the XFree86 4.x server, the java runtime and wine.
68570 +
68571 +config PAX_PAGEEXEC
68572 + bool "Paging based non-executable pages"
68573 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
68574 + select S390_SWITCH_AMODE if S390
68575 + select S390_EXEC_PROTECT if S390
68576 + select ARCH_TRACK_EXEC_LIMIT if X86_32
68577 + help
68578 + This implementation is based on the paging feature of the CPU.
68579 + On i386 without hardware non-executable bit support there is a
68580 + variable but usually low performance impact, however on Intel's
68581 + P4 core based CPUs it is very high so you should not enable this
68582 + for kernels meant to be used on such CPUs.
68583 +
68584 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
68585 + with hardware non-executable bit support there is no performance
68586 + impact, on ppc the impact is negligible.
68587 +
68588 + Note that several architectures require various emulations due to
68589 + badly designed userland ABIs, this will cause a performance impact
68590 + but will disappear as soon as userland is fixed. For example, ppc
68591 + userland MUST have been built with secure-plt by a recent toolchain.
68592 +
68593 +config PAX_SEGMEXEC
68594 + bool "Segmentation based non-executable pages"
68595 + depends on PAX_NOEXEC && X86_32
68596 + help
68597 + This implementation is based on the segmentation feature of the
68598 + CPU and has a very small performance impact, however applications
68599 + will be limited to a 1.5 GB address space instead of the normal
68600 + 3 GB.
68601 +
68602 +config PAX_EMUTRAMP
68603 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
68604 + default y if PARISC
68605 + help
68606 + There are some programs and libraries that for one reason or
68607 + another attempt to execute special small code snippets from
68608 + non-executable memory pages. Most notable examples are the
68609 + signal handler return code generated by the kernel itself and
68610 + the GCC trampolines.
68611 +
68612 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
68613 + such programs will no longer work under your kernel.
68614 +
68615 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
68616 + utilities to enable trampoline emulation for the affected programs
68617 + yet still have the protection provided by the non-executable pages.
68618 +
68619 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
68620 + your system will not even boot.
68621 +
68622 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
68623 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
68624 + for the affected files.
68625 +
68626 + NOTE: enabling this feature *may* open up a loophole in the
68627 + protection provided by non-executable pages that an attacker
68628 + could abuse. Therefore the best solution is to not have any
68629 + files on your system that would require this option. This can
68630 + be achieved by not using libc5 (which relies on the kernel
68631 + signal handler return code) and not using or rewriting programs
68632 + that make use of the nested function implementation of GCC.
68633 + Skilled users can just fix GCC itself so that it implements
68634 + nested function calls in a way that does not interfere with PaX.
68635 +
68636 +config PAX_EMUSIGRT
68637 + bool "Automatically emulate sigreturn trampolines"
68638 + depends on PAX_EMUTRAMP && PARISC
68639 + default y
68640 + help
68641 + Enabling this option will have the kernel automatically detect
68642 + and emulate signal return trampolines executing on the stack
68643 + that would otherwise lead to task termination.
68644 +
68645 + This solution is intended as a temporary one for users with
68646 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
68647 + Modula-3 runtime, etc) or executables linked to such, basically
68648 + everything that does not specify its own SA_RESTORER function in
68649 + normal executable memory like glibc 2.1+ does.
68650 +
68651 + On parisc you MUST enable this option, otherwise your system will
68652 + not even boot.
68653 +
68654 + NOTE: this feature cannot be disabled on a per executable basis
68655 + and since it *does* open up a loophole in the protection provided
68656 + by non-executable pages, the best solution is to not have any
68657 + files on your system that would require this option.
68658 +
68659 +config PAX_MPROTECT
68660 + bool "Restrict mprotect()"
68661 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
68662 + help
68663 + Enabling this option will prevent programs from
68664 + - changing the executable status of memory pages that were
68665 + not originally created as executable,
68666 + - making read-only executable pages writable again,
68667 + - creating executable pages from anonymous memory,
68668 + - making read-only-after-relocations (RELRO) data pages writable again.
68669 +
68670 + You should say Y here to complete the protection provided by
68671 + the enforcement of non-executable pages.
68672 +
68673 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68674 + this feature on a per file basis.
68675 +
68676 +config PAX_MPROTECT_COMPAT
68677 + bool "Use legacy/compat protection demoting (read help)"
68678 + depends on PAX_MPROTECT
68679 + default n
68680 + help
68681 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
68682 + by sending the proper error code to the application. For some broken
68683 + userland, this can cause problems with Python or other applications. The
68684 + current implementation however allows for applications like clamav to
68685 + detect if JIT compilation/execution is allowed and to fall back gracefully
68686 + to an interpreter-based mode if it does not. While we encourage everyone
68687 + to use the current implementation as-is and push upstream to fix broken
68688 + userland (note that the RWX logging option can assist with this), in some
68689 + environments this may not be possible. Having to disable MPROTECT
68690 + completely on certain binaries reduces the security benefit of PaX,
68691 + so this option is provided for those environments to revert to the old
68692 + behavior.
68693 +
68694 +config PAX_ELFRELOCS
68695 + bool "Allow ELF text relocations (read help)"
68696 + depends on PAX_MPROTECT
68697 + default n
68698 + help
68699 + Non-executable pages and mprotect() restrictions are effective
68700 + in preventing the introduction of new executable code into an
68701 + attacked task's address space. There remain only two venues
68702 + for this kind of attack: if the attacker can execute already
68703 + existing code in the attacked task then he can either have it
68704 + create and mmap() a file containing his code or have it mmap()
68705 + an already existing ELF library that does not have position
68706 + independent code in it and use mprotect() on it to make it
68707 + writable and copy his code there. While protecting against
68708 + the former approach is beyond PaX, the latter can be prevented
68709 + by having only PIC ELF libraries on one's system (which do not
68710 + need to relocate their code). If you are sure this is your case,
68711 + as is the case with all modern Linux distributions, then leave
68712 + this option disabled. You should say 'n' here.
68713 +
68714 +config PAX_ETEXECRELOCS
68715 + bool "Allow ELF ET_EXEC text relocations"
68716 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
68717 + select PAX_ELFRELOCS
68718 + default y
68719 + help
68720 + On some architectures there are incorrectly created applications
68721 + that require text relocations and would not work without enabling
68722 + this option. If you are an alpha, ia64 or parisc user, you should
68723 + enable this option and disable it once you have made sure that
68724 + none of your applications need it.
68725 +
68726 +config PAX_EMUPLT
68727 + bool "Automatically emulate ELF PLT"
68728 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
68729 + default y
68730 + help
68731 + Enabling this option will have the kernel automatically detect
68732 + and emulate the Procedure Linkage Table entries in ELF files.
68733 + On some architectures such entries are in writable memory, and
68734 + become non-executable leading to task termination. Therefore
68735 + it is mandatory that you enable this option on alpha, parisc,
68736 + sparc and sparc64, otherwise your system would not even boot.
68737 +
68738 + NOTE: this feature *does* open up a loophole in the protection
68739 + provided by the non-executable pages, therefore the proper
68740 + solution is to modify the toolchain to produce a PLT that does
68741 + not need to be writable.
68742 +
68743 +config PAX_DLRESOLVE
68744 + bool 'Emulate old glibc resolver stub'
68745 + depends on PAX_EMUPLT && SPARC
68746 + default n
68747 + help
68748 + This option is needed if userland has an old glibc (before 2.4)
68749 + that puts a 'save' instruction into the runtime generated resolver
68750 + stub that needs special emulation.
68751 +
68752 +config PAX_KERNEXEC
68753 + bool "Enforce non-executable kernel pages"
68754 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
68755 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
68756 + help
68757 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
68758 + that is, enabling this option will make it harder to inject
68759 + and execute 'foreign' code in kernel memory itself.
68760 +
68761 + Note that on x86_64 kernels there is a known regression when
68762 + this feature and KVM/VMX are both enabled in the host kernel.
68763 +
68764 +config PAX_KERNEXEC_MODULE_TEXT
68765 + int "Minimum amount of memory reserved for module code"
68766 + default "4"
68767 + depends on PAX_KERNEXEC && X86_32 && MODULES
68768 + help
68769 + Due to implementation details the kernel must reserve a fixed
68770 + amount of memory for module code at compile time that cannot be
68771 + changed at runtime. Here you can specify the minimum amount
68772 + in MB that will be reserved. Due to the same implementation
68773 + details this size will always be rounded up to the next 2/4 MB
68774 + boundary (depends on PAE) so the actually available memory for
68775 + module code will usually be more than this minimum.
68776 +
68777 + The default 4 MB should be enough for most users but if you have
68778 + an excessive number of modules (e.g., most distribution configs
68779 + compile many drivers as modules) or use huge modules such as
68780 + nvidia's kernel driver, you will need to adjust this amount.
68781 + A good rule of thumb is to look at your currently loaded kernel
68782 + modules and add up their sizes.
68783 +
68784 +endmenu
68785 +
68786 +menu "Address Space Layout Randomization"
68787 + depends on PAX
68788 +
68789 +config PAX_ASLR
68790 + bool "Address Space Layout Randomization"
68791 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
68792 + help
68793 + Many if not most exploit techniques rely on the knowledge of
68794 + certain addresses in the attacked program. The following options
68795 + will allow the kernel to apply a certain amount of randomization
68796 + to specific parts of the program thereby forcing an attacker to
68797 + guess them in most cases. Any failed guess will most likely crash
68798 + the attacked program which allows the kernel to detect such attempts
68799 + and react on them. PaX itself provides no reaction mechanisms,
68800 + instead it is strongly encouraged that you make use of Nergal's
68801 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
68802 + (http://www.grsecurity.net/) built-in crash detection features or
68803 + develop one yourself.
68804 +
68805 + By saying Y here you can choose to randomize the following areas:
68806 + - top of the task's kernel stack
68807 + - top of the task's userland stack
68808 + - base address for mmap() requests that do not specify one
68809 + (this includes all libraries)
68810 + - base address of the main executable
68811 +
68812 + It is strongly recommended to say Y here as address space layout
68813 + randomization has negligible impact on performance yet it provides
68814 + a very effective protection.
68815 +
68816 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
68817 + this feature on a per file basis.
68818 +
68819 +config PAX_RANDKSTACK
68820 + bool "Randomize kernel stack base"
68821 + depends on PAX_ASLR && X86_TSC && X86
68822 + help
68823 + By saying Y here the kernel will randomize every task's kernel
68824 + stack on every system call. This will not only force an attacker
68825 + to guess it but also prevent him from making use of possible
68826 + leaked information about it.
68827 +
68828 + Since the kernel stack is a rather scarce resource, randomization
68829 + may cause unexpected stack overflows, therefore you should very
68830 + carefully test your system. Note that once enabled in the kernel
68831 + configuration, this feature cannot be disabled on a per file basis.
68832 +
68833 +config PAX_RANDUSTACK
68834 + bool "Randomize user stack base"
68835 + depends on PAX_ASLR
68836 + help
68837 + By saying Y here the kernel will randomize every task's userland
68838 + stack. The randomization is done in two steps where the second
68839 + one may apply a big amount of shift to the top of the stack and
68840 + cause problems for programs that want to use lots of memory (more
68841 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
68842 + For this reason the second step can be controlled by 'chpax' or
68843 + 'paxctl' on a per file basis.
68844 +
68845 +config PAX_RANDMMAP
68846 + bool "Randomize mmap() base"
68847 + depends on PAX_ASLR
68848 + help
68849 + By saying Y here the kernel will use a randomized base address for
68850 + mmap() requests that do not specify one themselves. As a result
68851 + all dynamically loaded libraries will appear at random addresses
68852 + and therefore be harder to exploit by a technique where an attacker
68853 + attempts to execute library code for his purposes (e.g. spawn a
68854 + shell from an exploited program that is running at an elevated
68855 + privilege level).
68856 +
68857 + Furthermore, if a program is relinked as a dynamic ELF file, its
68858 + base address will be randomized as well, completing the full
68859 + randomization of the address space layout. Attacking such programs
68860 + becomes a guess game. You can find an example of doing this at
68861 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
68862 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
68863 +
68864 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
68865 + feature on a per file basis.
68866 +
68867 +endmenu
68868 +
68869 +menu "Miscellaneous hardening features"
68870 +
68871 +config PAX_MEMORY_SANITIZE
68872 + bool "Sanitize all freed memory"
68873 + help
68874 + By saying Y here the kernel will erase memory pages as soon as they
68875 + are freed. This in turn reduces the lifetime of data stored in the
68876 + pages, making it less likely that sensitive information such as
68877 + passwords, cryptographic secrets, etc stay in memory for too long.
68878 +
68879 + This is especially useful for programs whose runtime is short, long
68880 + lived processes and the kernel itself benefit from this as long as
68881 + they operate on whole memory pages and ensure timely freeing of pages
68882 + that may hold sensitive information.
68883 +
68884 + The tradeoff is performance impact, on a single CPU system kernel
68885 + compilation sees a 3% slowdown, other systems and workloads may vary
68886 + and you are advised to test this feature on your expected workload
68887 + before deploying it.
68888 +
68889 + Note that this feature does not protect data stored in live pages,
68890 + e.g., process memory swapped to disk may stay there for a long time.
68891 +
68892 +config PAX_MEMORY_STACKLEAK
68893 + bool "Sanitize kernel stack"
68894 + depends on X86
68895 + help
68896 + By saying Y here the kernel will erase the kernel stack before it
68897 + returns from a system call. This in turn reduces the information
68898 + that a kernel stack leak bug can reveal.
68899 +
68900 + Note that such a bug can still leak information that was put on
68901 + the stack by the current system call (the one eventually triggering
68902 + the bug) but traces of earlier system calls on the kernel stack
68903 + cannot leak anymore.
68904 +
68905 + The tradeoff is performance impact: on a single CPU system kernel
68906 + compilation sees a 1% slowdown, other systems and workloads may vary
68907 + and you are advised to test this feature on your expected workload
68908 + before deploying it.
68909 +
68910 + Note: full support for this feature requires gcc with plugin support
68911 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
68912 + is not supported). Using older gcc versions means that functions
68913 + with large enough stack frames may leave uninitialized memory behind
68914 + that may be exposed to a later syscall leaking the stack.
68915 +
68916 +config PAX_MEMORY_UDEREF
68917 + bool "Prevent invalid userland pointer dereference"
68918 + depends on X86 && !UML_X86 && !XEN
68919 + select PAX_PER_CPU_PGD if X86_64
68920 + help
68921 + By saying Y here the kernel will be prevented from dereferencing
68922 + userland pointers in contexts where the kernel expects only kernel
68923 + pointers. This is both a useful runtime debugging feature and a
68924 + security measure that prevents exploiting a class of kernel bugs.
68925 +
68926 + The tradeoff is that some virtualization solutions may experience
68927 + a huge slowdown and therefore you should not enable this feature
68928 + for kernels meant to run in such environments. Whether a given VM
68929 + solution is affected or not is best determined by simply trying it
68930 + out, the performance impact will be obvious right on boot as this
68931 + mechanism engages from very early on. A good rule of thumb is that
68932 + VMs running on CPUs without hardware virtualization support (i.e.,
68933 + the majority of IA-32 CPUs) will likely experience the slowdown.
68934 +
68935 +config PAX_REFCOUNT
68936 + bool "Prevent various kernel object reference counter overflows"
68937 + depends on GRKERNSEC && (X86 || SPARC64)
68938 + help
68939 + By saying Y here the kernel will detect and prevent overflowing
68940 + various (but not all) kinds of object reference counters. Such
68941 + overflows can normally occur due to bugs only and are often, if
68942 + not always, exploitable.
68943 +
68944 + The tradeoff is that data structures protected by an overflowed
68945 + refcount will never be freed and therefore will leak memory. Note
68946 + that this leak also happens even without this protection but in
68947 + that case the overflow can eventually trigger the freeing of the
68948 + data structure while it is still being used elsewhere, resulting
68949 + in the exploitable situation that this feature prevents.
68950 +
68951 + Since this has a negligible performance impact, you should enable
68952 + this feature.
68953 +
68954 +config PAX_USERCOPY
68955 + bool "Harden heap object copies between kernel and userland"
68956 + depends on X86 || PPC || SPARC || ARM
68957 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
68958 + help
68959 + By saying Y here the kernel will enforce the size of heap objects
68960 + when they are copied in either direction between the kernel and
68961 + userland, even if only a part of the heap object is copied.
68962 +
68963 + Specifically, this checking prevents information leaking from the
68964 + kernel heap during kernel to userland copies (if the kernel heap
68965 + object is otherwise fully initialized) and prevents kernel heap
68966 + overflows during userland to kernel copies.
68967 +
68968 + Note that the current implementation provides the strictest bounds
68969 + checks for the SLUB allocator.
68970 +
68971 + Enabling this option also enables per-slab cache protection against
68972 + data in a given cache being copied into/out of via userland
68973 + accessors. Though the whitelist of regions will be reduced over
68974 + time, it notably protects important data structures like task structs.
68975 +
68976 + If frame pointers are enabled on x86, this option will also restrict
68977 + copies into and out of the kernel stack to local variables within a
68978 + single frame.
68979 +
68980 + Since this has a negligible performance impact, you should enable
68981 + this feature.
68982 +
68983 +endmenu
68984 +
68985 +endmenu
68986 +
68987 config KEYS
68988 bool "Enable access key retention support"
68989 help
68990 @@ -167,7 +715,7 @@ config INTEL_TXT
68991 config LSM_MMAP_MIN_ADDR
68992 int "Low address space for LSM to protect from user allocation"
68993 depends on SECURITY && SECURITY_SELINUX
68994 - default 32768 if ARM
68995 + default 32768 if ALPHA || ARM || PARISC || SPARC32
68996 default 65536
68997 help
68998 This is the portion of low virtual memory which should be protected
68999 diff -urNp linux-3.0.3/security/keys/keyring.c linux-3.0.3/security/keys/keyring.c
69000 --- linux-3.0.3/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
69001 +++ linux-3.0.3/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
69002 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
69003 ret = -EFAULT;
69004
69005 for (loop = 0; loop < klist->nkeys; loop++) {
69006 + key_serial_t serial;
69007 key = klist->keys[loop];
69008 + serial = key->serial;
69009
69010 tmp = sizeof(key_serial_t);
69011 if (tmp > buflen)
69012 tmp = buflen;
69013
69014 - if (copy_to_user(buffer,
69015 - &key->serial,
69016 - tmp) != 0)
69017 + if (copy_to_user(buffer, &serial, tmp))
69018 goto error;
69019
69020 buflen -= tmp;
69021 diff -urNp linux-3.0.3/security/min_addr.c linux-3.0.3/security/min_addr.c
69022 --- linux-3.0.3/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
69023 +++ linux-3.0.3/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
69024 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69025 */
69026 static void update_mmap_min_addr(void)
69027 {
69028 +#ifndef SPARC
69029 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69030 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69031 mmap_min_addr = dac_mmap_min_addr;
69032 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69033 #else
69034 mmap_min_addr = dac_mmap_min_addr;
69035 #endif
69036 +#endif
69037 }
69038
69039 /*
69040 diff -urNp linux-3.0.3/security/security.c linux-3.0.3/security/security.c
69041 --- linux-3.0.3/security/security.c 2011-07-21 22:17:23.000000000 -0400
69042 +++ linux-3.0.3/security/security.c 2011-08-23 21:48:14.000000000 -0400
69043 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69044 /* things that live in capability.c */
69045 extern void __init security_fixup_ops(struct security_operations *ops);
69046
69047 -static struct security_operations *security_ops;
69048 -static struct security_operations default_security_ops = {
69049 +static struct security_operations *security_ops __read_only;
69050 +static struct security_operations default_security_ops __read_only = {
69051 .name = "default",
69052 };
69053
69054 @@ -67,7 +67,9 @@ int __init security_init(void)
69055
69056 void reset_security_ops(void)
69057 {
69058 + pax_open_kernel();
69059 security_ops = &default_security_ops;
69060 + pax_close_kernel();
69061 }
69062
69063 /* Save user chosen LSM */
69064 diff -urNp linux-3.0.3/security/selinux/hooks.c linux-3.0.3/security/selinux/hooks.c
69065 --- linux-3.0.3/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
69066 +++ linux-3.0.3/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
69067 @@ -93,7 +93,6 @@
69068 #define NUM_SEL_MNT_OPTS 5
69069
69070 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69071 -extern struct security_operations *security_ops;
69072
69073 /* SECMARK reference count */
69074 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69075 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
69076
69077 #endif
69078
69079 -static struct security_operations selinux_ops = {
69080 +static struct security_operations selinux_ops __read_only = {
69081 .name = "selinux",
69082
69083 .ptrace_access_check = selinux_ptrace_access_check,
69084 diff -urNp linux-3.0.3/security/selinux/include/xfrm.h linux-3.0.3/security/selinux/include/xfrm.h
69085 --- linux-3.0.3/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
69086 +++ linux-3.0.3/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
69087 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69088
69089 static inline void selinux_xfrm_notify_policyload(void)
69090 {
69091 - atomic_inc(&flow_cache_genid);
69092 + atomic_inc_unchecked(&flow_cache_genid);
69093 }
69094 #else
69095 static inline int selinux_xfrm_enabled(void)
69096 diff -urNp linux-3.0.3/security/selinux/ss/services.c linux-3.0.3/security/selinux/ss/services.c
69097 --- linux-3.0.3/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
69098 +++ linux-3.0.3/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
69099 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
69100 int rc = 0;
69101 struct policy_file file = { data, len }, *fp = &file;
69102
69103 + pax_track_stack();
69104 +
69105 if (!ss_initialized) {
69106 avtab_cache_init();
69107 rc = policydb_read(&policydb, fp);
69108 diff -urNp linux-3.0.3/security/smack/smack_lsm.c linux-3.0.3/security/smack/smack_lsm.c
69109 --- linux-3.0.3/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
69110 +++ linux-3.0.3/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
69111 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
69112 return 0;
69113 }
69114
69115 -struct security_operations smack_ops = {
69116 +struct security_operations smack_ops __read_only = {
69117 .name = "smack",
69118
69119 .ptrace_access_check = smack_ptrace_access_check,
69120 diff -urNp linux-3.0.3/security/tomoyo/tomoyo.c linux-3.0.3/security/tomoyo/tomoyo.c
69121 --- linux-3.0.3/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
69122 +++ linux-3.0.3/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
69123 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69124 * tomoyo_security_ops is a "struct security_operations" which is used for
69125 * registering TOMOYO.
69126 */
69127 -static struct security_operations tomoyo_security_ops = {
69128 +static struct security_operations tomoyo_security_ops __read_only = {
69129 .name = "tomoyo",
69130 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69131 .cred_prepare = tomoyo_cred_prepare,
69132 diff -urNp linux-3.0.3/sound/aoa/codecs/onyx.c linux-3.0.3/sound/aoa/codecs/onyx.c
69133 --- linux-3.0.3/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
69134 +++ linux-3.0.3/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
69135 @@ -54,7 +54,7 @@ struct onyx {
69136 spdif_locked:1,
69137 analog_locked:1,
69138 original_mute:2;
69139 - int open_count;
69140 + local_t open_count;
69141 struct codec_info *codec_info;
69142
69143 /* mutex serializes concurrent access to the device
69144 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69145 struct onyx *onyx = cii->codec_data;
69146
69147 mutex_lock(&onyx->mutex);
69148 - onyx->open_count++;
69149 + local_inc(&onyx->open_count);
69150 mutex_unlock(&onyx->mutex);
69151
69152 return 0;
69153 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69154 struct onyx *onyx = cii->codec_data;
69155
69156 mutex_lock(&onyx->mutex);
69157 - onyx->open_count--;
69158 - if (!onyx->open_count)
69159 + if (local_dec_and_test(&onyx->open_count))
69160 onyx->spdif_locked = onyx->analog_locked = 0;
69161 mutex_unlock(&onyx->mutex);
69162
69163 diff -urNp linux-3.0.3/sound/aoa/codecs/onyx.h linux-3.0.3/sound/aoa/codecs/onyx.h
69164 --- linux-3.0.3/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
69165 +++ linux-3.0.3/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
69166 @@ -11,6 +11,7 @@
69167 #include <linux/i2c.h>
69168 #include <asm/pmac_low_i2c.h>
69169 #include <asm/prom.h>
69170 +#include <asm/local.h>
69171
69172 /* PCM3052 register definitions */
69173
69174 diff -urNp linux-3.0.3/sound/core/seq/seq_device.c linux-3.0.3/sound/core/seq/seq_device.c
69175 --- linux-3.0.3/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
69176 +++ linux-3.0.3/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
69177 @@ -63,7 +63,7 @@ struct ops_list {
69178 int argsize; /* argument size */
69179
69180 /* operators */
69181 - struct snd_seq_dev_ops ops;
69182 + struct snd_seq_dev_ops *ops;
69183
69184 /* registred devices */
69185 struct list_head dev_list; /* list of devices */
69186 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69187
69188 mutex_lock(&ops->reg_mutex);
69189 /* copy driver operators */
69190 - ops->ops = *entry;
69191 + ops->ops = entry;
69192 ops->driver |= DRIVER_LOADED;
69193 ops->argsize = argsize;
69194
69195 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69196 dev->name, ops->id, ops->argsize, dev->argsize);
69197 return -EINVAL;
69198 }
69199 - if (ops->ops.init_device(dev) >= 0) {
69200 + if (ops->ops->init_device(dev) >= 0) {
69201 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69202 ops->num_init_devices++;
69203 } else {
69204 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69205 dev->name, ops->id, ops->argsize, dev->argsize);
69206 return -EINVAL;
69207 }
69208 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69209 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69210 dev->status = SNDRV_SEQ_DEVICE_FREE;
69211 dev->driver_data = NULL;
69212 ops->num_init_devices--;
69213 diff -urNp linux-3.0.3/sound/drivers/mts64.c linux-3.0.3/sound/drivers/mts64.c
69214 --- linux-3.0.3/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
69215 +++ linux-3.0.3/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
69216 @@ -28,6 +28,7 @@
69217 #include <sound/initval.h>
69218 #include <sound/rawmidi.h>
69219 #include <sound/control.h>
69220 +#include <asm/local.h>
69221
69222 #define CARD_NAME "Miditerminal 4140"
69223 #define DRIVER_NAME "MTS64"
69224 @@ -66,7 +67,7 @@ struct mts64 {
69225 struct pardevice *pardev;
69226 int pardev_claimed;
69227
69228 - int open_count;
69229 + local_t open_count;
69230 int current_midi_output_port;
69231 int current_midi_input_port;
69232 u8 mode[MTS64_NUM_INPUT_PORTS];
69233 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69234 {
69235 struct mts64 *mts = substream->rmidi->private_data;
69236
69237 - if (mts->open_count == 0) {
69238 + if (local_read(&mts->open_count) == 0) {
69239 /* We don't need a spinlock here, because this is just called
69240 if the device has not been opened before.
69241 So there aren't any IRQs from the device */
69242 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69243
69244 msleep(50);
69245 }
69246 - ++(mts->open_count);
69247 + local_inc(&mts->open_count);
69248
69249 return 0;
69250 }
69251 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69252 struct mts64 *mts = substream->rmidi->private_data;
69253 unsigned long flags;
69254
69255 - --(mts->open_count);
69256 - if (mts->open_count == 0) {
69257 + if (local_dec_return(&mts->open_count) == 0) {
69258 /* We need the spinlock_irqsave here because we can still
69259 have IRQs at this point */
69260 spin_lock_irqsave(&mts->lock, flags);
69261 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69262
69263 msleep(500);
69264
69265 - } else if (mts->open_count < 0)
69266 - mts->open_count = 0;
69267 + } else if (local_read(&mts->open_count) < 0)
69268 + local_set(&mts->open_count, 0);
69269
69270 return 0;
69271 }
69272 diff -urNp linux-3.0.3/sound/drivers/opl4/opl4_lib.c linux-3.0.3/sound/drivers/opl4/opl4_lib.c
69273 --- linux-3.0.3/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
69274 +++ linux-3.0.3/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
69275 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69276 MODULE_DESCRIPTION("OPL4 driver");
69277 MODULE_LICENSE("GPL");
69278
69279 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69280 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69281 {
69282 int timeout = 10;
69283 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69284 diff -urNp linux-3.0.3/sound/drivers/portman2x4.c linux-3.0.3/sound/drivers/portman2x4.c
69285 --- linux-3.0.3/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
69286 +++ linux-3.0.3/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
69287 @@ -47,6 +47,7 @@
69288 #include <sound/initval.h>
69289 #include <sound/rawmidi.h>
69290 #include <sound/control.h>
69291 +#include <asm/local.h>
69292
69293 #define CARD_NAME "Portman 2x4"
69294 #define DRIVER_NAME "portman"
69295 @@ -84,7 +85,7 @@ struct portman {
69296 struct pardevice *pardev;
69297 int pardev_claimed;
69298
69299 - int open_count;
69300 + local_t open_count;
69301 int mode[PORTMAN_NUM_INPUT_PORTS];
69302 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69303 };
69304 diff -urNp linux-3.0.3/sound/firewire/amdtp.c linux-3.0.3/sound/firewire/amdtp.c
69305 --- linux-3.0.3/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
69306 +++ linux-3.0.3/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
69307 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69308 ptr = s->pcm_buffer_pointer + data_blocks;
69309 if (ptr >= pcm->runtime->buffer_size)
69310 ptr -= pcm->runtime->buffer_size;
69311 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69312 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69313
69314 s->pcm_period_pointer += data_blocks;
69315 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69316 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69317 */
69318 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69319 {
69320 - ACCESS_ONCE(s->source_node_id_field) =
69321 + ACCESS_ONCE_RW(s->source_node_id_field) =
69322 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69323 }
69324 EXPORT_SYMBOL(amdtp_out_stream_update);
69325 diff -urNp linux-3.0.3/sound/firewire/amdtp.h linux-3.0.3/sound/firewire/amdtp.h
69326 --- linux-3.0.3/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
69327 +++ linux-3.0.3/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
69328 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69329 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69330 struct snd_pcm_substream *pcm)
69331 {
69332 - ACCESS_ONCE(s->pcm) = pcm;
69333 + ACCESS_ONCE_RW(s->pcm) = pcm;
69334 }
69335
69336 /**
69337 diff -urNp linux-3.0.3/sound/firewire/isight.c linux-3.0.3/sound/firewire/isight.c
69338 --- linux-3.0.3/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
69339 +++ linux-3.0.3/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
69340 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
69341 ptr += count;
69342 if (ptr >= runtime->buffer_size)
69343 ptr -= runtime->buffer_size;
69344 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
69345 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
69346
69347 isight->period_counter += count;
69348 if (isight->period_counter >= runtime->period_size) {
69349 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
69350 if (err < 0)
69351 return err;
69352
69353 - ACCESS_ONCE(isight->pcm_active) = true;
69354 + ACCESS_ONCE_RW(isight->pcm_active) = true;
69355
69356 return 0;
69357 }
69358 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
69359 {
69360 struct isight *isight = substream->private_data;
69361
69362 - ACCESS_ONCE(isight->pcm_active) = false;
69363 + ACCESS_ONCE_RW(isight->pcm_active) = false;
69364
69365 mutex_lock(&isight->mutex);
69366 isight_stop_streaming(isight);
69367 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
69368
69369 switch (cmd) {
69370 case SNDRV_PCM_TRIGGER_START:
69371 - ACCESS_ONCE(isight->pcm_running) = true;
69372 + ACCESS_ONCE_RW(isight->pcm_running) = true;
69373 break;
69374 case SNDRV_PCM_TRIGGER_STOP:
69375 - ACCESS_ONCE(isight->pcm_running) = false;
69376 + ACCESS_ONCE_RW(isight->pcm_running) = false;
69377 break;
69378 default:
69379 return -EINVAL;
69380 diff -urNp linux-3.0.3/sound/isa/cmi8330.c linux-3.0.3/sound/isa/cmi8330.c
69381 --- linux-3.0.3/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
69382 +++ linux-3.0.3/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
69383 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69384
69385 struct snd_pcm *pcm;
69386 struct snd_cmi8330_stream {
69387 - struct snd_pcm_ops ops;
69388 + snd_pcm_ops_no_const ops;
69389 snd_pcm_open_callback_t open;
69390 void *private_data; /* sb or wss */
69391 } streams[2];
69392 diff -urNp linux-3.0.3/sound/oss/sb_audio.c linux-3.0.3/sound/oss/sb_audio.c
69393 --- linux-3.0.3/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
69394 +++ linux-3.0.3/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
69395 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69396 buf16 = (signed short *)(localbuf + localoffs);
69397 while (c)
69398 {
69399 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69400 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69401 if (copy_from_user(lbuf8,
69402 userbuf+useroffs + p,
69403 locallen))
69404 diff -urNp linux-3.0.3/sound/oss/swarm_cs4297a.c linux-3.0.3/sound/oss/swarm_cs4297a.c
69405 --- linux-3.0.3/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
69406 +++ linux-3.0.3/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
69407 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69408 {
69409 struct cs4297a_state *s;
69410 u32 pwr, id;
69411 - mm_segment_t fs;
69412 int rval;
69413 #ifndef CONFIG_BCM_CS4297A_CSWARM
69414 u64 cfg;
69415 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69416 if (!rval) {
69417 char *sb1250_duart_present;
69418
69419 +#if 0
69420 + mm_segment_t fs;
69421 fs = get_fs();
69422 set_fs(KERNEL_DS);
69423 -#if 0
69424 val = SOUND_MASK_LINE;
69425 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69426 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69427 val = initvol[i].vol;
69428 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69429 }
69430 + set_fs(fs);
69431 // cs4297a_write_ac97(s, 0x18, 0x0808);
69432 #else
69433 // cs4297a_write_ac97(s, 0x5e, 0x180);
69434 cs4297a_write_ac97(s, 0x02, 0x0808);
69435 cs4297a_write_ac97(s, 0x18, 0x0808);
69436 #endif
69437 - set_fs(fs);
69438
69439 list_add(&s->list, &cs4297a_devs);
69440
69441 diff -urNp linux-3.0.3/sound/pci/hda/hda_codec.h linux-3.0.3/sound/pci/hda/hda_codec.h
69442 --- linux-3.0.3/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
69443 +++ linux-3.0.3/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
69444 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69445 /* notify power-up/down from codec to controller */
69446 void (*pm_notify)(struct hda_bus *bus);
69447 #endif
69448 -};
69449 +} __no_const;
69450
69451 /* template to pass to the bus constructor */
69452 struct hda_bus_template {
69453 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69454 #endif
69455 void (*reboot_notify)(struct hda_codec *codec);
69456 };
69457 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69458
69459 /* record for amp information cache */
69460 struct hda_cache_head {
69461 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69462 struct snd_pcm_substream *substream);
69463 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69464 struct snd_pcm_substream *substream);
69465 -};
69466 +} __no_const;
69467
69468 /* PCM information for each substream */
69469 struct hda_pcm_stream {
69470 @@ -801,7 +802,7 @@ struct hda_codec {
69471 const char *modelname; /* model name for preset */
69472
69473 /* set by patch */
69474 - struct hda_codec_ops patch_ops;
69475 + hda_codec_ops_no_const patch_ops;
69476
69477 /* PCM to create, set by patch_ops.build_pcms callback */
69478 unsigned int num_pcms;
69479 diff -urNp linux-3.0.3/sound/pci/ice1712/ice1712.h linux-3.0.3/sound/pci/ice1712/ice1712.h
69480 --- linux-3.0.3/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
69481 +++ linux-3.0.3/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
69482 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69483 unsigned int mask_flags; /* total mask bits */
69484 struct snd_akm4xxx_ops {
69485 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69486 - } ops;
69487 + } __no_const ops;
69488 };
69489
69490 struct snd_ice1712_spdif {
69491 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69492 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69493 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69494 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69495 - } ops;
69496 + } __no_const ops;
69497 };
69498
69499
69500 diff -urNp linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c
69501 --- linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
69502 +++ linux-3.0.3/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
69503 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69504 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69505 break;
69506 }
69507 - if (atomic_read(&chip->interrupt_sleep_count)) {
69508 - atomic_set(&chip->interrupt_sleep_count, 0);
69509 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69510 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69511 wake_up(&chip->interrupt_sleep);
69512 }
69513 __end:
69514 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69515 continue;
69516 init_waitqueue_entry(&wait, current);
69517 add_wait_queue(&chip->interrupt_sleep, &wait);
69518 - atomic_inc(&chip->interrupt_sleep_count);
69519 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69520 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69521 remove_wait_queue(&chip->interrupt_sleep, &wait);
69522 }
69523 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
69524 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
69525 spin_unlock(&chip->reg_lock);
69526
69527 - if (atomic_read(&chip->interrupt_sleep_count)) {
69528 - atomic_set(&chip->interrupt_sleep_count, 0);
69529 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69530 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69531 wake_up(&chip->interrupt_sleep);
69532 }
69533 }
69534 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
69535 spin_lock_init(&chip->reg_lock);
69536 spin_lock_init(&chip->voice_lock);
69537 init_waitqueue_head(&chip->interrupt_sleep);
69538 - atomic_set(&chip->interrupt_sleep_count, 0);
69539 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69540 chip->card = card;
69541 chip->pci = pci;
69542 chip->irq = -1;
69543 diff -urNp linux-3.0.3/sound/soc/soc-core.c linux-3.0.3/sound/soc/soc-core.c
69544 --- linux-3.0.3/sound/soc/soc-core.c 2011-08-23 21:44:40.000000000 -0400
69545 +++ linux-3.0.3/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
69546 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
69547 }
69548
69549 /* ASoC PCM operations */
69550 -static struct snd_pcm_ops soc_pcm_ops = {
69551 +static snd_pcm_ops_no_const soc_pcm_ops = {
69552 .open = soc_pcm_open,
69553 .close = soc_codec_close,
69554 .hw_params = soc_pcm_hw_params,
69555 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
69556 rtd->pcm = pcm;
69557 pcm->private_data = rtd;
69558 if (platform->driver->ops) {
69559 + /* this whole logic is broken... */
69560 soc_pcm_ops.mmap = platform->driver->ops->mmap;
69561 soc_pcm_ops.pointer = platform->driver->ops->pointer;
69562 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
69563 diff -urNp linux-3.0.3/sound/usb/card.h linux-3.0.3/sound/usb/card.h
69564 --- linux-3.0.3/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
69565 +++ linux-3.0.3/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
69566 @@ -44,6 +44,7 @@ struct snd_urb_ops {
69567 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69568 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69569 };
69570 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
69571
69572 struct snd_usb_substream {
69573 struct snd_usb_stream *stream;
69574 @@ -93,7 +94,7 @@ struct snd_usb_substream {
69575 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
69576 spinlock_t lock;
69577
69578 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
69579 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
69580 };
69581
69582 struct snd_usb_stream {
69583 diff -urNp linux-3.0.3/tools/gcc/constify_plugin.c linux-3.0.3/tools/gcc/constify_plugin.c
69584 --- linux-3.0.3/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
69585 +++ linux-3.0.3/tools/gcc/constify_plugin.c 2011-08-24 18:13:06.000000000 -0400
69586 @@ -0,0 +1,259 @@
69587 +/*
69588 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
69589 + * Licensed under the GPL v2, or (at your option) v3
69590 + *
69591 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
69592 + *
69593 + * Usage:
69594 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
69595 + * $ gcc -fplugin=constify_plugin.so test.c -O2
69596 + */
69597 +
69598 +#include "gcc-plugin.h"
69599 +#include "config.h"
69600 +#include "system.h"
69601 +#include "coretypes.h"
69602 +#include "tree.h"
69603 +#include "tree-pass.h"
69604 +#include "intl.h"
69605 +#include "plugin-version.h"
69606 +#include "tm.h"
69607 +#include "toplev.h"
69608 +#include "function.h"
69609 +#include "tree-flow.h"
69610 +#include "plugin.h"
69611 +//#include "c-tree.h"
69612 +
69613 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
69614 +
69615 +int plugin_is_GPL_compatible;
69616 +
69617 +static struct plugin_info const_plugin_info = {
69618 + .version = "20110824",
69619 + .help = "no-constify\tturn off constification\n",
69620 +};
69621 +
69622 +static bool walk_struct(tree node);
69623 +
69624 +static void deconstify_node(tree type)
69625 +{
69626 + tree field;
69627 +
69628 + C_TYPE_FIELDS_READONLY(type) = 0;
69629 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
69630 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
69631 + if (code == RECORD_TYPE || code == UNION_TYPE)
69632 + deconstify_node(TREE_TYPE(field));
69633 + TREE_READONLY(field) = 0;
69634 + TYPE_READONLY(TREE_TYPE(field)) = 0;
69635 + }
69636 +}
69637 +
69638 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
69639 +{
69640 + tree type;
69641 +
69642 + *no_add_attrs = true;
69643 + if (TREE_CODE(*node) == FUNCTION_DECL) {
69644 + error("%qE attribute does not apply to functions", name);
69645 + return NULL_TREE;
69646 + }
69647 +
69648 + if (TREE_CODE(*node) == VAR_DECL) {
69649 + error("%qE attribute does not apply to variables", name);
69650 + return NULL_TREE;
69651 + }
69652 +
69653 + if (!DECL_P(*node)) {
69654 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
69655 + *no_add_attrs = false;
69656 + else
69657 + error("%qE attribute applies to struct and union types only", name);
69658 + return NULL_TREE;
69659 + }
69660 +
69661 + type = TREE_TYPE(*node);
69662 +
69663 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
69664 + error("%qE attribute applies to struct and union types only", name);
69665 + return NULL_TREE;
69666 + }
69667 +
69668 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
69669 + error("%qE attribute is already applied to the type", name);
69670 + return NULL_TREE;
69671 + }
69672 +
69673 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
69674 + error("%qE attribute used on type that is not constified", name);
69675 + return NULL_TREE;
69676 + }
69677 +
69678 + if (TREE_CODE(*node) == TYPE_DECL) {
69679 + TREE_TYPE(*node) = build_qualified_type(type, TYPE_QUALS(type) & ~TYPE_QUAL_CONST);
69680 + TYPE_FIELDS(TREE_TYPE(*node)) = copy_list(TYPE_FIELDS(TREE_TYPE(*node)));
69681 + deconstify_node(TREE_TYPE(*node));
69682 + return NULL_TREE;
69683 + }
69684 +
69685 + return NULL_TREE;
69686 +}
69687 +
69688 +static struct attribute_spec no_const_attr = {
69689 + .name = "no_const",
69690 + .min_length = 0,
69691 + .max_length = 0,
69692 + .decl_required = false,
69693 + .type_required = false,
69694 + .function_type_required = false,
69695 + .handler = handle_no_const_attribute
69696 +};
69697 +
69698 +static void register_attributes(void *event_data, void *data)
69699 +{
69700 + register_attribute(&no_const_attr);
69701 +}
69702 +
69703 +static void constify_node(tree node)
69704 +{
69705 + TREE_READONLY(node) = 1;
69706 +}
69707 +
69708 +static bool is_fptr(tree field)
69709 +{
69710 + tree ptr = TREE_TYPE(field);
69711 +
69712 + if (TREE_CODE(ptr) != POINTER_TYPE)
69713 + return false;
69714 +
69715 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
69716 +}
69717 +
69718 +static bool walk_struct(tree node)
69719 +{
69720 + tree field;
69721 +
69722 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
69723 + return false;
69724 +
69725 + if (TYPE_FIELDS(node) == NULL_TREE)
69726 + return false;
69727 +
69728 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
69729 + tree type = TREE_TYPE(field);
69730 + enum tree_code code = TREE_CODE(type);
69731 + if (code == RECORD_TYPE || code == UNION_TYPE) {
69732 + if (!(walk_struct(type)))
69733 + return false;
69734 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
69735 + return false;
69736 + }
69737 + return true;
69738 +}
69739 +
69740 +static void finish_type(void *event_data, void *data)
69741 +{
69742 + tree node = (tree)event_data;
69743 +
69744 + if (node == NULL_TREE)
69745 + return;
69746 +
69747 + if (TREE_READONLY(node))
69748 + return;
69749 +
69750 + if (walk_struct(node))
69751 + constify_node(node);
69752 +}
69753 +
69754 +static unsigned int check_local_variables(void);
69755 +
69756 +struct gimple_opt_pass pass_local_variable = {
69757 + {
69758 + .type = GIMPLE_PASS,
69759 + .name = "check_local_variables",
69760 + .gate = NULL,
69761 + .execute = check_local_variables,
69762 + .sub = NULL,
69763 + .next = NULL,
69764 + .static_pass_number = 0,
69765 + .tv_id = TV_NONE,
69766 + .properties_required = 0,
69767 + .properties_provided = 0,
69768 + .properties_destroyed = 0,
69769 + .todo_flags_start = 0,
69770 + .todo_flags_finish = 0
69771 + }
69772 +};
69773 +
69774 +static unsigned int check_local_variables(void)
69775 +{
69776 + tree var;
69777 + referenced_var_iterator rvi;
69778 +
69779 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
69780 + FOR_EACH_REFERENCED_VAR(var, rvi) {
69781 +#else
69782 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
69783 +#endif
69784 + tree type = TREE_TYPE(var);
69785 +
69786 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
69787 + continue;
69788 +
69789 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
69790 + continue;
69791 +
69792 + if (!TYPE_READONLY(type))
69793 + continue;
69794 +
69795 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
69796 +// continue;
69797 +
69798 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
69799 +// continue;
69800 +
69801 + if (walk_struct(type)) {
69802 + error("constified variable %qE cannot be local", var);
69803 + return 1;
69804 + }
69805 + }
69806 + return 0;
69807 +}
69808 +
69809 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
69810 +{
69811 + const char * const plugin_name = plugin_info->base_name;
69812 + const int argc = plugin_info->argc;
69813 + const struct plugin_argument * const argv = plugin_info->argv;
69814 + int i;
69815 + bool constify = true;
69816 +
69817 + struct register_pass_info local_variable_pass_info = {
69818 + .pass = &pass_local_variable.pass,
69819 + .reference_pass_name = "*referenced_vars",
69820 + .ref_pass_instance_number = 0,
69821 + .pos_op = PASS_POS_INSERT_AFTER
69822 + };
69823 +
69824 + if (!plugin_default_version_check(version, &gcc_version)) {
69825 + error(G_("incompatible gcc/plugin versions"));
69826 + return 1;
69827 + }
69828 +
69829 + for (i = 0; i < argc; ++i) {
69830 + if (!(strcmp(argv[i].key, "no-constify"))) {
69831 + constify = false;
69832 + continue;
69833 + }
69834 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
69835 + }
69836 +
69837 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
69838 + if (constify) {
69839 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
69840 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
69841 + }
69842 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
69843 +
69844 + return 0;
69845 +}
69846 diff -urNp linux-3.0.3/tools/gcc/Makefile linux-3.0.3/tools/gcc/Makefile
69847 --- linux-3.0.3/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
69848 +++ linux-3.0.3/tools/gcc/Makefile 2011-08-23 21:47:56.000000000 -0400
69849 @@ -0,0 +1,12 @@
69850 +#CC := gcc
69851 +#PLUGIN_SOURCE_FILES := pax_plugin.c
69852 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
69853 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
69854 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
69855 +
69856 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
69857 +
69858 +hostlibs-y := stackleak_plugin.so constify_plugin.so
69859 +always := $(hostlibs-y)
69860 +stackleak_plugin-objs := stackleak_plugin.o
69861 +constify_plugin-objs := constify_plugin.o
69862 diff -urNp linux-3.0.3/tools/gcc/stackleak_plugin.c linux-3.0.3/tools/gcc/stackleak_plugin.c
69863 --- linux-3.0.3/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
69864 +++ linux-3.0.3/tools/gcc/stackleak_plugin.c 2011-08-23 21:47:56.000000000 -0400
69865 @@ -0,0 +1,243 @@
69866 +/*
69867 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
69868 + * Licensed under the GPL v2
69869 + *
69870 + * Note: the choice of the license means that the compilation process is
69871 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
69872 + * but for the kernel it doesn't matter since it doesn't link against
69873 + * any of the gcc libraries
69874 + *
69875 + * gcc plugin to help implement various PaX features
69876 + *
69877 + * - track lowest stack pointer
69878 + *
69879 + * TODO:
69880 + * - initialize all local variables
69881 + *
69882 + * BUGS:
69883 + * - cloned functions are instrumented twice
69884 + */
69885 +#include "gcc-plugin.h"
69886 +#include "config.h"
69887 +#include "system.h"
69888 +#include "coretypes.h"
69889 +#include "tree.h"
69890 +#include "tree-pass.h"
69891 +#include "intl.h"
69892 +#include "plugin-version.h"
69893 +#include "tm.h"
69894 +#include "toplev.h"
69895 +#include "basic-block.h"
69896 +#include "gimple.h"
69897 +//#include "expr.h" where are you...
69898 +#include "diagnostic.h"
69899 +#include "rtl.h"
69900 +#include "emit-rtl.h"
69901 +#include "function.h"
69902 +
69903 +int plugin_is_GPL_compatible;
69904 +
69905 +static int track_frame_size = -1;
69906 +static const char track_function[] = "pax_track_stack";
69907 +static bool init_locals;
69908 +
69909 +static struct plugin_info stackleak_plugin_info = {
69910 + .version = "201106030000",
69911 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
69912 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
69913 +};
69914 +
69915 +static bool gate_stackleak_track_stack(void);
69916 +static unsigned int execute_stackleak_tree_instrument(void);
69917 +static unsigned int execute_stackleak_final(void);
69918 +
69919 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
69920 + .pass = {
69921 + .type = GIMPLE_PASS,
69922 + .name = "stackleak_tree_instrument",
69923 + .gate = gate_stackleak_track_stack,
69924 + .execute = execute_stackleak_tree_instrument,
69925 + .sub = NULL,
69926 + .next = NULL,
69927 + .static_pass_number = 0,
69928 + .tv_id = TV_NONE,
69929 + .properties_required = PROP_gimple_leh | PROP_cfg,
69930 + .properties_provided = 0,
69931 + .properties_destroyed = 0,
69932 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
69933 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
69934 + }
69935 +};
69936 +
69937 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
69938 + .pass = {
69939 + .type = RTL_PASS,
69940 + .name = "stackleak_final",
69941 + .gate = gate_stackleak_track_stack,
69942 + .execute = execute_stackleak_final,
69943 + .sub = NULL,
69944 + .next = NULL,
69945 + .static_pass_number = 0,
69946 + .tv_id = TV_NONE,
69947 + .properties_required = 0,
69948 + .properties_provided = 0,
69949 + .properties_destroyed = 0,
69950 + .todo_flags_start = 0,
69951 + .todo_flags_finish = 0
69952 + }
69953 +};
69954 +
69955 +static bool gate_stackleak_track_stack(void)
69956 +{
69957 + return track_frame_size >= 0;
69958 +}
69959 +
69960 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
69961 +{
69962 + gimple call;
69963 + tree decl, type;
69964 +
69965 + // insert call to void pax_track_stack(void)
69966 + type = build_function_type_list(void_type_node, NULL_TREE);
69967 + decl = build_fn_decl(track_function, type);
69968 + DECL_ASSEMBLER_NAME(decl); // for LTO
69969 + call = gimple_build_call(decl, 0);
69970 + if (before)
69971 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
69972 + else
69973 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
69974 +}
69975 +
69976 +static unsigned int execute_stackleak_tree_instrument(void)
69977 +{
69978 + basic_block bb;
69979 + gimple_stmt_iterator gsi;
69980 +
69981 + // 1. loop through BBs and GIMPLE statements
69982 + FOR_EACH_BB(bb) {
69983 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
69984 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
69985 + tree decl;
69986 + gimple stmt = gsi_stmt(gsi);
69987 +
69988 + if (!is_gimple_call(stmt))
69989 + continue;
69990 + decl = gimple_call_fndecl(stmt);
69991 + if (!decl)
69992 + continue;
69993 + if (TREE_CODE(decl) != FUNCTION_DECL)
69994 + continue;
69995 + if (!DECL_BUILT_IN(decl))
69996 + continue;
69997 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
69998 + continue;
69999 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70000 + continue;
70001 +
70002 + // 2. insert track call after each __builtin_alloca call
70003 + stackleak_add_instrumentation(&gsi, false);
70004 +// print_node(stderr, "pax", decl, 4);
70005 + }
70006 + }
70007 +
70008 + // 3. insert track call at the beginning
70009 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70010 + gsi = gsi_start_bb(bb);
70011 + stackleak_add_instrumentation(&gsi, true);
70012 +
70013 + return 0;
70014 +}
70015 +
70016 +static unsigned int execute_stackleak_final(void)
70017 +{
70018 + rtx insn;
70019 +
70020 + if (cfun->calls_alloca)
70021 + return 0;
70022 +
70023 + // 1. find pax_track_stack calls
70024 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70025 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70026 + rtx body;
70027 +
70028 + if (!CALL_P(insn))
70029 + continue;
70030 + body = PATTERN(insn);
70031 + if (GET_CODE(body) != CALL)
70032 + continue;
70033 + body = XEXP(body, 0);
70034 + if (GET_CODE(body) != MEM)
70035 + continue;
70036 + body = XEXP(body, 0);
70037 + if (GET_CODE(body) != SYMBOL_REF)
70038 + continue;
70039 + if (strcmp(XSTR(body, 0), track_function))
70040 + continue;
70041 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70042 + // 2. delete call if function frame is not big enough
70043 + if (get_frame_size() >= track_frame_size)
70044 + continue;
70045 + delete_insn_and_edges(insn);
70046 + }
70047 +
70048 +// print_simple_rtl(stderr, get_insns());
70049 +// print_rtl(stderr, get_insns());
70050 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70051 +
70052 + return 0;
70053 +}
70054 +
70055 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70056 +{
70057 + const char * const plugin_name = plugin_info->base_name;
70058 + const int argc = plugin_info->argc;
70059 + const struct plugin_argument * const argv = plugin_info->argv;
70060 + int i;
70061 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70062 + .pass = &stackleak_tree_instrument_pass.pass,
70063 +// .reference_pass_name = "tree_profile",
70064 + .reference_pass_name = "optimized",
70065 + .ref_pass_instance_number = 0,
70066 + .pos_op = PASS_POS_INSERT_AFTER
70067 + };
70068 + struct register_pass_info stackleak_final_pass_info = {
70069 + .pass = &stackleak_final_rtl_opt_pass.pass,
70070 + .reference_pass_name = "final",
70071 + .ref_pass_instance_number = 0,
70072 + .pos_op = PASS_POS_INSERT_BEFORE
70073 + };
70074 +
70075 + if (!plugin_default_version_check(version, &gcc_version)) {
70076 + error(G_("incompatible gcc/plugin versions"));
70077 + return 1;
70078 + }
70079 +
70080 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70081 +
70082 + for (i = 0; i < argc; ++i) {
70083 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70084 + if (!argv[i].value) {
70085 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70086 + continue;
70087 + }
70088 + track_frame_size = atoi(argv[i].value);
70089 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70090 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70091 + continue;
70092 + }
70093 + if (!strcmp(argv[i].key, "initialize-locals")) {
70094 + if (argv[i].value) {
70095 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70096 + continue;
70097 + }
70098 + init_locals = true;
70099 + continue;
70100 + }
70101 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70102 + }
70103 +
70104 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70105 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70106 +
70107 + return 0;
70108 +}
70109 diff -urNp linux-3.0.3/usr/gen_init_cpio.c linux-3.0.3/usr/gen_init_cpio.c
70110 --- linux-3.0.3/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
70111 +++ linux-3.0.3/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
70112 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
70113 int retval;
70114 int rc = -1;
70115 int namesize;
70116 - int i;
70117 + unsigned int i;
70118
70119 mode |= S_IFREG;
70120
70121 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
70122 *env_var = *expanded = '\0';
70123 strncat(env_var, start + 2, end - start - 2);
70124 strncat(expanded, new_location, start - new_location);
70125 - strncat(expanded, getenv(env_var), PATH_MAX);
70126 - strncat(expanded, end + 1, PATH_MAX);
70127 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70128 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70129 strncpy(new_location, expanded, PATH_MAX);
70130 + new_location[PATH_MAX] = 0;
70131 } else
70132 break;
70133 }
70134 diff -urNp linux-3.0.3/virt/kvm/kvm_main.c linux-3.0.3/virt/kvm/kvm_main.c
70135 --- linux-3.0.3/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
70136 +++ linux-3.0.3/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
70137 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70138
70139 static cpumask_var_t cpus_hardware_enabled;
70140 static int kvm_usage_count = 0;
70141 -static atomic_t hardware_enable_failed;
70142 +static atomic_unchecked_t hardware_enable_failed;
70143
70144 struct kmem_cache *kvm_vcpu_cache;
70145 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70146 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
70147
70148 if (r) {
70149 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70150 - atomic_inc(&hardware_enable_failed);
70151 + atomic_inc_unchecked(&hardware_enable_failed);
70152 printk(KERN_INFO "kvm: enabling virtualization on "
70153 "CPU%d failed\n", cpu);
70154 }
70155 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
70156
70157 kvm_usage_count++;
70158 if (kvm_usage_count == 1) {
70159 - atomic_set(&hardware_enable_failed, 0);
70160 + atomic_set_unchecked(&hardware_enable_failed, 0);
70161 on_each_cpu(hardware_enable_nolock, NULL, 1);
70162
70163 - if (atomic_read(&hardware_enable_failed)) {
70164 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70165 hardware_disable_all_nolock();
70166 r = -EBUSY;
70167 }
70168 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
70169 kvm_arch_vcpu_put(vcpu);
70170 }
70171
70172 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70173 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70174 struct module *module)
70175 {
70176 int r;
70177 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
70178 if (!vcpu_align)
70179 vcpu_align = __alignof__(struct kvm_vcpu);
70180 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70181 - 0, NULL);
70182 + SLAB_USERCOPY, NULL);
70183 if (!kvm_vcpu_cache) {
70184 r = -ENOMEM;
70185 goto out_free_3;
70186 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
70187 if (r)
70188 goto out_free;
70189
70190 - kvm_chardev_ops.owner = module;
70191 - kvm_vm_fops.owner = module;
70192 - kvm_vcpu_fops.owner = module;
70193 + pax_open_kernel();
70194 + *(void **)&kvm_chardev_ops.owner = module;
70195 + *(void **)&kvm_vm_fops.owner = module;
70196 + *(void **)&kvm_vcpu_fops.owner = module;
70197 + pax_close_kernel();
70198
70199 r = misc_register(&kvm_dev);
70200 if (r) {